diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 9fbe05c093..0000000000 --- a/.coveragerc +++ /dev/null @@ -1,25 +0,0 @@ -[run] -branch = True -source = pymongo,bson,gridfs - -[paths] -source= - . - /data/mci/*/src - -[report] -exclude_lines = - if (.*and +)*_use_c( and.*)*: - def has_c - def get_version_string - ^except AttributeError: - except ImportError: - raise NotImplementedError - return NotImplemented - _use_c = True - if __name__ == '__main__': -partial_branches = - if (.*and +)*not _use_c( and.*)*: - -[html] -directory = htmlcov diff --git a/.evergreen/build-mac.sh b/.evergreen/build-mac.sh deleted file mode 100755 index 09950a592f..0000000000 --- a/.evergreen/build-mac.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -ex - -# Get access to testinstall. -. .evergreen/utils.sh - -# Create temp directory for validated files. -rm -rf validdist -mkdir -p validdist -mv dist/* validdist || true - -for VERSION in 3.7 3.8 3.9 3.10; do - PYTHON=/Library/Frameworks/Python.framework/Versions/$VERSION/bin/python3 - rm -rf build - - # Install wheel if not already there. - if ! $PYTHON -m wheel version; then - createvirtualenv $PYTHON releasevenv - WHEELPYTHON=python - python -m pip install --upgrade wheel - else - WHEELPYTHON=$PYTHON - fi - - $WHEELPYTHON setup.py bdist_wheel - deactivate || true - rm -rf releasevenv - - # Test that each wheel is installable. - for release in dist/*; do - testinstall $PYTHON $release - mv $release validdist/ - done -done - -mv validdist/* dist -rm -rf validdist -ls dist diff --git a/.evergreen/build-manylinux-internal.sh b/.evergreen/build-manylinux-internal.sh deleted file mode 100755 index 4fd43a67a3..0000000000 --- a/.evergreen/build-manylinux-internal.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -ex -cd /src - -# Get access to testinstall. -. .evergreen/utils.sh - -# Create temp directory for validated files. -rm -rf validdist -mkdir -p validdist -mv dist/* validdist || true - -# Compile wheels -for PYTHON in /opt/python/*/bin/python; do - if [[ ! $PYTHON =~ (cp37|cp38|cp39|cp310) ]]; then - continue - fi - # https://github.com/pypa/manylinux/issues/49 - rm -rf build - $PYTHON setup.py bdist_wheel - rm -rf build - - # Audit wheels and write manylinux tag - for whl in dist/*.whl; do - # Skip already built manylinux wheels. - if [[ "$whl" != *"manylinux"* ]]; then - auditwheel repair $whl -w dist - rm $whl - fi - done - - # Test that each wheel is installable. - # Test without virtualenv because it's not present on manylinux containers. - for release in dist/*; do - testinstall $PYTHON $release "without-virtualenv" - mv $release validdist/ - done -done - -mv validdist/* dist -rm -rf validdist -ls dist diff --git a/.evergreen/build-manylinux.sh b/.evergreen/build-manylinux.sh deleted file mode 100755 index cac435fb11..0000000000 --- a/.evergreen/build-manylinux.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -ex - -docker version -# manylinux1 2021-05-05-b64d921 and manylinux2014 2021-05-05-1ac6ef3 were -# the last releases to generate pip < 20.3 compatible wheels. After that -# auditwheel was upgraded to v4 which produces PEP 600 manylinux_x_y wheels -# which requires pip >= 20.3. We use the older docker image to support older -# pip versions. 
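# (Illustrative aside, not part of the deleted script.) The pip >= 20.3
# cutoff described above can be checked directly before deciding which
# images to build with; "python" stands for whichever interpreter will
# install the wheels:
if python -c 'import pip, sys; sys.exit(0 if tuple(map(int, pip.__version__.split(".")[:2])) >= (20, 3) else 1)'; then
    echo "pip supports PEP 600 manylinux_x_y wheel tags"
else
    echo "pip predates PEP 600; stick to the pinned manylinux1/manylinux2014 images"
fi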
-BUILD_WITH_TAG="$1" -if [ -n "$BUILD_WITH_TAG" ]; then - images=(quay.io/pypa/manylinux1_x86_64:2021-05-05-b64d921 \ - quay.io/pypa/manylinux1_i686:2021-05-05-b64d921 \ - quay.io/pypa/manylinux2014_x86_64:2021-05-05-1ac6ef3 \ - quay.io/pypa/manylinux2014_i686:2021-05-05-1ac6ef3 \ - quay.io/pypa/manylinux2014_aarch64:2021-05-05-1ac6ef3 \ - quay.io/pypa/manylinux2014_ppc64le:2021-05-05-1ac6ef3 \ - quay.io/pypa/manylinux2014_s390x:2021-05-05-1ac6ef3) -else - images=(quay.io/pypa/manylinux1_x86_64 \ - quay.io/pypa/manylinux1_i686 \ - quay.io/pypa/manylinux2014_x86_64 \ - quay.io/pypa/manylinux2014_i686 \ - quay.io/pypa/manylinux2014_aarch64 \ - quay.io/pypa/manylinux2014_ppc64le \ - quay.io/pypa/manylinux2014_s390x) -fi - -for image in "${images[@]}"; do - docker pull $image - docker run --rm -v "`pwd`:/src" $image /src/.evergreen/build-manylinux-internal.sh -done - -ls dist - -# Check for any unexpected files. -unexpected=$(find dist \! \( -iname dist -or \ - -iname '*cp37*' -or \ - -iname '*cp38*' -or \ - -iname '*cp39*' -or \ - -iname '*cp310*' \)) -if [ -n "$unexpected" ]; then - echo "Unexpected files:" $unexpected - exit 1 -fi diff --git a/.evergreen/build-windows.sh b/.evergreen/build-windows.sh deleted file mode 100755 index 09f5e7f0b4..0000000000 --- a/.evergreen/build-windows.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -ex - -# Get access to testinstall. -. .evergreen/utils.sh - -# Create temp directory for validated files. -rm -rf validdist -mkdir -p validdist -mv dist/* validdist || true - -for VERSION in 37 38 39 310; do - _pythons=("C:/Python/Python${VERSION}/python.exe" \ - "C:/Python/32/Python${VERSION}/python.exe") - for PYTHON in "${_pythons[@]}"; do - rm -rf build - $PYTHON setup.py bdist_wheel - - # Test that each wheel is installable. - for release in dist/*; do - testinstall $PYTHON $release - mv $release validdist/ - done - done -done - -mv validdist/* dist -rm -rf validdist -ls dist diff --git a/.evergreen/combine-coverage.sh b/.evergreen/combine-coverage.sh new file mode 100755 index 0000000000..36266c1842 --- /dev/null +++ b/.evergreen/combine-coverage.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +# Coverage combine merges (and removes) all the coverage files and +# generates a new .coverage file in the current directory. + +set -eu + +. .evergreen/utils.sh + +if [ -z "${PYTHON_BINARY:-}" ]; then + PYTHON_BINARY=$(find_python3) +fi + +createvirtualenv "$PYTHON_BINARY" covenv +# Keep in sync with run-tests.sh +# coverage >=5 is needed for relative_files=true. 
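# (Aside for context, not part of this file.) relative_files makes coverage
# record file paths relative to the project root, which is what allows data
# files produced on different Evergreen hosts to be combined below. Assuming
# the project keeps its coverage settings in pyproject.toml (hence the
# coverage[toml] extra installed next), the relevant setting is:
cat >> pyproject.toml <<'EOF'
[tool.coverage.run]
relative_files = true
EOF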
+pip install -q "coverage[toml]>=5,<=7.5" + +pip list +ls -la coverage/ + +python -m coverage combine coverage/coverage.* +python -m coverage html -d htmlcov diff --git a/.evergreen/config.yml b/.evergreen/config.yml index ac7f97f6fa..91fa442775 100644 --- a/.evergreen/config.yml +++ b/.evergreen/config.yml @@ -12,2773 +12,53 @@ stepback: true # Actual testing tasks are marked with `type: test` command_type: system -# Protect ourself against rogue test case, or curl gone wild, that runs forever +# Protect ourselves against rogue test case, or curl gone wild, that runs forever # Good rule of thumb: the averageish length a task takes, times 5 # That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 1800 # 30 minutes is the longest we'll ever run +exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run (primarily + # for macos hosts) # What to do when evergreen hits the timeout (`post:` tasks are run automatically) timeout: - - command: shell.exec + - command: subprocess.exec params: - script: | - ls -la + binary: ls -la -functions: - "fetch source": - # Executes git clone and applies the submitted patch, if any - - command: git.get_project - params: - directory: "src" - # Applies the subitted patch, if any - # Deprecated. Should be removed. But still needed for certain agents (ZAP) - - command: git.apply_patch - # Make an evergreen exapanstion file with dynamic values - - command: shell.exec - params: - working_dir: "src" - script: | - # Get the current unique version of this checkout - if [ "${is_patch}" = "true" ]; then - CURRENT_VERSION=$(git describe)-patch-${version_id} - else - CURRENT_VERSION=latest - fi - - export DRIVERS_TOOLS="$(dirname $(pwd))/drivers-tools" - export PROJECT_DIRECTORY="$(pwd)" - - # Python has cygwin path problems on Windows. 
Detect prospective mongo-orchestration home directory - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - export DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) - export PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) - fi - - export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" - export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" - export UPLOAD_BUCKET="${project}" - - cat < expansion.yml - CURRENT_VERSION: "$CURRENT_VERSION" - DRIVERS_TOOLS: "$DRIVERS_TOOLS" - MONGO_ORCHESTRATION_HOME: "$MONGO_ORCHESTRATION_HOME" - MONGODB_BINARIES: "$MONGODB_BINARIES" - UPLOAD_BUCKET: "$UPLOAD_BUCKET" - PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" - PREPARE_SHELL: | - set -o errexit - export DRIVERS_TOOLS="$DRIVERS_TOOLS" - export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" - export MONGODB_BINARIES="$MONGODB_BINARIES" - export UPLOAD_BUCKET="$UPLOAD_BUCKET" - export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" - - export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" - # Installation of cryptography requires a rust compiler on some machines - export PATH="$MONGODB_BINARIES:/home/admin/.cargo/bin:$PATH" - export PROJECT="${project}" - EOT - # See what we've done - cat expansion.yml - - # Load the expansion file to make an evergreen variable with the current unique version - - command: expansions.update - params: - file: src/expansion.yml - - "prepare resources": - - command: shell.exec - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - rm -rf $DRIVERS_TOOLS - if [ "${project}" = "drivers-tools" ]; then - # If this was a patch build, doing a fresh clone would not actually test the patch - cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS - else - git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS - fi - echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config - - "upload coverage" : - - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: src/.coverage - optional: true - # Upload the coverage report for all tasks in a single build to the same directory. - remote_file: ${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name} - bucket: mciuploads - permissions: public-read - content_type: text/html - display_name: "Raw Coverage Report" - - "download and merge coverage" : - - command: shell.exec - params: - silent: true - working_dir: "src" - script: | - export AWS_ACCESS_KEY_ID=${aws_key} - export AWS_SECRET_ACCESS_KEY=${aws_secret} - - # Download all the task coverage files. - aws s3 cp --recursive s3://mciuploads/${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/coverage/ coverage/ - - command: shell.exec - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - # Coverage combine merges (and removes) all the coverage files and - # generates a new .coverage file in the current directory. - ls -la coverage/ - /opt/python/3.7/bin/python3 -m coverage combine coverage/coverage.* - /opt/python/3.7/bin/python3 -m coverage html -d htmlcov - # Upload the resulting html coverage report. - - command: shell.exec - params: - silent: true - working_dir: "src" - script: | - export AWS_ACCESS_KEY_ID=${aws_key} - export AWS_SECRET_ACCESS_KEY=${aws_secret} - aws s3 cp htmlcov/ s3://mciuploads/${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/htmlcov/ --recursive --acl public-read --region us-east-1 - # Attach the index.html with s3.put so it shows up in the Evergreen UI. 
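# (Illustrative aside.) The recursive `aws s3 cp` above publishes the whole
# htmlcov directory, but only a file uploaded via Evergreen's s3.put gets a
# link on the task page. A hypothetical spot-check that the report is
# publicly reachable, reusing the same expansions:
curl -fsI "https://mciuploads.s3.amazonaws.com/${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/htmlcov/index.html"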
- - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: src/htmlcov/index.html - remote_file: ${UPLOAD_BUCKET}/coverage/${revision}/${version_id}/htmlcov/index.html - bucket: mciuploads - permissions: public-read - content_type: text/html - display_name: "Coverage Report HTML" - - - "upload mo artifacts": - - command: shell.exec - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - find $MONGO_ORCHESTRATION_HOME -name \*.log | xargs tar czf mongodb-logs.tar.gz - - command: archive.targz_pack - params: - target: "mongo-coredumps.tgz" - source_dir: "./" - include: - - "./**.core" - - "./**.mdmp" # Windows: minidumps - - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: mongo-coredumps.tgz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz - bucket: mciuploads - permissions: public-read - content_type: ${content_type|application/gzip} - display_name: Core Dumps - Execution - optional: true - - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: mongodb-logs.tar.gz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-mongodb-logs.tar.gz - bucket: mciuploads - permissions: public-read - content_type: ${content_type|application/x-gzip} - display_name: "mongodb-logs.tar.gz" - - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: drivers-tools/.evergreen/orchestration/server.log - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-orchestration.log - bucket: mciuploads - permissions: public-read - content_type: ${content_type|text/plain} - display_name: "orchestration.log" - - "upload working dir": - - command: archive.targz_pack - params: - target: "working-dir.tar.gz" - source_dir: ${PROJECT_DIRECTORY}/ - include: - - "./**" - - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: working-dir.tar.gz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/artifacts/${task_id}-${execution}-working-dir.tar.gz - bucket: mciuploads - permissions: public-read - content_type: ${content_type|application/x-gzip} - display_name: "working-dir.tar.gz" - - command: archive.targz_pack - params: - target: "drivers-dir.tar.gz" - source_dir: ${DRIVERS_TOOLS} - include: - - "./**" - exclude_files: - # Windows cannot read the mongod *.lock files because they are locked. 
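# (Illustrative aside.) Outside Evergreen, the equivalent archive step with
# plain tar would apply the same exclusion:
tar czf drivers-dir.tar.gz --exclude='*.lock' -C "$DRIVERS_TOOLS" .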
- - "*.lock" - - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: drivers-dir.tar.gz - remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/artifacts/${task_id}-${execution}-drivers-dir.tar.gz - bucket: mciuploads - permissions: public-read - content_type: ${content_type|application/x-gzip} - display_name: "drivers-dir.tar.gz" - - "upload test results": - - command: attach.results - params: - file_location: "${DRIVERS_TOOLS}/results.json" - - command: attach.xunit_results - params: - file: "src/xunit-results/TEST-*.xml" - - "bootstrap mongo-orchestration": - - command: shell.exec - params: - script: | - set -o xtrace - - # Enable core dumps if enabled on the machine - # Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml - if [ -f /proc/self/coredump_filter ]; then - # Set the shell process (and its children processes) to dump ELF headers (bit 4), - # anonymous shared mappings (bit 1), and anonymous private mappings (bit 0). - echo 0x13 > /proc/self/coredump_filter - - if [ -f /sbin/sysctl ]; then - # Check that the core pattern is set explicitly on our distro image instead - # of being the OS's default value. This ensures that coredump names are consistent - # across distros and can be picked up by Evergreen. - core_pattern=$(/sbin/sysctl -n "kernel.core_pattern") - if [ "$core_pattern" = "dump_%e.%p.core" ]; then - echo "Enabling coredumps" - ulimit -c unlimited - fi - fi - fi - - if [ $(uname -s) = "Darwin" ]; then - core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile") - if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then - echo "Enabling coredumps" - ulimit -c unlimited - fi - fi - - ${PREPARE_SHELL} - MONGODB_VERSION=${VERSION} \ - TOPOLOGY=${TOPOLOGY} \ - AUTH=${AUTH} \ - SSL=${SSL} \ - STORAGE_ENGINE=${STORAGE_ENGINE} \ - DISABLE_TEST_COMMANDS=${DISABLE_TEST_COMMANDS} \ - ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ - REQUIRE_API_VERSION=${REQUIRE_API_VERSION} \ - LOAD_BALANCER=${LOAD_BALANCER} \ - bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh - # run-orchestration generates expansion file with the MONGODB_URI for the cluster - - command: expansions.update - params: - file: mo-expansion.yml - - command: expansions.update - params: - updates: - - key: MONGODB_STARTED - value: "1" - - "bootstrap data lake": - - command: shell.exec - type: setup - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - # The mongohouse build script needs to be passed the VARIANT variable, see - # https://github.com/10gen/mongohouse/blob/973cc11/evergreen.yaml#L65 - VARIANT=ubuntu1804 bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/build-mongohouse-local.sh - - command: shell.exec - type: setup - params: - background: true - script: | - set -o xtrace - ${PREPARE_SHELL} - bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-local.sh - - "stop mongo-orchestration": - - command: shell.exec - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh - - "run mod_wsgi tests": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} MOD_WSGI_VERSION=${MOD_WSGI_VERSION} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mod-wsgi-tests.sh - - "run mockupdb tests": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - 
PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-mockupdb-tests.sh - - "run doctests": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-doctests.sh - - "run tests": - # If testing FLE, start the KMS mock servers, first create the virtualenv. - - command: shell.exec - params: - script: | - if [ -n "${test_encryption}" ]; then - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/csfle - . ./activate_venv.sh - fi - # Run in the background so the mock servers don't block the EVG task. - - command: shell.exec - params: - background: true - script: | - if [ -n "${test_encryption}" ]; then - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/csfle - . ./activate_venv.sh - # The -u options forces the stdout and stderr streams to be unbuffered. - # TMPDIR is required to avoid "AF_UNIX path too long" errors. - TMPDIR="$(dirname $DRIVERS_TOOLS)" python -u kms_kmip_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 5698 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/expired.pem --port 8000 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/wrong-host.pem --port 8001 & - python -u kms_http_server.py --ca_file ../x509gen/ca.pem --cert_file ../x509gen/server.pem --port 8002 --require_client_cert & - fi - # Wait up to 10 seconds for the KMIP server to start. - - command: shell.exec - params: - script: | - if [ -n "${test_encryption}" ]; then - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/csfle - . ./activate_venv.sh - for i in $(seq 1 1 10); do - sleep 1 - if python -u kms_kmip_client.py; then - echo 'KMS KMIP server started!' - exit 0 - fi - done - echo 'Failed to start KMIP server!' - exit 1 - fi - - command: shell.exec - type: test - params: - silent: true - working_dir: "src" - script: | - if [ -n "${test_encryption}" ]; then - cat < fle_creds.sh - export FLE_AWS_KEY="${fle_aws_key}" - export FLE_AWS_SECRET="${fle_aws_secret}" - export FLE_AZURE_CLIENTID="${fle_azure_clientid}" - export FLE_AZURE_TENANTID="${fle_azure_tenantid}" - export FLE_AZURE_CLIENTSECRET="${fle_azure_clientsecret}" - export FLE_GCP_EMAIL="${fle_gcp_email}" - export FLE_GCP_PRIVATEKEY="${fle_gcp_privatekey}" - # Needed for generating temporary aws credentials. - export AWS_ACCESS_KEY_ID="${fle_aws_key}" - export AWS_SECRET_ACCESS_KEY="${fle_aws_secret}" - export AWS_DEFAULT_REGION=us-east-1 - EOT - fi - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - if [ -n "${set_xtrace_on}" ]; then - set -o xtrace - export SET_XTRACE_ON="${set_xtrace_on}" - fi - ${PREPARE_SHELL} - if [ -n "${MONGODB_STARTED}" ]; then - export PYMONGO_MUST_CONNECT=1 - fi - if [ -n "${DISABLE_TEST_COMMANDS}" ]; then - export PYMONGO_DISABLE_TEST_COMMANDS=1 - fi - if [ -n "${test_encryption}" ]; then - # Disable xtrace (just in case it was accidentally set). - set +x - . 
./fle_creds.sh - rm -f ./fle_creds.sh - export LIBMONGOCRYPT_URL="${libmongocrypt_url}" - export TEST_ENCRYPTION=1 - fi - if [ -n "${test_crypt_shared}" ]; then - export TEST_CRYPT_SHARED=1 - fi - if [ -n "${test_pyopenssl}" ]; then - export TEST_PYOPENSSL=1 - fi - if [ -n "${SETDEFAULTENCODING}" ]; then - export SETDEFAULTENCODING="${SETDEFAULTENCODING}" - fi - if [ -n "${test_loadbalancer}" ]; then - export TEST_LOADBALANCER=1 - export LOAD_BALANCER=1 - export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}" - export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}" - fi - if [ -n "${test_serverless}" ]; then - export TEST_SERVERLESS=1 - export SERVERLESS_ATLAS_USER="${SERVERLESS_ATLAS_USER}" - export SERVERLESS_ATLAS_PASSWORD="${SERVERLESS_ATLAS_PASSWORD}" - export MONGODB_URI="${SERVERLESS_URI}" - export SINGLE_MONGOS_LB_URI="${MONGODB_URI}" - export MULTI_MONGOS_LB_URI="${MONGODB_URI}" - fi - - PYTHON_BINARY=${PYTHON_BINARY} \ - GREEN_FRAMEWORK=${GREEN_FRAMEWORK} \ - C_EXTENSIONS=${C_EXTENSIONS} \ - COVERAGE=${COVERAGE} \ - COMPRESSORS=${COMPRESSORS} \ - AUTH=${AUTH} \ - SSL=${SSL} \ - DATA_LAKE=${DATA_LAKE} \ - MONGODB_API_VERSION=${MONGODB_API_VERSION} \ - bash ${PROJECT_DIRECTORY}/.evergreen/run-tests.sh - - "run enterprise auth tests": - - command: shell.exec - type: test - params: - silent: true - working_dir: "src" - script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - PYTHON_BINARY=${PYTHON_BINARY} SASL_HOST=${sasl_host} SASL_PORT=${sasl_port} SASL_USER=${sasl_user} SASL_PASS=${sasl_pass} SASL_DB=${sasl_db} PRINCIPAL=${principal} GSSAPI_DB=${gssapi_db} KEYTAB_BASE64=${keytab_base64} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} bash ${PROJECT_DIRECTORY}/.evergreen/run-enterprise-auth-tests.sh - - "run atlas tests": - - command: shell.exec - type: test - params: - silent: true - working_dir: "src" - script: | - cat < prepare_atlas_connectivity.sh - export ATLAS_FREE='${atlas_free}' - export ATLAS_REPL='${atlas_repl}' - export ATLAS_SHRD='${atlas_shrd}' - export ATLAS_TLS11='${atlas_tls11}' - export ATLAS_TLS12='${atlas_tls12}' - export ATLAS_SERVERLESS='${atlas_serverless}' - export ATLAS_SRV_FREE='${atlas_srv_free}' - export ATLAS_SRV_REPL='${atlas_srv_repl}' - export ATLAS_SRV_SHRD='${atlas_srv_shrd}' - export ATLAS_SRV_TLS11='${atlas_srv_tls11}' - export ATLAS_SRV_TLS12='${atlas_srv_tls12}' - export ATLAS_SRV_SERVERLESS='${atlas_srv_serverless}' - EOT - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - # Disable xtrace (just in case it was accidentally set). - set +x - . 
./prepare_atlas_connectivity.sh - rm -f ./prepare_atlas_connectivity.sh - - PYTHON_BINARY=${PYTHON_BINARY} bash ${PROJECT_DIRECTORY}/.evergreen/run-atlas-tests.sh - - "add aws auth variables to file": - - command: shell.exec - type: test - params: - working_dir: "src" - silent: true - script: | - cat < ${DRIVERS_TOOLS}/.evergreen/auth_aws/aws_e2e_setup.json - { - "iam_auth_ecs_account" : "${iam_auth_ecs_account}", - "iam_auth_ecs_secret_access_key" : "${iam_auth_ecs_secret_access_key}", - "iam_auth_ecs_account_arn": "arn:aws:iam::557821124784:user/authtest_fargate_user", - "iam_auth_ecs_cluster": "${iam_auth_ecs_cluster}", - "iam_auth_ecs_task_definition": "${iam_auth_ecs_task_definition}", - "iam_auth_ecs_subnet_a": "${iam_auth_ecs_subnet_a}", - "iam_auth_ecs_subnet_b": "${iam_auth_ecs_subnet_b}", - "iam_auth_ecs_security_group": "${iam_auth_ecs_security_group}", - - "iam_auth_assume_aws_account" : "${iam_auth_assume_aws_account}", - "iam_auth_assume_aws_secret_access_key" : "${iam_auth_assume_aws_secret_access_key}", - "iam_auth_assume_role_name" : "${iam_auth_assume_role_name}", - - "iam_auth_ec2_instance_account" : "${iam_auth_ec2_instance_account}", - "iam_auth_ec2_instance_secret_access_key" : "${iam_auth_ec2_instance_secret_access_key}", - "iam_auth_ec2_instance_profile" : "${iam_auth_ec2_instance_profile}" - } - EOF - - "run aws auth test with regular aws credentials": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_regular_aws.js - - command: shell.exec - type: test - params: - working_dir: "src" - silent: true - script: | - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' - USER=$(urlencode ${iam_auth_ecs_account}) - PASS=$(urlencode ${iam_auth_ecs_secret_access_key}) - MONGODB_URI="mongodb://$USER:$PASS@localhost" - EOF - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} .evergreen/run-mongodb-aws-test.sh - - "run aws auth test with assume role credentials": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . 
./activate_venv.sh - mongo aws_e2e_assume_role.js - - command: shell.exec - type: test - params: - working_dir: "src" - silent: true - script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias urlencode='${python3_binary} -c "import sys, urllib.parse as ulp; sys.stdout.write(ulp.quote_plus(sys.argv[1]))"' - alias jsonkey='${python3_binary} -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' - USER=$(jsonkey AccessKeyId) - USER=$(urlencode $USER) - PASS=$(jsonkey SecretAccessKey) - PASS=$(urlencode $PASS) - SESSION_TOKEN=$(jsonkey SessionToken) - SESSION_TOKEN=$(urlencode $SESSION_TOKEN) - MONGODB_URI="mongodb://$USER:$PASS@localhost" - EOF - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} .evergreen/run-mongodb-aws-test.sh - - "run aws auth test with aws EC2 credentials": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - if [ "${skip_EC2_auth_test}" = "true" ]; then - echo "This platform does not support the EC2 auth test, skipping..." - exit 0 - fi - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . ./activate_venv.sh - mongo aws_e2e_ec2.js - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh - - "run aws auth test with aws credentials as environment variables": - - command: shell.exec - type: test - params: - working_dir: "src" - silent: true - script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - export AWS_ACCESS_KEY_ID=${iam_auth_ecs_account} - export AWS_SECRET_ACCESS_KEY=${iam_auth_ecs_secret_access_key} - EOF - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} PROJECT_DIRECTORY=${PROJECT_DIRECTORY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh - - "run aws auth test with aws credentials and session token as environment variables": - - command: shell.exec - type: test - params: - working_dir: "src" - silent: true - script: | - # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) - cat <<'EOF' > "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - alias jsonkey='${python3_binary} -c "import json,sys;sys.stdout.write(json.load(sys.stdin)[sys.argv[1]])" < ${DRIVERS_TOOLS}/.evergreen/auth_aws/creds.json' - export AWS_ACCESS_KEY_ID=$(jsonkey AccessKeyId) - export AWS_SECRET_ACCESS_KEY=$(jsonkey SecretAccessKey) - export AWS_SESSION_TOKEN=$(jsonkey SessionToken) - EOF - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} ASSERT_NO_URI_CREDS=true .evergreen/run-mongodb-aws-test.sh - - "run aws ECS auth test": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - if [ "${skip_ECS_auth_test}" = "true" ]; then - echo "This platform does not support the ECS auth test, skipping..." - exit 0 - fi - cd ${DRIVERS_TOOLS}/.evergreen/auth_aws - . 
./activate_venv.sh - cat < setup.js - const mongo_binaries = "$MONGODB_BINARIES"; - const project_dir = "$PROJECT_DIRECTORY"; - EOF - - mongo --nodb setup.js aws_e2e_ecs.js - cd - - - "cleanup": - - command: shell.exec - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - rm -rf $DRIVERS_TOOLS || true - - "fix absolute paths": - - command: shell.exec - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - for filename in $(find ${DRIVERS_TOOLS} -name \*.json); do - perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|${DRIVERS_TOOLS}|g" $filename - done - - "windows fix": - - command: shell.exec - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do - cat $i | tr -d '\r' > $i.new - mv $i.new $i - done - # Copy client certificate because symlinks do not work on Windows. - cp ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem - - "make files executable": - - command: shell.exec - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do - chmod +x $i - done - - "init test-results": - - command: shell.exec - params: - script: | - set -o xtrace - ${PREPARE_SHELL} - echo '{"results": [{ "status": "FAIL", "test_file": "Build", "log_raw": "No test-results.json found was created" } ]}' > ${PROJECT_DIRECTORY}/test-results.json - - "install dependencies": - - command: shell.exec - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" - # Don't use ${file} syntax here because evergreen treats it as an empty expansion. - [ -f "$file" ] && bash $file || echo "$file not available, skipping" - - "run-ocsp-test": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - PYTHON_BINARY=${PYTHON_BINARY} \ - CA_FILE="$DRIVERS_TOOLS/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \ - OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \ - bash ${PROJECT_DIRECTORY}/.evergreen/run-ocsp-tests.sh - - run-valid-ocsp-server: - - command: shell.exec - params: - script: | - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - ${python3_binary} -m venv ./venv - ./venv/${venv_bin_dir|bin}/pip3 install -r mock-ocsp-responder-requirements.txt - - command: shell.exec - params: - background: true - script: | - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - ./venv/${venv_bin_dir|bin}/python ocsp_mock.py \ - --ca_file ${OCSP_ALGORITHM}/ca.pem \ - --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \ - --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \ - -p 8100 -v - run-revoked-ocsp-server: - - command: shell.exec - params: - script: | - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - ${python3_binary} -m venv ./venv - ./venv/${venv_bin_dir|bin}/pip3 install -r mock-ocsp-responder-requirements.txt - - command: shell.exec - params: - background: true - script: | - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - ./venv/${venv_bin_dir|bin}/python ocsp_mock.py \ - --ca_file ${OCSP_ALGORITHM}/ca.pem \ - --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \ - --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \ - -p 8100 \ - -v \ - --fault revoked - run-valid-delegate-ocsp-server: - - command: shell.exec - params: - script: | - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - ${python3_binary} -m venv ./venv - ./venv/${venv_bin_dir|bin}/pip3 install -r mock-ocsp-responder-requirements.txt - - command: shell.exec - params: - background: true - script: | 
- cd ${DRIVERS_TOOLS}/.evergreen/ocsp - ./venv/${venv_bin_dir|bin}/python ocsp_mock.py \ - --ca_file ${OCSP_ALGORITHM}/ca.pem \ - --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \ - --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \ - -p 8100 -v - run-revoked-delegate-ocsp-server: - - command: shell.exec - params: - script: | - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - ${python3_binary} -m venv ./venv - ./venv/${venv_bin_dir|bin}/pip3 install -r mock-ocsp-responder-requirements.txt - - command: shell.exec - params: - background: true - script: | - cd ${DRIVERS_TOOLS}/.evergreen/ocsp - ./venv/${venv_bin_dir|bin}/python ocsp_mock.py \ - --ca_file ${OCSP_ALGORITHM}/ca.pem \ - --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \ - --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \ - -p 8100 \ - -v \ - --fault revoked - - "run load-balancer": - - command: shell.exec - params: - script: | - DRIVERS_TOOLS=${DRIVERS_TOOLS} MONGODB_URI=${MONGODB_URI} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh start - - command: expansions.update - params: - file: lb-expansion.yml - - "stop load-balancer": - - command: shell.exec - params: - script: | - cd ${DRIVERS_TOOLS}/.evergreen - DRIVERS_TOOLS=${DRIVERS_TOOLS} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop - - "teardown_docker": - - command: shell.exec - params: - script: | - # Remove all Docker images - docker rmi -f $(docker images -a -q) &> /dev/null || true - - "build release": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - .evergreen/release.sh - - "upload release": - - command: archive.targz_pack - params: - target: "release-files.tgz" - source_dir: "src/dist" - include: - - "*" - - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: release-files.tgz - remote_file: ${UPLOAD_BUCKET}/release/${revision}/${task_id}-${execution}-release-files.tar.gz - bucket: mciuploads - permissions: public-read - content_type: ${content_type|application/gzip} - display_name: Release files - - "download and merge releases": - - command: shell.exec - params: - silent: true - script: | - export AWS_ACCESS_KEY_ID=${aws_key} - export AWS_SECRET_ACCESS_KEY=${aws_secret} - - # Download all the task coverage files. - aws s3 cp --recursive s3://mciuploads/${UPLOAD_BUCKET}/release/${revision}/ release/ - - command: shell.exec - params: - shell: "bash" - script: | - set -o xtrace - ${PREPARE_SHELL} - # Combine releases into one directory. - ls -la release/ - mkdir releases - # Copy old manylinux release first since we want the newer manylinux - # wheels to override them. - mkdir old_manylinux - if mv release/*old_manylinux* old_manylinux; then - for REL in old_manylinux/*; do - tar zxvf $REL -C releases/ - done - fi - for REL in release/*; do - tar zxvf $REL -C releases/ - done - # Build source distribution. 
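# (Illustrative aside, mirroring what testinstall in .evergreen/utils.sh does
# for the wheels; the venv path here is made up.) A built sdist can be
# smoke-tested the same way before it ships:
python3 -m venv /tmp/sdist-venv
/tmp/sdist-venv/bin/pip install dist/pymongo-*.tar.gz
/tmp/sdist-venv/bin/python -c "import pymongo; print(pymongo.version)"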
- cd src/ - /opt/python/3.7/bin/python3 setup.py sdist - cp dist/* ../releases - - command: archive.targz_pack - params: - target: "release-files-all.tgz" - source_dir: "releases/" - include: - - "*" - - command: s3.put - params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_file: release-files-all.tgz - remote_file: ${UPLOAD_BUCKET}/release-all/${revision}/${task_id}-${execution}-release-files-all.tar.gz - bucket: mciuploads - permissions: public-read - content_type: ${content_type|application/gzip} - display_name: Release files all +include: + - filename: .evergreen/generated_configs/functions.yml + - filename: .evergreen/generated_configs/tasks.yml + - filename: .evergreen/generated_configs/variants.yml pre: - func: "fetch source" - - func: "prepare resources" - - func: "windows fix" - - func: "fix absolute paths" - - func: "init test-results" - - func: "make files executable" - - func: "install dependencies" + - func: "setup system" + - func: "assume ec2 role" post: # Disabled, causing timeouts # - func: "upload working dir" + - func: "teardown system" - func: "upload coverage" - func: "upload mo artifacts" - func: "upload test results" - - func: "stop mongo-orchestration" - func: "cleanup" - - func: "teardown_docker" - -task_groups: - - name: serverless_task_group - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 # 30 minutes - setup_group: - - func: "fetch source" - - func: "prepare resources" - - command: shell.exec - params: - shell: "bash" - script: | - ${PREPARE_SHELL} - set +o xtrace - LOADBALANCED=ON \ - SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ - SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ - SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ - bash ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh - - command: expansions.update - params: - file: serverless-expansion.yml - teardown_group: - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - set +o xtrace - SERVERLESS_DRIVERS_GROUP=${SERVERLESS_DRIVERS_GROUP} \ - SERVERLESS_API_PUBLIC_KEY=${SERVERLESS_API_PUBLIC_KEY} \ - SERVERLESS_API_PRIVATE_KEY=${SERVERLESS_API_PRIVATE_KEY} \ - SERVERLESS_INSTANCE_NAME=${SERVERLESS_INSTANCE_NAME} \ - bash ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh - tasks: - - ".serverless" tasks: - # Wildcard task. Do you need to find out what tools are available and where? - # Throw it here, and execute this task on all buildvariants - - name: getdata - commands: - - command: shell.exec - type: test - params: - script: | - set -o xtrace - . ${DRIVERS_TOOLS}/.evergreen/download-mongodb.sh || true - get_distro || true - echo $DISTRO - echo $MARCH - echo $OS - uname -a || true - ls /etc/*release* || true - cc --version || true - gcc --version || true - clang --version || true - gcov --version || true - lcov --version || true - llvm-cov --version || true - echo $PATH - ls -la /usr/local/Cellar/llvm/*/bin/ || true - ls -la /usr/local/Cellar/ || true - scan-build --version || true - genhtml --version || true - valgrind --version || true - - - name: "release-mac" - tags: ["release_tag"] - run_on: macos-1014 - commands: - - func: "build release" - - func: "upload release" - - - name: "release-windows" - tags: ["release_tag"] - run_on: windows-64-vsMulti-small - commands: - - func: "build release" - - func: "upload release" - - - name: "release-manylinux" - tags: ["release_tag"] - run_on: ubuntu2004-large - exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). 
- commands: - - func: "build release" - - func: "upload release" - - - name: "release-old-manylinux" - tags: ["release_tag"] - run_on: ubuntu2004-large - exec_timeout_secs: 216000 # 60 minutes (manylinux task is slow). - commands: - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - set -o xtrace - ${PREPARE_SHELL} - .evergreen/build-manylinux.sh BUILD_WITH_TAG - - func: "upload release" - - - name: "release-combine" - tags: ["release_tag"] - run_on: ubuntu2004-small - depends_on: - - name: "*" - variant: ".release_tag" - patch_optional: true - commands: - - func: "download and merge releases" - -# Standard test tasks {{{ - - - name: "mockupdb" - tags: ["mockupdb"] - commands: - - func: "run mockupdb tests" - - - name: "doctests" - tags: ["doctests"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - func: "run doctests" - - - name: "test-3.6-standalone" - tags: ["3.6", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-3.6-replica_set" - tags: ["3.6", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-3.6-sharded_cluster" - tags: ["3.6", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-4.0-standalone" - tags: ["4.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-4.0-replica_set" - tags: ["4.0", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-4.0-sharded_cluster" - tags: ["4.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-4.2-standalone" - tags: ["4.2", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.2" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-4.2-replica_set" - tags: ["4.2", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.2" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-4.2-sharded_cluster" - tags: ["4.2", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.2" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-4.4-standalone" - tags: ["4.4", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.4" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-4.4-replica_set" - tags: ["4.4", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.4" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-4.4-sharded_cluster" - tags: ["4.4", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.4" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-5.0-standalone" - tags: ["5.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "5.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-5.0-replica_set" - tags: ["5.0", "replica_set"] - commands: - - func: "bootstrap 
mongo-orchestration" - vars: - VERSION: "5.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-5.0-sharded_cluster" - tags: ["5.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "5.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - name: "test-6.0-standalone" - tags: ["6.0", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "6.0" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-6.0-replica_set" - tags: ["6.0", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "6.0" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-6.0-sharded_cluster" - tags: ["6.0", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "6.0" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-latest-standalone" - tags: ["latest", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-latest-replica_set" - tags: ["latest", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-latest-sharded_cluster" - tags: ["latest", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-rapid-standalone" - tags: ["rapid", "standalone"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "rapid" - TOPOLOGY: "server" - - func: "run tests" - - - name: "test-rapid-replica_set" - tags: ["rapid", "replica_set"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "rapid" - TOPOLOGY: "replica_set" - - func: "run tests" - - - name: "test-rapid-sharded_cluster" - tags: ["rapid", "sharded_cluster"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "rapid" - TOPOLOGY: "sharded_cluster" - - func: "run tests" - - - name: "test-serverless" - tags: ["serverless"] - commands: - - func: "run tests" - - - name: "test-enterprise-auth" - tags: ["enterprise-auth"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - func: "run enterprise auth tests" - - - name: "mod-wsgi-standalone" - tags: ["mod_wsgi"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - - func: "run mod_wsgi tests" - - - name: "mod-wsgi-replica-set" - tags: ["mod_wsgi"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "replica_set" - - func: "run mod_wsgi tests" - - - name: "no-server" - tags: ["no-server"] - commands: - - func: "run tests" - vars: - PYTHON_BINARY: /opt/python/3.7/bin/python3 - - - name: "atlas-connect" - tags: ["atlas-connect"] - commands: - - func: "run atlas tests" - - - name: atlas-data-lake-tests - commands: - - func: "bootstrap data lake" - - func: "run tests" - vars: - DATA_LAKE: "true" - - - name: test-ocsp-rsa-valid-cert-server-staples - tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] - commands: - - func: run-valid-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-invalid-cert-server-staples - 
tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] - commands: - - func: run-revoked-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-valid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-valid-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-invalid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-revoked-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-soft-fail - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-malicious-invalid-cert-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-revoked-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-malicious-no-responder-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-delegate-valid-cert-server-staples - tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] - commands: - - func: run-valid-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-delegate-invalid-cert-server-staples - tags: ["ocsp", "ocsp-rsa", "ocsp-staple"] - commands: - - func: run-revoked-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-valid-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-revoked-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: 
"rsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-rsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-rsa"] - commands: - - func: run-revoked-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "rsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "rsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-valid-cert-server-staples - tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] - commands: - - func: run-valid-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-invalid-cert-server-staples - tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] - commands: - - func: run-revoked-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-valid-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-revoked-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-soft-fail - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-malicious-invalid-cert-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-revoked-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-malicious-no-responder-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples - tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] - commands: - - func: run-valid-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: 
test-ocsp-ecdsa-delegate-invalid-cert-server-staples - tags: ["ocsp", "ocsp-ecdsa", "ocsp-staple"] - commands: - - func: run-revoked-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-valid-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "true" - - - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-revoked-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-mustStaple-server-does-not-staple - tags: ["ocsp", "ocsp-ecdsa"] - commands: - - func: run-revoked-delegate-ocsp-server - vars: - OCSP_ALGORITHM: "ecdsa" - - func: "bootstrap mongo-orchestration" - vars: - ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json" - - func: run-ocsp-test - vars: - OCSP_ALGORITHM: "ecdsa" - OCSP_TLS_SHOULD_SUCCEED: "false" - - - name: "aws-auth-test-4.4" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "4.4" - - func: "add aws auth variables to file" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-5.0" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "5.0" - - func: "add aws auth variables to file" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-6.0" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "6.0" - - func: "add aws auth variables to file" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws ECS auth test" - - - name: "aws-auth-test-latest" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - 
ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "latest" - - func: "add aws auth variables to file" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws ECS auth test" - - name: "aws-auth-test-rapid" - commands: - - func: "bootstrap mongo-orchestration" - vars: - AUTH: "auth" - ORCHESTRATION_FILE: "auth-aws.json" - TOPOLOGY: "server" - VERSION: "rapid" - - func: "add aws auth variables to file" - - func: "run aws auth test with regular aws credentials" - - func: "run aws auth test with assume role credentials" - - func: "run aws auth test with aws credentials as environment variables" - - func: "run aws auth test with aws credentials and session token as environment variables" - - func: "run aws auth test with aws EC2 credentials" - - func: "run aws ECS auth test" - - - name: load-balancer-test - commands: - - func: "bootstrap mongo-orchestration" - vars: - TOPOLOGY: "sharded_cluster" - LOAD_BALANCER: true - - func: "run load-balancer" - - func: "run tests" - - - name: "test-fips-standalone" - tags: ["fips"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "latest" - TOPOLOGY: "server" - PYTHON_BINARY: "/opt/mongodbtoolchain/v3/bin/python3" - - func: "run tests" -# }}} - - name: "coverage-report" - tags: ["coverage"] - depends_on: - # BUILD-3165: We can't use "*" (all tasks) and specify "variant". - # Instead list out all coverage tasks using tags. - - name: ".standalone" - variant: ".coverage_tag" - # Run the coverage task even if some tasks fail. - status: "*" - # Run the coverage task even if some tasks are not scheduled in a patch build. 
- patch_optional: true - - name: ".replica_set" - variant: ".coverage_tag" - status: "*" - patch_optional: true - - name: ".sharded_cluster" - variant: ".coverage_tag" - status: "*" - patch_optional: true - commands: - - func: "download and merge coverage" - - -axes: - # Choice of distro - - id: platform - display_name: OS - values: - - id: awslinux - display_name: "Amazon Linux 2018 (Enterprise)" - run_on: amazon1-2018-test - batchtime: 10080 # 7 days - variables: - python3_binary: "/opt/python/3.8/bin/python3" - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/linux-64-amazon-ami/master/latest/libmongocrypt.tar.gz - - id: archlinux-test - display_name: "Archlinux" - run_on: archlinux-test - batchtime: 10080 # 7 days - - id: debian92 - display_name: "Debian 9.2" - run_on: debian92-test - batchtime: 10080 # 7 days - variables: - python3_binary: "/opt/python/3.8/bin/python3" - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/debian92/master/latest/libmongocrypt.tar.gz - - id: macos-1014 - display_name: "macOS 10.14" - run_on: macos-1014 - variables: - skip_EC2_auth_test: true - skip_ECS_auth_test: true - python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - - id: macos-1100 - display_name: "macOS 11.00" - run_on: macos-1100 - variables: - skip_EC2_auth_test: true - skip_ECS_auth_test: true - python3_binary: /Library/Frameworks/Python.framework/Versions/3.8/bin/python3 - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/macos/master/latest/libmongocrypt.tar.gz - - id: rhel62 - display_name: "RHEL 6.2 (x86_64)" - run_on: rhel62-small - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-62-64-bit/master/latest/libmongocrypt.tar.gz - # Note that rhel70 isn't currently used since it doesn't - # have a system Python 3. We'll switch to rhel70 as our main test - # system (using /opt/python) in a future change. 
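Every platform value in this axis follows the same anatomy: `run_on` names the Evergreen distro to schedule on, `batchtime` (in minutes) throttles how often mainline builds of that variant run, and `variables` injects expansions such as `python3_binary` and `libmongocrypt_url` into every task on that variant. A minimal sketch of one entry, with a hypothetical id, distro, and URL:

```yaml
# Illustrative platform axis value; the id, distro name, and URL are hypothetical.
- id: example-linux
  display_name: "Example Linux"
  run_on: example-linux-small   # Evergreen distro to schedule tasks on
  batchtime: 10080              # minutes between mainline runs (7 days)
  variables:                    # expansions visible to every task on this variant
    python3_binary: "/opt/python/3.8/bin/python3"
    libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/example/master/latest/libmongocrypt.tar.gz
```

The rhel70 entries that the note above refers to come next and follow this same shape.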
- - id: rhel70 - display_name: "RHEL 7.0" - run_on: rhel70-small - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: rhel70-fips - display_name: "RHEL 7.0 FIPS" - run_on: rhel70-fips - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/rhel-70-64-bit/master/latest/libmongocrypt.tar.gz - - id: ubuntu-16.04 - display_name: "Ubuntu 16.04" - run_on: ubuntu1604-test - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1604/master/latest/libmongocrypt.tar.gz - python3_binary: "/opt/python/3.8/bin/python3" - - id: ubuntu-18.04 - display_name: "Ubuntu 18.04" - run_on: ubuntu1804-small - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-64/master/latest/libmongocrypt.tar.gz - python3_binary: "/opt/python/3.8/bin/python3" - - id: ubuntu-20.04 - display_name: "Ubuntu 20.04" - run_on: ubuntu2004-small - batchtime: 10080 # 7 days - variables: - python3_binary: python3 - - id: rhel83-zseries - display_name: "RHEL 8.3 (zSeries)" - run_on: rhel83-zseries-small - batchtime: 10080 # 7 days - - id: rhel81-power8 - display_name: "RHEL 8.1 (POWER8)" - run_on: rhel81-power8-small - batchtime: 10080 # 7 days - - id: rhel82-arm64 - display_name: "RHEL 8.2 (ARM64)" - run_on: rhel82-arm64-small - batchtime: 10080 # 7 days - variables: - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/ubuntu1804-arm64/master/latest/libmongocrypt.tar.gz - - id: windows-64-vsMulti-small - display_name: "Windows 64" - run_on: windows-64-vsMulti-small - batchtime: 10080 # 7 days - variables: - skip_ECS_auth_test: true - python3_binary: "C:/python/Python38/python.exe" - venv_bin_dir: "Scripts" - libmongocrypt_url: https://s3.amazonaws.com/mciuploads/libmongocrypt/windows-test/master/latest/libmongocrypt.tar.gz - - # Test with authentication? - - id: auth - display_name: Authentication - values: - - id: auth - display_name: Auth - variables: - AUTH: "auth" - - id: noauth - display_name: NoAuth - variables: - AUTH: "noauth" - - # Test with SSL? - - id: ssl - display_name: SSL - values: - - id: ssl - display_name: SSL - variables: - SSL: "ssl" - - id: nossl - display_name: NoSSL - variables: - SSL: "nossl" - - # Test with Auth + SSL (combined for convenience)? 
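A matrix that crossed the separate two-value auth and ssl axes above would expand to all four combinations, including the rarely wanted auth/nossl and noauth/ssl cells. An illustrative expansion, with a hypothetical matrix name:

```yaml
# Illustrative only: crossing two independent axes yields the full product.
- matrix_name: example-auth-ssl-product
  matrix_spec:
    auth: "*"    # auth, noauth
    ssl: "*"     # ssl, nossl
  display_name: "${auth} ${ssl}"   # four variants from the 2x2 product
  tasks:
    - ".latest"  # tag selector: every task tagged 'latest'
```

The combined auth-ssl axis defined next sidesteps this by bundling both variables into a single axis value, so matrices that use it only ever see the two sensible pairings.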
- - id: auth-ssl - display_name: Auth SSL - values: - - id: auth-ssl - display_name: Auth SSL - variables: - AUTH: "auth" - SSL: "ssl" - - id: noauth-nossl - display_name: NoAuth NoSSL - variables: - AUTH: "noauth" - SSL: "nossl" - - # Choice of wire protocol compression support - - id: compression - display_name: Compression - values: - - id: snappy - display_name: snappy compression - variables: - COMPRESSORS: "snappy" - - id: zlib - display_name: zlib compression - variables: - COMPRESSORS: "zlib" - - id: zstd - display_name: zstd compression - variables: - COMPRESSORS: "zstd" - - # Choice of MongoDB server version - - id: mongodb-version - display_name: "MongoDB" - values: - - id: "3.6" - display_name: "MongoDB 3.6" - variables: - VERSION: "3.6" - - id: "4.0" - display_name: "MongoDB 4.0" - variables: - VERSION: "4.0" - - id: "4.2" - display_name: "MongoDB 4.2" - variables: - VERSION: "4.2" - - id: "4.4" - display_name: "MongoDB 4.4" - variables: - VERSION: "4.4" - - id: "5.0" - display_name: "MongoDB 5.0" - variables: - VERSION: "5.0" - - id: "6.0" - display_name: "MongoDB 6.0" - variables: - VERSION: "6.0" - - id: "latest" - display_name: "MongoDB latest" - variables: - VERSION: "latest" - - id: "rapid" - display_name: "MongoDB rapid" - variables: - VERSION: "rapid" - - # Choice of Python runtime version - - id: python-version - display_name: "Python" - values: - # Note: always display platform with python-version to avoid ambiguous display names. - # Linux - - id: "3.7" - display_name: "Python 3.7" - variables: - PYTHON_BINARY: "/opt/python/3.7/bin/python3" - - id: "3.8" - display_name: "Python 3.8" - variables: - PYTHON_BINARY: "/opt/python/3.8/bin/python3" - - id: "3.9" - display_name: "Python 3.9" - variables: - PYTHON_BINARY: "/opt/python/3.9/bin/python3" - - id: "3.10" - display_name: "Python 3.10" - variables: - PYTHON_BINARY: "/opt/python/3.10/bin/python3" - - id: "pypy3.7" - display_name: "PyPy 3.7" - variables: - PYTHON_BINARY: "/opt/python/pypy3.7/bin/pypy3" - - id: "pypy3.8" - display_name: "PyPy 3.8" - variables: - PYTHON_BINARY: "/opt/python/pypy3.8/bin/pypy3" - - - id: python-version-mac - display_name: "Python" - values: - - id: "system-python3" - display_name: "Python3" - variables: - PYTHON_BINARY: "python3" - - - id: python-version-windows - display_name: "Python" - values: - - id: "3.7" - display_name: "Python 3.7" - variables: - PYTHON_BINARY: "C:/python/Python37/python.exe" - - id: "3.8" - display_name: "Python 3.8" - variables: - PYTHON_BINARY: "C:/python/Python38/python.exe" - - id: "3.9" - display_name: "Python 3.9" - variables: - PYTHON_BINARY: "C:/python/Python39/python.exe" - - id: "3.10" - display_name: "Python 3.10" - variables: - PYTHON_BINARY: "C:/python/Python310/python.exe" - - - id: python-version-windows-32 - display_name: "Python" - values: - - id: "3.7" - display_name: "32-bit Python 3.7" - variables: - PYTHON_BINARY: "C:/python/32/Python37/python.exe" - - id: "3.8" - display_name: "32-bit Python 3.8" - variables: - PYTHON_BINARY: "C:/python/32/Python38/python.exe" - - id: "3.9" - display_name: "32-bit Python 3.9" - variables: - PYTHON_BINARY: "C:/python/32/Python39/python.exe" - - id: "3.10" - display_name: "32-bit Python 3.10" - variables: - PYTHON_BINARY: "C:/python/32/Python310/python.exe" - - # Choice of mod_wsgi version - - id: mod-wsgi-version - display_name: "mod_wsgi version" - values: - - id: "3" - display_name: "mod_wsgi 3.5" - variables: - MOD_WSGI_VERSION: "3" - - id: "4" - display_name: "mod_wsgi 4.x" - variables: - 
MOD_WSGI_VERSION: "4" - - # Choice of Python async framework - - id: green-framework - display_name: "Green Framework" - values: - - id: "eventlet" - display_name: "Eventlet" - variables: - GREEN_FRAMEWORK: "eventlet" - - id: "gevent" - display_name: "Gevent" - variables: - GREEN_FRAMEWORK: "gevent" - - # Install and use the driver's C-extensions? - - id: c-extensions - display_name: "C Extensions" - values: - - id: "without-c-extensions" - display_name: "Without C Extensions" - variables: - C_EXTENSIONS: "--no_ext" - - id: "with-c-extensions" - display_name: "With C Extensions" - variables: - C_EXTENSIONS: "" - - # Choice of MongoDB storage engine - - id: storage-engine - display_name: Storage - values: - - id: mmapv1 - display_name: MMAPv1 - variables: - STORAGE_ENGINE: "mmapv1" - - id: inmemory - display_name: InMemory - variables: - STORAGE_ENGINE: "inmemory" - - # Run with test commands disabled on server? - - id: disableTestCommands - display_name: Disable test commands - values: - - id: disabled - display_name: disabled - variables: - DISABLE_TEST_COMMANDS: "1" - - # Generate coverage report? - - id: coverage - display_name: "Coverage" - values: - - id: "coverage" - display_name: "Coverage" - tags: ["coverage_tag"] - variables: - COVERAGE: "coverage" - - # Run encryption tests? - - id: encryption - display_name: "Encryption" - values: - - id: "encryption" - display_name: "Encryption" - tags: ["encryption_tag"] - variables: - test_encryption: true - batchtime: 10080 # 7 days - - id: "encryption_crypt_shared" - display_name: "Encryption shared lib" - tags: ["encryption_tag"] - variables: - test_encryption: true - test_crypt_shared: true - batchtime: 10080 # 7 days - - # Run pyopenssl tests? - - id: pyopenssl - display_name: "PyOpenSSL" - values: - - id: "enabled" - display_name: "PyOpenSSL" - variables: - test_pyopenssl: true - batchtime: 10080 # 7 days - - - id: versionedApi - display_name: "versionedApi" - values: - # Test against a cluster with requireApiVersion=1. - - id: "requireApiVersion1" - display_name: "requireApiVersion1" - tags: [ "versionedApi_tag" ] - variables: - # REQUIRE_API_VERSION is set to make drivers-evergreen-tools - # start a cluster with the requireApiVersion parameter. - REQUIRE_API_VERSION: "1" - # MONGODB_API_VERSION is the apiVersion to use in the test suite. - MONGODB_API_VERSION: "1" - # Test against a cluster with acceptApiVersion2 but without - # requireApiVersion, and don't automatically add apiVersion to - # clients created in the test suite. - - id: "acceptApiVersion2" - display_name: "acceptApiVersion2" - tags: [ "versionedApi_tag" ] - variables: - ORCHESTRATION_FILE: "versioned-api-testing.json" - - # Run load balancer tests? - - id: loadbalancer - display_name: "Load Balancer" - values: - - id: "enabled" - display_name: "Load Balancer" - variables: - test_loadbalancer: true - batchtime: 10080 # 7 days - - - id: serverless - display_name: "Serverless" - values: - - id: "enabled" - display_name: "Serverless" - variables: - test_serverless: true - batchtime: 10080 # 7 days + - name: resync_specs + commands: + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN] + args: + - .evergreen/scripts/resync-all-specs.sh + working_dir: src buildvariants: -- matrix_name: "tests-all" - matrix_spec: - platform: - # OSes that support versions of MongoDB>=2.6 with SSL. 
- - awslinux - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - -- matrix_name: "tests-archlinux" - matrix_spec: - platform: - # Archlinux supports MongoDB without SSL. - # MongoDB 4.2 drops support for archlinux (generic linux builds). - - archlinux-test - auth: "*" - ssl: "nossl" - display_name: "${platform} ${auth} ${ssl}" - tasks: - - ".4.0" - - ".3.6" - -- matrix_name: "tests-fips" - matrix_spec: - platform: - - rhel70-fips - auth: "auth" - ssl: "ssl" - display_name: "${platform} ${auth} ${ssl}" - tasks: - - "test-fips-standalone" - -- matrix_name: "test-macos" - matrix_spec: - platform: - # MacOS introduced SSL support with MongoDB >= 3.2. - # Older server versions (2.6, 3.0) are supported without SSL. - - macos-1014 - auth: "*" - ssl: "*" - exclude_spec: - # No point testing with SSL without auth. - - platform: macos-1014 - auth: "noauth" - ssl: "ssl" - display_name: "${platform} ${auth} ${ssl}" - tasks: - - ".latest" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - -- matrix_name: "test-macos-encryption" - matrix_spec: - platform: - - macos-1100 - auth: "auth" - ssl: "nossl" - encryption: "*" - display_name: "${encryption} ${platform} ${auth} ${ssl}" - tasks: &encryption-server-versions - - ".rapid" - - ".latest" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - rules: &encryption-exclude-rules - - if: - platform: "*" - auth: "*" - ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - -# Test one server version with zSeries, POWER8, and ARM. -- matrix_name: "test-different-cpu-architectures" - matrix_spec: - platform: - - rhel83-zseries # Added in 5.0.8 (SERVER-44074) - - rhel81-power8 # Added in 4.2.7 (SERVER-44072) - - rhel82-arm64 # Added in 4.4.2 (SERVER-48282) - auth-ssl: "*" - display_name: "${platform} ${auth-ssl}" - tasks: - - ".6.0" - -- matrix_name: "tests-python-version-ubuntu18-test-ssl" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - auth-ssl: "*" - coverage: "*" - display_name: "${python-version} ${platform} ${auth-ssl} ${coverage}" - tasks: &all-server-versions - - ".rapid" - - ".latest" - - ".6.0" - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - ".3.6" - -- matrix_name: "tests-pyopenssl" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - auth: "*" - ssl: "ssl" - pyopenssl: "*" - # Only test "noauth" with Python 3.7. - exclude_spec: - platform: ubuntu-18.04 - python-version: ["3.8", "3.9", "3.10", "pypy3.7", "pypy3.8"] - auth: "noauth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} ${python-version} ${auth}" - tasks: - - '.replica_set' - # Test standalone and sharded only on 5.0 and later. 
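Throughout these matrices, a task entry with a leading dot selects every task carrying that tag, while a `- name:` entry picks a single task by exact name; both forms appear in the matrices here. A short sketch, with hypothetical task names:

```yaml
# Illustrative task selection; the names are hypothetical.
tasks:
  - ".replica_set"                 # every task tagged 'replica_set'
  - ".5.0"                         # every task tagged '5.0'
  - name: "test-enterprise-auth"   # one task, selected by exact name
```

So the '.5.0' selector that completes this matrix pulls in the 5.0-tagged standalone and sharded tasks.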
- - '.5.0' - -- matrix_name: "tests-pyopenssl-macOS" - matrix_spec: - platform: macos-1014 - auth: "auth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} ${auth}" - tasks: - - '.replica_set' - -- matrix_name: "tests-pyopenssl-windows" - matrix_spec: - platform: windows-64-vsMulti-small - python-version-windows: "*" - auth: "auth" - ssl: "ssl" - pyopenssl: "*" - display_name: "PyOpenSSL ${platform} ${python-version-windows} ${auth}" - tasks: - - '.replica_set' - -- matrix_name: "tests-python-version-ubuntu18-test-encryption" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - auth-ssl: noauth-nossl -# TODO: dependency error for 'coverage-report' task: -# dependency tests-python-version-rhel62-test-encryption_.../test-2.6-standalone is not present in the project config -# coverage: "*" - encryption: "*" - display_name: "${encryption} ${python-version} ${platform} ${auth-ssl}" - tasks: *encryption-server-versions - rules: - - if: - platform: "*" - python-version: "*" - auth-ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - - -- matrix_name: "tests-python-version-ubuntu18-without-c-extensions" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - c-extensions: without-c-extensions - auth-ssl: noauth-nossl - coverage: "*" - exclude_spec: - # These interpreters are always tested without extensions. - - platform: ubuntu-18.04 - python-version: ["pypy3.7", "pypy3.8"] - c-extensions: "*" - auth-ssl: "*" - coverage: "*" - display_name: "${c-extensions} ${python-version} ${platform} ${auth} ${ssl} ${coverage}" - tasks: *all-server-versions - -- matrix_name: "tests-python-version-ubuntu18-compression" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - c-extensions: "*" - compression: "*" - exclude_spec: - # These interpreters are always tested without extensions. - - platform: ubuntu-18.04 - python-version: ["pypy3.7", "pypy3.8"] - c-extensions: "with-c-extensions" - compression: "*" - display_name: "${compression} ${c-extensions} ${python-version} ${platform}" - tasks: - - "test-latest-standalone" - - "test-5.0-standalone" - - "test-4.4-standalone" - - "test-4.2-standalone" - rules: - # Server versions 3.6 and 4.0 support snappy and zlib. - - if: - python-version: "*" - c-extensions: "*" - compression: ["snappy", "zlib"] - then: - add_tasks: - - "test-4.0-standalone" - - "test-3.6-standalone" - -- matrix_name: "tests-python-version-green-framework-ubuntu18" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - green-framework: "*" - auth-ssl: "*" - exclude_spec: - # Don't test green frameworks on these Python versions. 
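An exclude_spec prunes cells from the expanded matrix before any variants are created; each entry names a value (or list of values) for every axis in the matrix_spec, with "*" matching anything. A sketch of the pattern, with values taken from this matrix for illustration:

```yaml
# Illustrative exclusion: drop the PyPy cells from a four-axis matrix.
exclude_spec:
  - platform: ubuntu-18.04
    python-version: ["pypy3.7", "pypy3.8"]   # a list matches several values
    green-framework: "*"                     # "*" matches any axis value
    auth-ssl: "*"
```

That is exactly what the exclusion below does for the green-framework matrix.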
- - platform: ubuntu-18.04 - python-version: ["pypy3.7", "pypy3.8"] - green-framework: "*" - auth-ssl: "*" - display_name: "${green-framework} ${python-version} ${platform} ${auth-ssl}" - tasks: *all-server-versions - -- matrix_name: "tests-windows-python-version" - matrix_spec: - platform: windows-64-vsMulti-small - python-version-windows: "*" - auth-ssl: "*" - display_name: "${platform} ${python-version-windows} ${auth-ssl}" - tasks: *all-server-versions - -- matrix_name: "tests-windows-python-version-32-bit" - matrix_spec: - platform: windows-64-vsMulti-small - python-version-windows-32: "*" - auth-ssl: "*" - display_name: "${platform} ${python-version-windows-32} ${auth-ssl}" - tasks: *all-server-versions - -- matrix_name: "tests-python-version-supports-openssl-102-test-ssl" - matrix_spec: - platform: awslinux - # Python 3.10+ requires OpenSSL 1.1.1+ - python-version: ["3.7", "3.8", "3.9", "pypy3.7", "pypy3.8"] - auth-ssl: "*" - display_name: "OpenSSL 1.0.2 ${python-version} ${platform} ${auth-ssl}" - tasks: - - ".5.0" - -- matrix_name: "tests-windows-encryption" - matrix_spec: - platform: windows-64-vsMulti-small - python-version-windows: "*" - auth-ssl: "*" - encryption: "*" - display_name: "${encryption} ${platform} ${python-version-windows} ${auth-ssl}" - tasks: *encryption-server-versions - rules: - - if: - platform: "*" - python-version-windows: "*" - auth-ssl: "*" - encryption: [ "encryption_crypt_shared" ] - then: - remove_tasks: - - ".5.0" - - ".4.4" - - ".4.2" - - ".4.0" - -# Storage engine tests on Ubuntu 18.04 (x86_64) with Python 3.7. -- matrix_name: "tests-storage-engines" - matrix_spec: - platform: ubuntu-18.04 - storage-engine: "*" - python-version: 3.7 - display_name: "Storage ${storage-engine} ${python-version} ${platform}" - rules: - - if: - platform: ubuntu-18.04 - storage-engine: ["inmemory"] - python-version: "*" - then: - add_tasks: - - "test-latest-standalone" - - "test-5.0-standalone" - - "test-4.4-standalone" - - "test-4.2-standalone" - - "test-4.0-standalone" - - "test-3.6-standalone" - - if: - # MongoDB 4.2 drops support for MMAPv1 - platform: ubuntu-18.04 - storage-engine: ["mmapv1"] - python-version: "*" - then: - add_tasks: - - "test-4.0-standalone" - - "test-4.0-replica_set" - - "test-3.6-standalone" - - "test-3.6-replica_set" - -# enableTestCommands=0 tests on Ubuntu18 (x86_64) with Python 3.7. 
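Where exclude_spec removes whole variants, the rules blocks used by the storage-engine matrix above adjust the task list per cell: an if clause matches axis values and the then clause applies add_tasks or remove_tasks to just those cells. A condensed sketch, reusing values from the matrix above:

```yaml
# Illustrative rule: MMAPv1 support ends with MongoDB 4.0, so only the
# pre-4.2 tasks are added to the mmapv1 cells.
rules:
  - if:
      platform: ubuntu-18.04
      storage-engine: ["mmapv1"]
      python-version: "*"
    then:
      add_tasks:
        - "test-4.0-standalone"
        - "test-3.6-standalone"
```

The disableTestCommands matrix below needs neither mechanism; it is a plain single-cell expansion.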
-- matrix_name: "test-disableTestCommands" - matrix_spec: - platform: ubuntu-18.04 - disableTestCommands: "*" - python-version: "3.7" - display_name: "Disable test commands ${python-version} ${platform}" - tasks: - - ".latest" - -- matrix_name: "test-linux-enterprise-auth" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - auth: "auth" - display_name: "Enterprise ${auth} ${platform} ${python-version}" - tasks: - - name: "test-enterprise-auth" - -- matrix_name: "tests-windows-enterprise-auth" - matrix_spec: - platform: windows-64-vsMulti-small - python-version-windows: "*" - auth: "auth" - display_name: "Enterprise ${auth} ${platform} ${python-version-windows}" - tasks: - - name: "test-enterprise-auth" - -- matrix_name: "tests-mod-wsgi" - matrix_spec: - platform: ubuntu-18.04 - python-version: ["3.7", "3.8", "3.9", "3.10"] - mod-wsgi-version: "*" - exclude_spec: - # mod-wsgi 3.5 won't build against CPython 3.8+ - - platform: ubuntu-18.04 - python-version: ["3.8", "3.9", "3.10"] - mod-wsgi-version: "3" - display_name: "${mod-wsgi-version} ${python-version} ${platform}" - tasks: - - name: "mod-wsgi-standalone" - - name: "mod-wsgi-replica-set" - -- matrix_name: "mockupdb-tests" - matrix_spec: - platform: ubuntu-18.04 - python-version: 3.7 - display_name: "MockupDB Tests" - tasks: - - name: "mockupdb" - -- matrix_name: "tests-doctests" - matrix_spec: - platform: ubuntu-18.04 - python-version: ["3.8"] - display_name: "Doctests ${python-version} ${platform}" - tasks: - - name: "doctests" - -- name: "no-server" - display_name: "No server test" - run_on: - - ubuntu1804-test - tasks: - - name: "no-server" - expansions: - set_xtrace_on: on - -- name: "Coverage Report" - display_name: "Coverage Report" - run_on: - - ubuntu1804-test - tasks: - - name: "coverage-report" - expansions: - set_xtrace_on: on - -- matrix_name: "atlas-connect" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - display_name: "Atlas connect ${python-version} ${platform}" - tasks: - - name: "atlas-connect" - -- matrix_name: "serverless" - matrix_spec: - platform: ubuntu-18.04 - python-version: "*" - auth-ssl: auth-ssl - serverless: "*" - display_name: "Serverless ${python-version} ${platform}" - tasks: - - "serverless_task_group" - -- matrix_name: "data-lake-spec-tests" - matrix_spec: - platform: ubuntu-18.04 - python-version: ["3.7", "3.10"] - auth: "auth" - c-extensions: "*" - display_name: "Atlas Data Lake ${python-version} ${c-extensions}" - tasks: - - name: atlas-data-lake-tests - -- matrix_name: "stable-api-tests" - matrix_spec: - platform: ubuntu-18.04 - python-version: ["3.7", "3.10"] - auth: "auth" - versionedApi: "*" - display_name: "Versioned API ${versionedApi} ${python-version}" - batchtime: 10080 # 7 days - tasks: - # Versioned API was introduced in MongoDB 4.7 - - "test-latest-standalone" - - "test-5.0-standalone" - -- matrix_name: "ocsp-test" - matrix_spec: - # OCSP stapling is not supported on Ubuntu 18.04. - # See https://jira.mongodb.org/browse/SERVER-51364. 
- platform: ubuntu-20.04 - python-version: ["3.7", "3.10", "pypy3.7", "pypy3.8"] - mongodb-version: ["4.4", "5.0", "6.0", "latest"] - auth: "noauth" - ssl: "ssl" - display_name: "OCSP test ${platform} ${python-version} ${mongodb-version}" - batchtime: 20160 # 14 days - tasks: - - name: ".ocsp" - -- matrix_name: "ocsp-test-windows" - matrix_spec: - platform: windows-64-vsMulti-small - python-version-windows: ["3.7", "3.10"] - mongodb-version: ["4.4", "5.0", "6.0", "latest"] - auth: "noauth" - ssl: "ssl" - display_name: "OCSP test ${platform} ${python-version-windows} ${mongodb-version}" - batchtime: 20160 # 14 days - tasks: - # Windows MongoDB servers do not staple OCSP responses and only support RSA. - - name: ".ocsp-rsa !.ocsp-staple" - -- matrix_name: "ocsp-test-macos" - matrix_spec: - platform: macos-1014 - mongodb-version: ["4.4", "5.0", "6.0", "latest"] - auth: "noauth" - ssl: "ssl" - display_name: "OCSP test ${platform} ${mongodb-version}" - batchtime: 20160 # 14 days - tasks: - # macOS MongoDB servers do not staple OCSP responses and only support RSA. - - name: ".ocsp-rsa !.ocsp-staple" - -- matrix_name: "aws-auth-test" - matrix_spec: - platform: [ubuntu-18.04] - python-version: ["3.7"] - display_name: "MONGODB-AWS Auth ${platform} ${python-version}" - tasks: - - name: "aws-auth-test-4.4" - - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - - name: "aws-auth-test-6.0" - - name: "aws-auth-test-rapid" - -- matrix_name: "aws-auth-test-mac" - matrix_spec: - platform: [macos-1014] - python-version-mac: ["system-python3"] - display_name: "MONGODB-AWS Auth ${platform} ${python-version-mac}" - tasks: - - name: "aws-auth-test-4.4" - - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - - name: "aws-auth-test-6.0" - - name: "aws-auth-test-rapid" -- matrix_name: "aws-auth-test-windows" - matrix_spec: - platform: [windows-64-vsMulti-small] - python-version-windows: "*" - display_name: "MONGODB-AWS Auth ${platform} ${python-version-windows}" - tasks: - - name: "aws-auth-test-4.4" - - name: "aws-auth-test-5.0" - - name: "aws-auth-test-latest" - - name: "aws-auth-test-6.0" - - name: "aws-auth-test-rapid" - -- matrix_name: "load-balancer" - matrix_spec: - platform: ubuntu-18.04 - mongodb-version: ["rapid", "latest", "6.0"] - auth-ssl: "*" - python-version: "*" - loadbalancer: "*" - display_name: "Load Balancer ${platform} ${python-version} ${mongodb-version} ${auth-ssl}" - tasks: - - name: "load-balancer-test" - -- name: Release - display_name: Release - batchtime: 20160 # 14 days - tags: ["release_tag"] - tasks: - - ".release_tag" - - # Platform notes - # i386 builds of OpenSSL or Cyrus SASL are not available - # Ubuntu16.04 ppc64le is only supported by MongoDB 3.4+ - # Ubuntu16.04 aarch64 is only supported by MongoDB 3.4+ - # Ubuntu16.04 s390x is only supported by MongoDB 3.4+ - # Ubuntu16.04 (x86) only supports MongoDB 3.2+ - # Debian 8.1 only supports MongoDB 3.4+ - # SUSE12 s390x is only supported by MongoDB 3.4+ - # No enterprise build for Archlinux, SSL not available - # RHEL 7 only supports 2.6+ - # RHEL 7.1 ppc64le is only supported by MongoDB 3.2+ - # RHEL 7.2 s390x is only supported by MongoDB 3.4+ - # Solaris MongoDB SSL builds are not available - # Darwin MongoDB SSL builds are not available for 2.6 - # SUSE12 x86_64 is only supported by MongoDB 3.2+ - # vim: set et sw=2 ts=2 : + - name: resync_specs + display_name: "Resync Specs" + run_on: rhel80-small + cron: '0 16 * * MON' + patchable: true + tasks: + - name: resync_specs diff --git 
a/.evergreen/generated_configs/functions.yml b/.evergreen/generated_configs/functions.yml new file mode 100644 index 0000000000..ce95648849 --- /dev/null +++ b/.evergreen/generated_configs/functions.yml @@ -0,0 +1,309 @@ +functions: + # Assume ec2 role + assume ec2 role: + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + duration_seconds: 3600 + + # Attach benchmark test results + attach benchmark test results: + - command: attach.results + params: + file_location: src/report.json + + # Cleanup + cleanup: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/cleanup.sh + working_dir: src + type: test + + # Download and merge coverage + download and merge coverage: + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} + type: setup + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/download-and-merge-coverage.sh + - ${bucket_name} + - ${revision} + - ${version_id} + working_dir: src + silent: true + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/combine-coverage.sh + working_dir: src + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/upload-coverage-report.sh + - ${bucket_name} + - ${revision} + - ${version_id} + working_dir: src + silent: true + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + - command: s3.put + params: + remote_file: coverage/${revision}/${version_id}/htmlcov/index.html + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: src/htmlcov/index.html + permissions: public-read + content_type: text/html + display_name: Coverage Report HTML + optional: "true" + type: setup + + # Fetch source + fetch source: + - command: git.get_project + params: + directory: src + + # Run server + run server: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - run-server + - ${TEST_NAME} + working_dir: src + include_expansions_in_env: + - VERSION + - TOPOLOGY + - AUTH + - SSL + - ORCHESTRATION_FILE + - PYTHON_BINARY + - PYTHON_VERSION + - STORAGE_ENGINE + - REQUIRE_API_VERSION + - DRIVERS_TOOLS + - TEST_CRYPT_SHARED + - AUTH_AWS + - LOAD_BALANCER + - LOCAL_ATLAS + - NO_EXT + type: test + - command: expansions.update + params: + file: ${DRIVERS_TOOLS}/mo-expansion.yml + + # Run tests + run tests: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - setup-tests + - ${TEST_NAME} + - ${SUB_TEST_NAME} + working_dir: src + include_expansions_in_env: + - AUTH + - SSL + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + - COVERAGE + - PYTHON_BINARY + - LIBMONGOCRYPT_URL + - MONGODB_URI + - PYTHON_VERSION + - DISABLE_TEST_COMMANDS + - GREEN_FRAMEWORK + - NO_EXT + - COMPRESSORS + - MONGODB_API_VERSION + - REQUIRE_API_VERSION + - DEBUG_LOG + - DISABLE_FLAKY + - ORCHESTRATION_FILE + - OCSP_SERVER_TYPE + - VERSION + - IS_WIN32 + - REQUIRE_FIPS + - TEST_MIN_DEPS + type: test + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - run-tests + working_dir: src + type: test + + # Send dashboard data + send dashboard data: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/perf-submission-setup.sh + 
working_dir: src + include_expansions_in_env: + - requester + - revision_order_id + - project_id + - version_id + - build_variant + - parsed_order_id + - task_name + - task_id + - execution + - is_mainline + type: test + - command: expansions.update + params: + file: src/expansion.yml + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/perf-submission.sh + working_dir: src + include_expansions_in_env: + - requester + - revision_order_id + - project_id + - version_id + - build_variant + - parsed_order_id + - task_name + - task_id + - execution + - is_mainline + type: test + + # Setup system + setup system: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/setup-system.sh + working_dir: src + include_expansions_in_env: + - is_patch + - project + - version_id + type: test + - command: expansions.update + params: + file: src/expansion.yml + + # Teardown system + teardown system: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/just.sh + - teardown-tests + working_dir: src + type: test + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/teardown.sh + working_dir: src + type: test + + # Upload coverage + upload coverage: + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} + type: setup + - command: s3.put + params: + remote_file: coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name} + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: src/.coverage + permissions: public-read + content_type: text/html + display_name: Raw Coverage Report + optional: "true" + type: setup + + # Upload mo artifacts + upload mo artifacts: + - command: ec2.assume_role + params: + role_arn: ${assume_role_arn} + type: setup + - command: archive.targz_pack + params: + target: mongo-coredumps.tgz + source_dir: ./ + include: + - ./**.core + - ./**.mdmp + - command: s3.put + params: + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: mongo-coredumps.tgz + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: Core Dumps - Execution + optional: "true" + type: setup + - command: s3.put + params: + remote_file: ${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-drivers-tools-logs.tar.gz + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + bucket: ${bucket_name} + local_file: ${DRIVERS_TOOLS}/.evergreen/test_logs.tar.gz + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: drivers-tools-logs.tar.gz + optional: "true" + type: setup + + # Upload test results + upload test results: + - command: attach.results + params: + file_location: ${DRIVERS_TOOLS}/results.json + - command: attach.xunit_results + params: + file: src/xunit-results/TEST-*.xml diff --git a/.evergreen/generated_configs/tasks.yml b/.evergreen/generated_configs/tasks.yml new file mode 100644 index 0000000000..855cbefef8 --- /dev/null +++ b/.evergreen/generated_configs/tasks.yml @@ -0,0 +1,5116 @@ +tasks: + # Aws lambda tests + - name: test-aws-lambda-deployed + commands: + - func: assume ec2 
role + - func: run tests + vars: + TEST_NAME: aws_lambda + tags: [aws_lambda] + + # Aws tests + - name: test-auth-aws-4.4-regular-python3.10 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "4.4" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: regular + PYTHON_VERSION: "3.10" + tags: [auth-aws, auth-aws-regular] + - name: test-auth-aws-5.0-assume-role-python3.11 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "5.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: assume-role + PYTHON_VERSION: "3.11" + tags: [auth-aws, auth-aws-assume-role] + - name: test-auth-aws-6.0-ec2-python3.12 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "6.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ec2 + PYTHON_VERSION: "3.12" + tags: [auth-aws, auth-aws-ec2] + - name: test-auth-aws-7.0-env-creds-python3.13 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "7.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: env-creds + PYTHON_VERSION: "3.13" + tags: [auth-aws, auth-aws-env-creds] + - name: test-auth-aws-8.0-session-creds-python3.14t + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: "8.0" + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: session-creds + PYTHON_VERSION: 3.14t + tags: [auth-aws, auth-aws-session-creds, free-threaded] + - name: test-auth-aws-rapid-web-identity-python3.14 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + PYTHON_VERSION: "3.14" + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-rapid-web-identity-session-name-python3.14 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: rapid + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: web-identity + AWS_ROLE_SESSION_NAME: test + PYTHON_VERSION: "3.14" + tags: [auth-aws, auth-aws-web-identity] + - name: test-auth-aws-latest-ecs-python3.10 + commands: + - func: run server + vars: + AUTH_AWS: "1" + VERSION: latest + - func: assume ec2 role + - func: run tests + vars: + TEST_NAME: auth_aws + SUB_TEST_NAME: ecs + PYTHON_VERSION: "3.10" + tags: [auth-aws, auth-aws-ecs] + + # Backport pr tests + - name: backport-pr + commands: + - func: assume ec2 role + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/github_app/backport-pr.sh + - mongodb + - mongo-python-driver + - ${github_commit} + working_dir: src + include_expansions_in_env: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_SESSION_TOKEN + type: test + + # Coverage report tests + - name: coverage-report + commands: + - func: download and merge coverage + depends_on: [{ name: .server-version, variant: .coverage_tag, status: "*", patch_optional: true }] + tags: [coverage, pr] + + # Getdata tests + - name: getdata + commands: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/run-getdata.sh + working_dir: src + type: test + + # Import time tests + - name: check-import-time + commands: + - command: subprocess.exec + params: + binary: bash + args: + - .evergreen/scripts/check-import-time.sh + - ${revision} + - ${github_commit} + working_dir: src + type: test + tags: 
[pr] + + # Kms tests + - name: test-gcpkms + commands: + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: gcp + tags: [] + - name: test-gcpkms-fail + commands: + - func: run server + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: gcp-fail + tags: [pr] + - name: test-azurekms + commands: + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: azure + tags: [] + - name: test-azurekms-fail + commands: + - func: run server + - func: run tests + vars: + TEST_NAME: kms + SUB_TEST_NAME: azure-fail + tags: [pr] + + # Min deps tests + - name: test-min-deps-python3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + TEST_MIN_DEPS: "1" + tags: [test-min-deps, standalone-noauth-nossl] + - name: test-min-deps-python3.10-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + TEST_MIN_DEPS: "1" + tags: [test-min-deps, replica_set-noauth-ssl] + - name: test-min-deps-python3.10-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + TEST_MIN_DEPS: "1" + tags: [test-min-deps, sharded_cluster-auth-ssl] + + # Mod wsgi tests + - name: mod-wsgi-replica-set-python3.10 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.10" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: standalone + PYTHON_VERSION: "3.10" + tags: [mod_wsgi, pr] + - name: mod-wsgi-embedded-mode-replica-set-python3.11 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.11" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.11" + tags: [mod_wsgi, pr] + - name: mod-wsgi-replica-set-python3.12 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.12" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: standalone + PYTHON_VERSION: "3.12" + tags: [mod_wsgi, pr] + - name: mod-wsgi-embedded-mode-replica-set-python3.13 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.13" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.13" + tags: [mod_wsgi, pr] + - name: mod-wsgi-embedded-mode-replica-set-python3.14 + commands: + - func: run server + vars: + TOPOLOGY: replica_set + PYTHON_VERSION: "3.14" + - func: run tests + vars: + TEST_NAME: mod_wsgi + SUB_TEST_NAME: embedded + PYTHON_VERSION: "3.14" + tags: [mod_wsgi, pr] + + # No orchestration tests + - name: test-no-orchestration-python3.10 + commands: + - func: assume ec2 role + - func: run tests + vars: + PYTHON_VERSION: "3.10" + tags: [test-no-orchestration, python-3.10] + - name: test-no-orchestration-python3.14 + commands: + - func: assume ec2 role + - func: run tests + vars: + PYTHON_VERSION: "3.14" + tags: [test-no-orchestration, python-3.14] + - name: test-no-orchestration-pypy3.10 + commands: + - func: assume ec2 role + - func: run tests + vars: + PYTHON_VERSION: pypy3.10 + tags: [test-no-orchestration, python-pypy3.10] + + # No toolchain tests + - name: test-no-toolchain-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: 
standalone + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + TEST_NAME: default_sync + tags: [test-no-toolchain, standalone-noauth-nossl] + - name: test-no-toolchain-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + TEST_NAME: default_async + tags: [test-no-toolchain, replica_set-noauth-ssl] + - name: test-no-toolchain-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + TEST_NAME: default_sync + tags: [test-no-toolchain, sharded_cluster-auth-ssl] + + # Ocsp tests + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-valid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: 
test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-invalid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + 
ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-soft-fail-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-soft-fail-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-soft-fail-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-soft-fail-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + 
tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-soft-fail-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-soft-fail-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-soft-fail-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-valid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-valid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + 
OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-invalid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + 
OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-valid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-ecdsa + - "4.4" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-ecdsa + - "5.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-ecdsa + - "6.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-ecdsa + - "7.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-ecdsa + - "8.0" + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-ecdsa + - rapid + - ocsp-staple + - name: test-ocsp-ecdsa-delegate-invalid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-ecdsa + - latest + - ocsp-staple + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: 
test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 
+ commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-ecdsa, "4.4"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-ecdsa, "5.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-ecdsa, "6.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-ecdsa, "7.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-ecdsa, "8.0"] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-ecdsa, rapid] + - name: test-ocsp-ecdsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: ecdsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-ecdsa, latest] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + 
tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-valid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-invalid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + 
VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-delegate-valid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: 
test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-delegate-invalid-cert-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-soft-fail-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-soft-fail-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-soft-fail-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-soft-fail-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-soft-fail-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-soft-fail-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-soft-fail-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-valid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-rsa + - "4.4" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-rsa + - "5.0" + - ocsp-staple + - name: 
test-ocsp-rsa-valid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-rsa + - "6.0" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-rsa + - "7.0" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-rsa + - "8.0" + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-rsa + - rapid + - ocsp-staple + - name: test-ocsp-rsa-valid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-rsa + - latest + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-rsa + - "4.4" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-rsa + - "5.0" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-rsa + - "6.0" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-rsa + - "7.0" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-rsa + - "8.0" + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-rsa + - rapid + - ocsp-staple + - name: test-ocsp-rsa-invalid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + 
OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-rsa + - latest + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-rsa + - "4.4" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-rsa + - "5.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-rsa + - "6.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-rsa + - "7.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-rsa + - "8.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-rsa + - rapid + - ocsp-staple + - name: test-ocsp-rsa-delegate-valid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: valid-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-rsa + - latest + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: + - ocsp + - ocsp-rsa + - "4.4" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: + - ocsp + - ocsp-rsa + - "5.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: + - ocsp + - ocsp-rsa + - "6.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: 
rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: + - ocsp + - ocsp-rsa + - "7.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: + - ocsp + - ocsp-rsa + - "8.0" + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: + - ocsp + - ocsp-rsa + - rapid + - ocsp-staple + - name: test-ocsp-rsa-delegate-invalid-cert-server-staples-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: + - ocsp + - ocsp-rsa + - latest + - ocsp-staple + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: 
test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-delegate-malicious-invalid-cert-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: revoked-delegate + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v4.4-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "4.4" + tags: [ocsp, ocsp-rsa, "4.4"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v5.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "5.0" + tags: [ocsp, ocsp-rsa, "5.0"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v6.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "6.0" + tags: [ocsp, ocsp-rsa, "6.0"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v7.0-python3.10 + commands: + - func: 
run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "7.0" + tags: [ocsp, ocsp-rsa, "7.0"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-v8.0-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: "8.0" + tags: [ocsp, ocsp-rsa, "8.0"] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-rapid-python3.10 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.10" + VERSION: rapid + tags: [ocsp, ocsp-rsa, rapid] + - name: test-ocsp-rsa-malicious-no-responder-muststaple-server-does-not-staple-latest-python3.14 + commands: + - func: run tests + vars: + ORCHESTRATION_FILE: rsa-basic-tls-ocsp-mustStaple-disableStapling.json + OCSP_SERVER_TYPE: no-responder + TEST_NAME: ocsp + PYTHON_VERSION: "3.14" + VERSION: latest + tags: [ocsp, ocsp-rsa, latest] + + # OIDC tests + - name: test-auth-oidc-default + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: default + tags: [auth_oidc] + - name: test-auth-oidc-azure + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: azure + tags: [auth_oidc, auth_oidc_remote] + - name: test-auth-oidc-gcp + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: gcp + tags: [auth_oidc, auth_oidc_remote] + - name: test-auth-oidc-eks + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: eks + tags: [auth_oidc, auth_oidc_remote] + - name: test-auth-oidc-aks + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: aks + tags: [auth_oidc, auth_oidc_remote] + - name: test-auth-oidc-gke + commands: + - func: run tests + vars: + TEST_NAME: auth_oidc + SUB_TEST_NAME: gke + tags: [auth_oidc, auth_oidc_remote] + + # Perf tests + - name: perf-8.0-standalone-ssl + commands: + - func: run server + vars: + VERSION: v8.0-perf + SSL: ssl + - func: run tests + vars: + TEST_NAME: perf + SUB_TEST_NAME: sync + - func: attach benchmark test results + - func: send dashboard data + tags: [perf] + - name: perf-8.0-standalone-ssl-async + commands: + - func: run server + vars: + VERSION: v8.0-perf + SSL: ssl + - func: run tests + vars: + TEST_NAME: perf + SUB_TEST_NAME: async + - func: attach benchmark test results + - func: send dashboard data + tags: [perf] + - name: perf-8.0-standalone + commands: + - func: run server + vars: + VERSION: v8.0-perf + SSL: nossl + - func: run tests + vars: + TEST_NAME: perf + SUB_TEST_NAME: sync + - func: attach benchmark test results + - func: send dashboard data + tags: [perf] + - name: perf-8.0-standalone-async + commands: + - func: run server + vars: + VERSION: v8.0-perf + SSL: nossl + - func: run tests + vars: + TEST_NAME: perf + SUB_TEST_NAME: async + - func: attach benchmark test results + - func: send dashboard data + tags: [perf] + + # Search index tests + - name: test-search-index-helpers + commands: + - func: assume ec2 role + - func: run server + vars: + TEST_NAME: search_index + - func: run tests + vars: + TEST_NAME: search_index + tags: [search_index] + + # Server version tests + - name: test-server-version-python3.14t-async-auth-nossl-replica-set + commands: + -
func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: replica_set + PYTHON_VERSION: 3.14t + TEST_NAME: default_async + tags: + - server-version + - python-3.14t + - replica_set-auth-nossl + - async + - free-threaded + - name: test-server-version-python3.13-sync-auth-nossl-replica-set-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - server-version + - python-3.13 + - replica_set-auth-nossl + - sync + - name: test-server-version-python3.12-async-auth-ssl-replica-set-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - server-version + - python-3.12 + - replica_set-auth-ssl + - async + - name: test-server-version-python3.11-sync-auth-ssl-replica-set-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - server-version + - python-3.11 + - replica_set-auth-ssl + - sync + - name: test-server-version-python3.11-async-noauth-nossl-replica-set-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - server-version + - python-3.11 + - replica_set-noauth-nossl + - async + - pr + - name: test-server-version-python3.10-sync-noauth-nossl-replica-set-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - server-version + - python-3.10 + - replica_set-noauth-nossl + - sync + - pr + - name: test-server-version-pypy3.10-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - server-version + - python-pypy3.10 + - replica_set-noauth-ssl + - async + - name: test-server-version-python3.14-sync-noauth-ssl-replica-set-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + COVERAGE: "1" + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync + tags: + - server-version + - python-3.14 + - replica_set-noauth-ssl + - sync + - name: test-server-version-python3.14-async-auth-nossl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.14" + TEST_NAME: default_async + tags: + - server-version + - python-3.14 + - sharded_cluster-auth-nossl + - async + - name: 
test-server-version-python3.14t-sync-auth-nossl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: 3.14t + TEST_NAME: default_sync + tags: + - server-version + - python-3.14t + - sharded_cluster-auth-nossl + - sync + - free-threaded + - name: test-server-version-python3.10-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - server-version + - python-3.10 + - sharded_cluster-auth-ssl + - async + - pr + - name: test-server-version-python3.11-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - server-version + - python-3.11 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.12-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - server-version + - python-3.12 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.13-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - server-version + - python-3.13 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.14-async-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.14" + TEST_NAME: default_async + tags: + - server-version + - python-3.14 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.14t-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: 3.14t + TEST_NAME: default_async + tags: + - server-version + - python-3.14t + - sharded_cluster-auth-ssl + - async + - free-threaded + - name: test-server-version-pypy3.10-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - server-version + - python-pypy3.10 + - sharded_cluster-auth-ssl + - async + - name: test-server-version-python3.10-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: 
"3.10" + TEST_NAME: default_sync + tags: + - server-version + - python-3.10 + - sharded_cluster-auth-ssl + - sync + - pr + - name: test-server-version-python3.11-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - server-version + - python-3.11 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.12-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - server-version + - python-3.12 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.13-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - server-version + - python-3.13 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.14-sync-auth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync + tags: + - server-version + - python-3.14 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.14t-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: 3.14t + TEST_NAME: default_sync + tags: + - server-version + - python-3.14t + - sharded_cluster-auth-ssl + - sync + - free-threaded + - name: test-server-version-pypy3.10-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - server-version + - python-pypy3.10 + - sharded_cluster-auth-ssl + - sync + - name: test-server-version-python3.12-async-noauth-nossl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - server-version + - python-3.12 + - sharded_cluster-noauth-nossl + - async + - name: test-server-version-python3.11-sync-noauth-nossl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - server-version + - python-3.11 + - sharded_cluster-noauth-nossl + - sync + - name: test-server-version-python3.10-async-noauth-ssl-sharded-cluster-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + 
TOPOLOGY: sharded_cluster + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: sharded_cluster + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - server-version + - python-3.10 + - sharded_cluster-noauth-ssl + - async + - name: test-server-version-pypy3.10-sync-noauth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: sharded_cluster + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: sharded_cluster + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - server-version + - python-pypy3.10 + - sharded_cluster-noauth-ssl + - sync + - name: test-server-version-python3.13-async-auth-nossl-standalone-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - server-version + - python-3.13 + - standalone-auth-nossl + - async + - name: test-server-version-python3.12-sync-auth-nossl-standalone-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - server-version + - python-3.12 + - standalone-auth-nossl + - sync + - name: test-server-version-python3.11-async-auth-ssl-standalone-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - server-version + - python-3.11 + - standalone-auth-ssl + - async + - name: test-server-version-python3.10-sync-auth-ssl-standalone-cov + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - server-version + - python-3.10 + - standalone-auth-ssl + - sync + - name: test-server-version-python3.10-async-noauth-nossl-standalone-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - server-version + - python-3.10 + - standalone-noauth-nossl + - async + - pr + - name: test-server-version-pypy3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - server-version + - python-pypy3.10 + - standalone-noauth-nossl + - sync + - pr + - name: test-server-version-python3.14-async-noauth-ssl-standalone-cov + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: standalone + COVERAGE: "1" + PYTHON_VERSION: "3.14" + TEST_NAME: default_async + tags: + - server-version + - python-3.14 + - standalone-noauth-ssl + - async + - name: test-server-version-python3.14t-sync-noauth-ssl-standalone + commands: + - func: run 
server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: standalone + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: standalone + PYTHON_VERSION: 3.14t + TEST_NAME: default_sync + tags: + - server-version + - python-3.14t + - standalone-noauth-ssl + - sync + - free-threaded + + # Standard tests + - name: test-standard-v4.2-python3.11-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-3.11 + - replica_set-noauth-ssl + - sync + - name: test-standard-v4.2-python3.14-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-3.14 + - replica_set-noauth-ssl + - sync + - name: test-standard-v4.2-python3.12-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-3.12 + - sharded_cluster-auth-ssl + - sync + - name: test-standard-v4.2-pypy3.10-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - sync + - pypy + - name: test-standard-v4.2-python3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-3.10 + - standalone-noauth-nossl + - sync + - name: test-standard-v4.2-python3.14t-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: 3.14t + TEST_NAME: default_sync + tags: + - test-standard + - server-4.2 + - python-3.14t + - standalone-noauth-nossl + - sync + - free-threaded + - name: test-standard-v4.4-python3.11-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.11 + - replica_set-noauth-ssl + - async + - name: test-standard-v4.4-python3.14-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + 
PYTHON_VERSION: "3.14" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.14 + - replica_set-noauth-ssl + - async + - name: test-standard-v4.4-python3.12-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.12 + - sharded_cluster-auth-ssl + - async + - name: test-standard-v4.4-pypy3.10-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - async + - pypy + - name: test-standard-v4.4-python3.10-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.10 + - standalone-noauth-nossl + - async + - name: test-standard-v4.4-python3.14t-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + PYTHON_VERSION: 3.14t + TEST_NAME: default_async + tags: + - test-standard + - server-4.4 + - python-3.14t + - standalone-noauth-nossl + - async + - free-threaded + - name: test-standard-v5.0-python3.10-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - test-standard + - server-5.0 + - python-3.10 + - replica_set-noauth-ssl + - sync + - name: test-standard-v5.0-python3.14t-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + PYTHON_VERSION: 3.14t + TEST_NAME: default_sync + tags: + - test-standard + - server-5.0 + - python-3.14t + - replica_set-noauth-ssl + - sync + - free-threaded + - name: test-standard-v5.0-python3.11-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - test-standard + - server-5.0 + - python-3.11 + - sharded_cluster-auth-ssl + - sync + - name: test-standard-v5.0-python3.14-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync + tags: + - test-standard + - server-5.0 + - python-3.14 + - 
sharded_cluster-auth-ssl + - sync + - name: test-standard-v5.0-python3.13-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - test-standard + - server-5.0 + - python-3.13 + - standalone-noauth-nossl + - sync + - name: test-standard-v6.0-python3.10-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - test-standard + - server-6.0 + - python-3.10 + - replica_set-noauth-ssl + - async + - name: test-standard-v6.0-python3.14t-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + PYTHON_VERSION: 3.14t + TEST_NAME: default_async + tags: + - test-standard + - server-6.0 + - python-3.14t + - replica_set-noauth-ssl + - async + - free-threaded + - name: test-standard-v6.0-python3.11-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - test-standard + - server-6.0 + - python-3.11 + - sharded_cluster-auth-ssl + - async + - name: test-standard-v6.0-python3.14-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.14" + TEST_NAME: default_async + tags: + - test-standard + - server-6.0 + - python-3.14 + - sharded_cluster-auth-ssl + - async + - name: test-standard-v6.0-python3.13-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - test-standard + - server-6.0 + - python-3.13 + - standalone-noauth-nossl + - async + - name: test-standard-v7.0-python3.13-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - test-standard + - server-7.0 + - python-3.13 + - replica_set-noauth-ssl + - sync + - name: test-standard-v7.0-python3.10-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_sync + tags: + - test-standard + - server-7.0 + - python-3.10 + - sharded_cluster-auth-ssl + - sync + - name: test-standard-v7.0-python3.14t-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: 
ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: 3.14t + TEST_NAME: default_sync + tags: + - test-standard + - server-7.0 + - python-3.14t + - sharded_cluster-auth-ssl + - sync + - free-threaded + - name: test-standard-v7.0-python3.12-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - test-standard + - server-7.0 + - python-3.12 + - standalone-noauth-nossl + - sync + - name: test-standard-v7.0-pypy3.10-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - test-standard + - server-7.0 + - python-pypy3.10 + - standalone-noauth-nossl + - sync + - pypy + - name: test-standard-v8.0-python3.13-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "8.0" + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - test-standard + - server-8.0 + - python-3.13 + - replica_set-noauth-ssl + - async + - name: test-standard-v8.0-python3.10-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: "3.10" + TEST_NAME: default_async + tags: + - test-standard + - server-8.0 + - python-3.10 + - sharded_cluster-auth-ssl + - async + - name: test-standard-v8.0-python3.14t-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: 3.14t + TEST_NAME: default_async + tags: + - test-standard + - server-8.0 + - python-3.14t + - sharded_cluster-auth-ssl + - async + - free-threaded + - name: test-standard-v8.0-python3.12-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - test-standard + - server-8.0 + - python-3.12 + - standalone-noauth-nossl + - async + - name: test-standard-v8.0-pypy3.10-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - test-standard + - server-8.0 + - python-pypy3.10 + - standalone-noauth-nossl + - async + - pypy + - name: test-standard-latest-python3.12-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: 
replica_set + VERSION: latest + PYTHON_VERSION: "3.12" + TEST_NAME: default_async + tags: + - test-standard + - server-latest + - python-3.12 + - replica_set-noauth-ssl + - async + - pr + - name: test-standard-latest-pypy3.10-async-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_async + tags: + - test-standard + - server-latest + - python-pypy3.10 + - replica_set-noauth-ssl + - async + - pypy + - name: test-standard-latest-python3.13-async-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: "3.13" + TEST_NAME: default_async + tags: + - test-standard + - server-latest + - python-3.13 + - sharded_cluster-auth-ssl + - async + - pr + - name: test-standard-latest-python3.11-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + PYTHON_VERSION: "3.11" + TEST_NAME: default_async + tags: + - test-standard + - server-latest + - python-3.11 + - standalone-noauth-nossl + - async + - pr + - name: test-standard-latest-python3.14-async-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + PYTHON_VERSION: "3.14" + TEST_NAME: default_async + tags: + - test-standard + - server-latest + - python-3.14 + - standalone-noauth-nossl + - async + - pr + - name: test-standard-rapid-python3.12-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + PYTHON_VERSION: "3.12" + TEST_NAME: default_sync + tags: + - test-standard + - server-rapid + - python-3.12 + - replica_set-noauth-ssl + - sync + - name: test-standard-rapid-pypy3.10-sync-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + PYTHON_VERSION: pypy3.10 + TEST_NAME: default_sync + tags: + - test-standard + - server-rapid + - python-pypy3.10 + - replica_set-noauth-ssl + - sync + - pypy + - name: test-standard-rapid-python3.13-sync-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: "3.13" + TEST_NAME: default_sync + tags: + - test-standard + - server-rapid + - python-3.13 + - sharded_cluster-auth-ssl + - sync + - name: test-standard-rapid-python3.11-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + PYTHON_VERSION: "3.11" + TEST_NAME: default_sync + tags: + - 
test-standard + - server-rapid + - python-3.11 + - standalone-noauth-nossl + - sync + - name: test-standard-rapid-python3.14-sync-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + PYTHON_VERSION: "3.14" + TEST_NAME: default_sync + tags: + - test-standard + - server-rapid + - python-3.14 + - standalone-noauth-nossl + - sync + + # Test non standard tests + - name: test-non-standard-v4.2-python3.11-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.2" + PYTHON_VERSION: "3.11" + tags: + - test-non-standard + - server-4.2 + - python-3.11 + - replica_set-noauth-ssl + - noauth + - name: test-non-standard-v4.2-python3.12-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: "3.12" + tags: + - test-non-standard + - server-4.2 + - python-3.12 + - sharded_cluster-auth-ssl + - auth + - name: test-non-standard-v4.2-python3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: "3.10" + tags: + - test-non-standard + - server-4.2 + - python-3.10 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-v4.2-pypy3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.2" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-4.2 + - python-pypy3.10 + - standalone-noauth-nossl + - noauth + - pypy + - name: test-non-standard-v4.4-python3.14t-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: 3.14t + tags: + - test-non-standard + - server-4.4 + - python-3.14t + - replica_set-noauth-ssl + - noauth + - free-threaded + - name: test-non-standard-v4.4-pypy3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-4.4 + - python-pypy3.10 + - replica_set-noauth-ssl + - noauth + - pypy + - name: test-non-standard-v4.4-python3.14-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: "3.14" + tags: + - test-non-standard + - server-4.4 + - python-3.14 + - sharded_cluster-auth-ssl + - auth + - name: test-non-standard-v4.4-python3.13-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + - func: run 
tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "4.4" + PYTHON_VERSION: "3.13" + tags: + - test-non-standard + - server-4.4 + - python-3.13 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-v5.0-python3.11-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "5.0" + PYTHON_VERSION: "3.11" + tags: + - test-non-standard + - server-5.0 + - python-3.11 + - replica_set-noauth-ssl + - noauth + - name: test-non-standard-v5.0-python3.12-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: "3.12" + tags: + - test-non-standard + - server-5.0 + - python-3.12 + - sharded_cluster-auth-ssl + - auth + - name: test-non-standard-v5.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-5.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-non-standard-v5.0-python3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "5.0" + PYTHON_VERSION: "3.10" + tags: + - test-non-standard + - server-5.0 + - python-3.10 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-v6.0-python3.14t-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "6.0" + PYTHON_VERSION: 3.14t + tags: + - test-non-standard + - server-6.0 + - python-3.14t + - replica_set-noauth-ssl + - noauth + - free-threaded + - name: test-non-standard-v6.0-python3.14-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.14" + tags: + - test-non-standard + - server-6.0 + - python-3.14 + - sharded_cluster-auth-ssl + - auth + - name: test-non-standard-v6.0-python3.13-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + PYTHON_VERSION: "3.13" + tags: + - test-non-standard + - server-6.0 + - python-3.13 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-v6.0-pypy3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "6.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-6.0 + - python-pypy3.10 + - standalone-noauth-nossl + - noauth + - pypy + - name: test-non-standard-v7.0-python3.11-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: 
noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + PYTHON_VERSION: "3.11" + tags: + - test-non-standard + - server-7.0 + - python-3.11 + - replica_set-noauth-ssl + - noauth + - name: test-non-standard-v7.0-pypy3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-7.0 + - python-pypy3.10 + - replica_set-noauth-ssl + - noauth + - pypy + - name: test-non-standard-v7.0-python3.12-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: "3.12" + tags: + - test-non-standard + - server-7.0 + - python-3.12 + - sharded_cluster-auth-ssl + - auth + - name: test-non-standard-v7.0-python3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "7.0" + PYTHON_VERSION: "3.10" + tags: + - test-non-standard + - server-7.0 + - python-3.10 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-v8.0-python3.14t-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: "8.0" + PYTHON_VERSION: 3.14t + tags: + - test-non-standard + - server-8.0 + - python-3.14t + - replica_set-noauth-ssl + - noauth + - free-threaded + - name: test-non-standard-v8.0-python3.14-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: "3.14" + tags: + - test-non-standard + - server-8.0 + - python-3.14 + - sharded_cluster-auth-ssl + - auth + - name: test-non-standard-v8.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-8.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-non-standard-v8.0-python3.13-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: "8.0" + PYTHON_VERSION: "3.13" + tags: + - test-non-standard + - server-8.0 + - python-3.13 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-latest-python3.14t-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + PYTHON_VERSION: 3.14t + tags: + - test-non-standard + - server-latest + - python-3.14t + - replica_set-noauth-ssl + - noauth + - free-threaded + - pr + - name: 
test-non-standard-latest-pypy3.10-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: latest + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-latest + - python-pypy3.10 + - replica_set-noauth-ssl + - noauth + - pypy + - name: test-non-standard-latest-python3.14-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: "3.14" + tags: + - test-non-standard + - server-latest + - python-3.14 + - sharded_cluster-auth-ssl + - auth + - pr + - name: test-non-standard-latest-python3.13-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: latest + PYTHON_VERSION: "3.13" + tags: + - test-non-standard + - server-latest + - python-3.13 + - standalone-noauth-nossl + - noauth + - pr + - name: test-non-standard-rapid-python3.11-noauth-ssl-replica-set + commands: + - func: run server + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: ssl + TOPOLOGY: replica_set + VERSION: rapid + PYTHON_VERSION: "3.11" + tags: + - test-non-standard + - server-rapid + - python-3.11 + - replica_set-noauth-ssl + - noauth + - name: test-non-standard-rapid-python3.12-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: "3.12" + tags: + - test-non-standard + - server-rapid + - python-3.12 + - sharded_cluster-auth-ssl + - auth + - name: test-non-standard-rapid-python3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + PYTHON_VERSION: "3.10" + tags: + - test-non-standard + - server-rapid + - python-3.10 + - standalone-noauth-nossl + - noauth + - name: test-non-standard-rapid-pypy3.10-noauth-nossl-standalone + commands: + - func: run server + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + - func: run tests + vars: + AUTH: noauth + SSL: nossl + TOPOLOGY: standalone + VERSION: rapid + PYTHON_VERSION: pypy3.10 + tags: + - test-non-standard + - server-rapid + - python-pypy3.10 + - standalone-noauth-nossl + - noauth + - pypy + + # Test standard auth tests + - name: test-standard-auth-v4.2-python3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + PYTHON_VERSION: "3.10" + tags: + - test-standard-auth + - server-4.2 + - python-3.10 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v4.2-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.2" + 
PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-4.2 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v4.4-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: "3.11" + tags: + - test-standard-auth + - server-4.4 + - python-3.11 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v4.4-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "4.4" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-4.4 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v5.0-python3.12-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: "3.12" + tags: + - test-standard-auth + - server-5.0 + - python-3.12 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v5.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "5.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-5.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v6.0-python3.13-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: "3.13" + tags: + - test-standard-auth + - server-6.0 + - python-3.13 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v6.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "6.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-6.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v7.0-python3.14t-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: 3.14t + tags: + - test-standard-auth + - server-7.0 + - python-3.14t + - sharded_cluster-auth-ssl + - auth + - free-threaded + - name: test-standard-auth-v7.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "7.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-7.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-v8.0-python3.14-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + 
SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: "3.14" + tags: + - test-standard-auth + - server-8.0 + - python-3.14 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-v8.0-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: "8.0" + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-8.0 + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-latest-python3.11-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: "3.11" + tags: + - test-standard-auth + - server-latest + - python-3.11 + - sharded_cluster-auth-ssl + - auth + - pr + - name: test-standard-auth-latest-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: latest + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-latest + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy + - name: test-standard-auth-rapid-python3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: "3.10" + tags: + - test-standard-auth + - server-rapid + - python-3.10 + - sharded_cluster-auth-ssl + - auth + - name: test-standard-auth-rapid-pypy3.10-auth-ssl-sharded-cluster + commands: + - func: run server + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + - func: run tests + vars: + AUTH: auth + SSL: ssl + TOPOLOGY: sharded_cluster + VERSION: rapid + PYTHON_VERSION: pypy3.10 + tags: + - test-standard-auth + - server-rapid + - python-pypy3.10 + - sharded_cluster-auth-ssl + - auth + - pypy diff --git a/.evergreen/generated_configs/variants.yml b/.evergreen/generated_configs/variants.yml new file mode 100644 index 0000000000..9bae5f4680 --- /dev/null +++ b/.evergreen/generated_configs/variants.yml @@ -0,0 +1,616 @@ +buildvariants: + # Alternative hosts tests + - name: other-hosts-rhel9-fips-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts RHEL9-FIPS latest + run_on: + - rhel92-fips + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + REQUIRE_FIPS: "1" + PYTHON_BINARY: /usr/bin/python3.11 + tags: [] + - name: other-hosts-rhel8-zseries-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts RHEL8-zseries latest + run_on: + - rhel8-zseries-small + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + tags: [] + - name: other-hosts-rhel8-power8-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts RHEL8-POWER8 latest + run_on: + - rhel8-power-small + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + tags: [] + - name: other-hosts-rhel8-arm64-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts RHEL8-arm64 latest + run_on: + - rhel82-arm64-small + batchtime: 
1440 + expansions: + VERSION: latest + NO_EXT: "1" + tags: [] + - name: other-hosts-amazon2023-latest + tasks: + - name: .test-no-toolchain + display_name: Other hosts Amazon2023 latest + run_on: + - amazon2023-arm64-latest-large-m8g + batchtime: 1440 + expansions: + VERSION: latest + NO_EXT: "1" + tags: [pr] + + # Atlas connect tests + - name: atlas-connect-rhel8 + tasks: + - name: .test-no-orchestration + display_name: Atlas connect RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: atlas_connect + tags: [pr] + + # Aws auth tests + - name: auth-aws-ubuntu-20 + tasks: + - name: .auth-aws !.auth-aws-ecs + display_name: Auth AWS Ubuntu-20 + run_on: + - ubuntu2004-small + tags: [] + - name: auth-aws-win64 + tasks: + - name: .auth-aws !.auth-aws-ecs + display_name: Auth AWS Win64 + run_on: + - windows-64-vsMulti-small + tags: [] + - name: auth-aws-macos + tasks: + - name: .auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2 + display_name: Auth AWS macOS + run_on: + - macos-14 + tags: [pr] + + # Aws lambda tests + - name: faas-lambda + tasks: + - name: .aws_lambda + display_name: FaaS Lambda + run_on: + - rhel87-small + + # Backport pr tests + - name: backport-pr + tasks: + - name: backport-pr + display_name: Backport PR + run_on: + - rhel87-small + + # Compression tests + - name: compression-snappy-rhel8 + tasks: + - name: .test-standard + display_name: Compression snappy RHEL8 + run_on: + - rhel87-small + expansions: + COMPRESSOR: snappy + - name: compression-zlib-rhel8 + tasks: + - name: .test-standard + display_name: Compression zlib RHEL8 + run_on: + - rhel87-small + expansions: + COMPRESSOR: zlib + - name: compression-zstd-rhel8 + tasks: + - name: .test-standard !.server-4.2 + display_name: Compression zstd RHEL8 + run_on: + - rhel87-small + expansions: + COMPRESSOR: zstd + + # Coverage report tests + - name: coverage-report + tasks: + - name: coverage-report + display_name: Coverage Report + run_on: + - rhel87-small + + # Disable test commands tests + - name: disable-test-commands-rhel8 + tasks: + - name: .test-standard .server-latest + display_name: Disable test commands RHEL8 + run_on: + - rhel87-small + expansions: + AUTH: auth + SSL: ssl + DISABLE_TEST_COMMANDS: "1" + + # Doctests tests + - name: doctests-rhel8 + tasks: + - name: .test-non-standard .standalone-noauth-nossl + display_name: Doctests RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: doctest + + # Encryption tests + - name: encryption-rhel8 + tasks: + - name: .test-non-standard + - name: .test-min-deps + display_name: Encryption RHEL8 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + tags: [encryption_tag] + - name: encryption-macos + tasks: + - name: .test-non-standard !.pypy + display_name: Encryption macOS + run_on: + - macos-14 + batchtime: 1440 + expansions: + TEST_NAME: encryption + tags: [encryption_tag] + - name: encryption-win64 + tasks: + - name: .test-non-standard !.pypy + display_name: Encryption Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + tags: [encryption_tag] + - name: encryption-crypt_shared-rhel8 + tasks: + - name: .test-non-standard + - name: .test-min-deps + display_name: Encryption crypt_shared RHEL8 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + TEST_CRYPT_SHARED: "true" + tags: [encryption_tag] + - name: encryption-crypt_shared-macos + tasks: + - name: .test-non-standard !.pypy + display_name: Encryption crypt_shared macOS + run_on: + - 
macos-14 + batchtime: 1440 + expansions: + TEST_NAME: encryption + TEST_CRYPT_SHARED: "true" + tags: [encryption_tag] + - name: encryption-crypt_shared-win64 + tasks: + - name: .test-non-standard !.pypy + display_name: Encryption crypt_shared Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + TEST_CRYPT_SHARED: "true" + tags: [encryption_tag] + - name: encryption-pyopenssl-rhel8 + tasks: + - name: .test-non-standard + display_name: Encryption PyOpenSSL RHEL8 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + TEST_NAME: encryption + SUB_TEST_NAME: pyopenssl + tags: [encryption_tag] + + # Enterprise auth tests + - name: auth-enterprise-rhel8 + tasks: + - name: .test-standard-auth .auth !.free-threaded + display_name: Auth Enterprise RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: enterprise_auth + AUTH: auth + - name: auth-enterprise-macos + tasks: + - name: .test-standard-auth !.pypy .auth !.free-threaded + display_name: Auth Enterprise macOS + run_on: + - macos-14 + expansions: + TEST_NAME: enterprise_auth + AUTH: auth + - name: auth-enterprise-win64 + tasks: + - name: .test-standard-auth !.pypy .auth + display_name: Auth Enterprise Win64 + run_on: + - windows-64-vsMulti-small + expansions: + TEST_NAME: enterprise_auth + AUTH: auth + + # Green framework tests + - name: green-gevent-rhel8 + tasks: + - name: .test-standard .sync !.free-threaded + display_name: Green Gevent RHEL8 + run_on: + - rhel87-small + expansions: + GREEN_FRAMEWORK: gevent + + # Import time tests + - name: import-time + tasks: + - name: check-import-time + display_name: Import Time + run_on: + - rhel87-small + + # Kms tests + - name: kms + tasks: + - name: test-gcpkms + batchtime: 1440 + - name: test-gcpkms-fail + - name: test-azurekms + batchtime: 1440 + - name: test-azurekms-fail + display_name: KMS + run_on: + - debian11-small + + # Load balancer tests + - name: load-balancer + tasks: + - name: .test-non-standard .server-6.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-7.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-8.0 .sharded_cluster-auth-ssl + - name: .test-non-standard .server-rapid .sharded_cluster-auth-ssl + - name: .test-non-standard .server-latest .sharded_cluster-auth-ssl + display_name: Load Balancer + run_on: + - rhel87-small + batchtime: 1440 + expansions: + TEST_NAME: load_balancer + + # Mockupdb tests + - name: mockupdb-rhel8 + tasks: + - name: .test-no-orchestration + display_name: MockupDB RHEL8 + run_on: + - rhel87-small + expansions: + TEST_NAME: mockupdb + tags: [pr] + + # Mod wsgi tests + - name: mod_wsgi-ubuntu-22 + tasks: + - name: .mod_wsgi + display_name: Mod_WSGI Ubuntu-22 + run_on: + - ubuntu2204-small + expansions: + MOD_WSGI_VERSION: "4" + + # No c ext tests + - name: no-c-ext-rhel8 + tasks: + - name: .test-standard + display_name: No C Ext RHEL8 + run_on: + - rhel87-small + + # No server tests + - name: no-server-rhel8 + tasks: + - name: .test-no-orchestration + display_name: No server RHEL8 + run_on: + - rhel87-small + tags: [pr] + + # Ocsp tests + - name: ocsp-rhel8 + tasks: + - name: .ocsp + display_name: OCSP RHEL8 + run_on: + - rhel87-small + batchtime: 10080 + - name: ocsp-win64 + tasks: + - name: .ocsp-rsa !.ocsp-staple .latest + - name: .ocsp-rsa !.ocsp-staple .4.4 + display_name: OCSP Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 10080 + - name: ocsp-macos + tasks: + - name: .ocsp-rsa !.ocsp-staple .latest + - name: .ocsp-rsa !.ocsp-staple .4.4 + 
display_name: OCSP macOS + run_on: + - macos-14 + batchtime: 10080 + + # Oidc auth tests + - name: auth-oidc-ubuntu-22 + tasks: + - name: .auth_oidc_remote + display_name: Auth OIDC Ubuntu-22 + run_on: + - ubuntu2204-small + batchtime: 1440 + - name: auth-oidc-local-ubuntu-22 + tasks: + - name: "!.auth_oidc_remote .auth_oidc" + display_name: Auth OIDC Local Ubuntu-22 + run_on: + - ubuntu2204-small + batchtime: 1440 + tags: [pr] + - name: auth-oidc-macos + tasks: + - name: "!.auth_oidc_remote .auth_oidc" + display_name: Auth OIDC macOS + run_on: + - macos-14 + batchtime: 1440 + - name: auth-oidc-win64 + tasks: + - name: "!.auth_oidc_remote .auth_oidc" + display_name: Auth OIDC Win64 + run_on: + - windows-64-vsMulti-small + batchtime: 1440 + + # Perf tests + - name: performance-benchmarks + tasks: + - name: .perf + display_name: Performance Benchmarks + run_on: + - rhel90-dbx-perf-large + batchtime: 1440 + + # Pyopenssl tests + - name: pyopenssl-rhel8 + tasks: + - name: .test-standard .sync + - name: .test-standard .async .replica_set-noauth-ssl + display_name: PyOpenSSL RHEL8 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + SUB_TEST_NAME: pyopenssl + - name: pyopenssl-macos + tasks: + - name: .test-standard !.pypy .sync + - name: .test-standard !.pypy .async .replica_set-noauth-ssl + display_name: PyOpenSSL macOS + run_on: + - rhel87-small + batchtime: 1440 + expansions: + SUB_TEST_NAME: pyopenssl + - name: pyopenssl-win64 + tasks: + - name: .test-standard !.pypy .sync + - name: .test-standard !.pypy .async .replica_set-noauth-ssl + display_name: PyOpenSSL Win64 + run_on: + - rhel87-small + batchtime: 1440 + expansions: + SUB_TEST_NAME: pyopenssl + + # Search index tests + - name: search-index-helpers-rhel8-python3.10 + tasks: + - name: .search_index + display_name: Search Index Helpers RHEL8 Python3.10 + run_on: + - rhel87-small + expansions: + PYTHON_BINARY: /opt/python/3.10/bin/python3 + + # Server version tests + - name: mongodb-v4.2 + tasks: + - name: .server-version + display_name: "* MongoDB v4.2" + run_on: + - rhel87-small + expansions: + VERSION: "4.2" + tags: [coverage_tag] + - name: mongodb-v4.4 + tasks: + - name: .server-version + display_name: "* MongoDB v4.4" + run_on: + - rhel87-small + expansions: + VERSION: "4.4" + tags: [coverage_tag] + - name: mongodb-v5.0 + tasks: + - name: .server-version + display_name: "* MongoDB v5.0" + run_on: + - rhel87-small + expansions: + VERSION: "5.0" + tags: [coverage_tag] + - name: mongodb-v6.0 + tasks: + - name: .server-version + display_name: "* MongoDB v6.0" + run_on: + - rhel87-small + expansions: + VERSION: "6.0" + tags: [coverage_tag] + - name: mongodb-v7.0 + tasks: + - name: .server-version + display_name: "* MongoDB v7.0" + run_on: + - rhel87-small + expansions: + VERSION: "7.0" + tags: [coverage_tag] + - name: mongodb-v8.0 + tasks: + - name: .server-version + display_name: "* MongoDB v8.0" + run_on: + - rhel87-small + expansions: + VERSION: "8.0" + tags: [coverage_tag] + - name: mongodb-rapid + tasks: + - name: .server-version + display_name: "* MongoDB rapid" + run_on: + - rhel87-small + expansions: + VERSION: rapid + tags: [coverage_tag] + - name: mongodb-latest + tasks: + - name: .server-version + display_name: "* MongoDB latest" + run_on: + - rhel87-small + expansions: + VERSION: latest + tags: [coverage_tag] + + # Stable api tests + - name: stable-api-require-v1-rhel8-auth + tasks: + - name: .test-standard !.replica_set-noauth-ssl .server-5.0 + - name: .test-standard !.replica_set-noauth-ssl .server-6.0 + - name: 
.test-standard !.replica_set-noauth-ssl .server-7.0 + - name: .test-standard !.replica_set-noauth-ssl .server-8.0 + - name: .test-standard !.replica_set-noauth-ssl .server-rapid + - name: .test-standard !.replica_set-noauth-ssl .server-latest + display_name: Stable API require v1 RHEL8 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + REQUIRE_API_VERSION: "1" + MONGODB_API_VERSION: "1" + tags: [versionedApi_tag] + - name: stable-api-accept-v2-rhel8-auth + tasks: + - name: .test-standard .server-5.0 .standalone-noauth-nossl + - name: .test-standard .server-6.0 .standalone-noauth-nossl + - name: .test-standard .server-7.0 .standalone-noauth-nossl + - name: .test-standard .server-8.0 .standalone-noauth-nossl + - name: .test-standard .server-rapid .standalone-noauth-nossl + - name: .test-standard .server-latest .standalone-noauth-nossl + display_name: Stable API accept v2 RHEL8 Auth + run_on: + - rhel87-small + expansions: + AUTH: auth + ORCHESTRATION_FILE: versioned-api-testing.json + tags: [versionedApi_tag] + + # Standard nonlinux tests + - name: test-macos + tasks: + - name: .test-standard !.pypy + display_name: "* Test macOS" + run_on: + - macos-14 + tags: [standard-non-linux] + - name: test-macos-arm64 + tasks: + - name: .test-standard !.pypy .server-6.0 + - name: .test-standard !.pypy .server-7.0 + - name: .test-standard !.pypy .server-8.0 + - name: .test-standard !.pypy .server-rapid + - name: .test-standard !.pypy .server-latest + display_name: "* Test macOS Arm64" + run_on: + - macos-14-arm64 + tags: [standard-non-linux] + - name: test-win64 + tasks: + - name: .test-standard !.pypy + display_name: "* Test Win64" + run_on: + - windows-64-vsMulti-small + tags: [standard-non-linux] + - name: test-win32 + tasks: + - name: .test-standard !.pypy + display_name: "* Test Win32" + run_on: + - windows-64-vsMulti-small + expansions: + IS_WIN32: "1" + tags: [standard-non-linux] + + # Storage engine tests + - name: storage-inmemory-rhel8 + tasks: + - name: .test-standard .standalone-noauth-nossl + display_name: Storage InMemory RHEL8 + run_on: + - rhel87-small + expansions: + STORAGE_ENGINE: inmemory diff --git a/.evergreen/install-dependencies.sh b/.evergreen/install-dependencies.sh deleted file mode 100644 index 9f4bcdbb59..0000000000 --- a/.evergreen/install-dependencies.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -o xtrace # Write all commands first to stderr -set -o errexit # Exit the script with error if any of the commands fail - -# Copy PyMongo's test certificates over driver-evergreen-tools' -cp ${PROJECT_DIRECTORY}/test/certificates/* ${DRIVERS_TOOLS}/.evergreen/x509gen/ - -# Replace MongoOrchestration's client certificate. -cp ${PROJECT_DIRECTORY}/test/certificates/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem - -if [ -w /etc/hosts ]; then - SUDO="" -else - SUDO="sudo" -fi - -# Add 'server' and 'hostname_not_in_cert' as a hostnames -echo "127.0.0.1 server" | $SUDO tee -a /etc/hosts -echo "127.0.0.1 hostname_not_in_cert" | $SUDO tee -a /etc/hosts diff --git a/.evergreen/just.sh b/.evergreen/just.sh new file mode 100755 index 0000000000..bebbca8282 --- /dev/null +++ b/.evergreen/just.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -eu + +. 
.evergreen/scripts/setup-dev-env.sh +just "$@" diff --git a/.evergreen/perf.yml deleted file mode 100644 index 8b3638d535..0000000000 --- a/.evergreen/perf.yml +++ /dev/null @@ -1,256 +0,0 @@ -######################################## -# Evergreen Template for MongoDB Drivers -######################################## - -# When a task that used to pass starts to fail -# Go through all versions that may have been skipped to detect -# when the task started failing -stepback: true - -# Mark a failure as a system/bootstrap failure (purple box) rather than a task -# failure by default. -# Actual testing tasks are marked with `type: test` -command_type: system - -# Protect ourselves against a rogue test case, or curl gone wild, that runs forever -# Good rule of thumb: the averageish length a task takes, times 5 -# That roughly accounts for variable system performance for various buildvariants -exec_timeout_secs: 3600 # 60 minutes is the longest we'll ever run - -# What to do when evergreen hits the timeout (`post:` tasks are run automatically) -timeout: - - command: shell.exec - params: - script: | - ls -la - -functions: - "fetch source": - # Executes git clone and applies the submitted patch, if any - - command: git.get_project - params: - directory: "src" - # Applies the submitted patch, if any - # Deprecated. Should be removed. But still needed for certain agents (ZAP) - - command: git.apply_patch - # Make an evergreen expansion file with dynamic values - - command: shell.exec - params: - working_dir: "src" - script: | - # Get the current unique version of this checkout - if [ "${is_patch}" = "true" ]; then - CURRENT_VERSION=$(git describe)-patch-${version_id} - else - CURRENT_VERSION=latest - fi - - export DRIVERS_TOOLS="$(pwd)/../drivers-tools" - export PROJECT_DIRECTORY="$(pwd)" - - # Python has cygwin path problems on Windows. 
Detect prospective mongo-orchestration home directory - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - export DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS) - export PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY) - fi - - export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration" - export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin" - export UPLOAD_BUCKET="${project}" - - cat <<EOT > expansion.yml - CURRENT_VERSION: "$CURRENT_VERSION" - DRIVERS_TOOLS: "$DRIVERS_TOOLS" - MONGO_ORCHESTRATION_HOME: "$MONGO_ORCHESTRATION_HOME" - MONGODB_BINARIES: "$MONGODB_BINARIES" - UPLOAD_BUCKET: "$UPLOAD_BUCKET" - PROJECT_DIRECTORY: "$PROJECT_DIRECTORY" - PREPARE_SHELL: | - set -o errexit - set -o xtrace - export DRIVERS_TOOLS="$DRIVERS_TOOLS" - export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" - export MONGODB_BINARIES="$MONGODB_BINARIES" - export UPLOAD_BUCKET="$UPLOAD_BUCKET" - export PROJECT_DIRECTORY="$PROJECT_DIRECTORY" - - export TMPDIR="$MONGO_ORCHESTRATION_HOME/db" - export PATH="$MONGODB_BINARIES:$PATH" - export PROJECT="${project}" - EOT - # See what we've done - cat expansion.yml - - # Load the expansion file to make an evergreen variable with the current unique version - - command: expansions.update - params: - file: src/expansion.yml - - "prepare resources": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - rm -rf $DRIVERS_TOOLS - if [ "${project}" = "drivers-tools" ]; then - # If this was a patch build, doing a fresh clone would not actually test the patch - cp -R ${PROJECT_DIRECTORY}/ $DRIVERS_TOOLS - else - git clone https://github.com/mongodb-labs/drivers-evergreen-tools.git $DRIVERS_TOOLS - fi - echo "{ \"releases\": { \"default\": \"$MONGODB_BINARIES\" }}" > $MONGO_ORCHESTRATION_HOME/orchestration.config - - "bootstrap mongo-orchestration": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} sh ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh - # run-orchestration generates expansion file with the MONGODB_URI for the cluster - - command: expansions.update - params: - file: mo-expansion.yml - - "stop mongo-orchestration": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - sh ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh - - "run perf tests": - - command: shell.exec - type: test - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - PROJECT_DIRECTORY=${PROJECT_DIRECTORY} sh ${PROJECT_DIRECTORY}/.evergreen/run-perf-tests.sh - - "attach benchmark test results": - - command: attach.results - params: - file_location: src/report.json - - "send dashboard data": - - command: perf.send - params: - file: src/results.json - - "cleanup": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - rm -rf $DRIVERS_TOOLS || true - - "fix absolute paths": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - for filename in $(find ${DRIVERS_TOOLS} -name \*.json); do - perl -p -i -e "s|ABSOLUTE_PATH_REPLACEMENT_TOKEN|${DRIVERS_TOOLS}|g" $filename - done - - "windows fix": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do - cat $i | tr -d '\r' > $i.new - mv $i.new $i - done - # Copy client certificate because symlinks do not work on Windows. 
- cp ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem ${MONGO_ORCHESTRATION_HOME}/lib/client.pem - - "make files executable": - - command: shell.exec - params: - script: | - ${PREPARE_SHELL} - for i in $(find ${DRIVERS_TOOLS}/.evergreen ${PROJECT_DIRECTORY}/.evergreen -name \*.sh); do - chmod +x $i - done - - "install dependencies": - - command: shell.exec - params: - working_dir: "src" - script: | - ${PREPARE_SHELL} - file="${PROJECT_DIRECTORY}/.evergreen/install-dependencies.sh" - # Don't use ${file} syntax here because evergreen treats it as an empty expansion. - [ -f "$file" ] && sh $file || echo "$file not available, skipping" - -pre: - - func: "fetch source" - - func: "prepare resources" - # We don't run perf on Windows (yet) - #- func: "windows fix" - - func: "fix absolute paths" - - func: "make files executable" - # We're not testing with TLS (yet) - #- func: "install dependencies" - -post: - - func: "stop mongo-orchestration" - - func: "cleanup" - -tasks: - - name: "perf-3.6-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "3.6" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-4.0-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.0" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-4.2-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.2" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - - - name: "perf-4.4-standalone" - tags: ["perf"] - commands: - - func: "bootstrap mongo-orchestration" - vars: - VERSION: "4.4" - TOPOLOGY: "server" - - func: "run perf tests" - - func: "attach benchmark test results" - - func: "send dashboard data" - -buildvariants: - -- name: "perf-tests" - display_name: "Performance Benchmark Tests" - batchtime: 10080 # 7 days - run_on: centos6-perf - tasks: - - name: "perf-3.6-standalone" - - name: "perf-4.0-standalone" - - name: "perf-4.2-standalone" - - name: "perf-4.4-standalone" diff --git a/.evergreen/release.sh b/.evergreen/release.sh deleted file mode 100755 index 1fdd459ad9..0000000000 --- a/.evergreen/release.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -ex - -if [ "$(uname -s)" = "Darwin" ]; then - .evergreen/build-mac.sh -elif [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - .evergreen/build-windows.sh -else - .evergreen/build-manylinux.sh -fi diff --git a/.evergreen/remove-unimplemented-tests.sh b/.evergreen/remove-unimplemented-tests.sh new file mode 100755 index 0000000000..88ef137f86 --- /dev/null +++ b/.evergreen/remove-unimplemented-tests.sh @@ -0,0 +1,53 @@ +#!/bin/bash +PYMONGO=$(dirname "$(cd "$(dirname "$0")" || exit; pwd)") + +rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 +rm $PYMONGO/test/connection_monitoring/wait-queue-fairness.json # PYTHON-1873 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-application-error.json # PYTHON-4918 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-checkout-error.json # PYTHON-4918 +rm $PYMONGO/test/discovery_and_monitoring/unified/pool-clear-min-pool-size-error.json # PYTHON-4918 +rm $PYMONGO/test/client-side-encryption/spec/unified/client-bulkWrite-qe.json # PYTHON-4929 + +# Python doesn't implement DRIVERS-3064 +rm 
$PYMONGO/test/collection_management/listCollections-rawdata.json +rm $PYMONGO/test/crud/unified/aggregate-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-deleteMany-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-deleteOne-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-replaceOne-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-updateMany-rawdata.json +rm $PYMONGO/test/crud/unified/bulkWrite-updateOne-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-delete-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-replaceOne-rawdata.json +rm $PYMONGO/test/crud/unified/client-bulkWrite-update-rawdata.json +rm $PYMONGO/test/crud/unified/count-rawdata.json +rm $PYMONGO/test/crud/unified/countDocuments-rawdata.json +rm $PYMONGO/test/crud/unified/db-aggregate-rawdata.json +rm $PYMONGO/test/crud/unified/deleteMany-rawdata.json +rm $PYMONGO/test/crud/unified/deleteOne-rawdata.json +rm $PYMONGO/test/crud/unified/distinct-rawdata.json +rm $PYMONGO/test/crud/unified/estimatedDocumentCount-rawdata.json +rm $PYMONGO/test/crud/unified/find-rawdata.json +rm $PYMONGO/test/crud/unified/findOneAndDelete-rawdata.json +rm $PYMONGO/test/crud/unified/findOneAndReplace-rawdata.json +rm $PYMONGO/test/crud/unified/findOneAndUpdate-rawdata.json +rm $PYMONGO/test/crud/unified/insertMany-rawdata.json +rm $PYMONGO/test/crud/unified/insertOne-rawdata.json +rm $PYMONGO/test/crud/unified/replaceOne-rawdata.json +rm $PYMONGO/test/crud/unified/updateMany-rawdata.json +rm $PYMONGO/test/crud/unified/updateOne-rawdata.json +rm $PYMONGO/test/index_management/index-rawdata.json + +# PyMongo does not support modifyCollection +rm $PYMONGO/test/collection_management/modifyCollection-*.json + +# PYTHON-5248 - Remove support for MongoDB 4.0 +find $PYMONGO/test -type f -name 'pre-42-*.json' -delete + +# PYTHON-3359 - Remove Database and Collection level timeout override +rm $PYMONGO/test/csot/override-collection-timeoutMS.json +rm $PYMONGO/test/csot/override-database-timeoutMS.json + +# PYTHON-2943 - Socks5 Proxy Support +rm $PYMONGO/test/uri_options/proxy-options.json + +echo "Done removing unimplemented tests" diff --git a/.evergreen/resync-specs.sh index b64868c5a9..d2bd89c781 100755 --- a/.evergreen/resync-specs.sh +++ b/.evergreen/resync-specs.sh @@ -1,6 +1,6 @@ #!/bin/bash -# exit when any command fails -set -e +# Resync test files from the specifications repo. +set -eu PYMONGO=$(dirname "$(cd "$(dirname "$0")"; pwd)") SPECS=${MDB_SPECS:-~/Work/specifications} @@ -39,15 +39,18 @@ while getopts 'b:c:s:' flag; do done shift $((OPTIND-1)) -if [ -z $BRANCH ] +if [ -n "$BRANCH" ] then git -C $SPECS checkout $BRANCH fi # Ensure the JSON files are up to date. -cd $SPECS/source -make -cd - +if ! [ -n "${CI:-}" ] +then + cd $SPECS/source + make + cd - +fi # cpjson unified-test-format/tests/invalid unified-test-format/invalid # * param1: Path to spec tests dir in specifications repo # * param2: Path to where the corresponding tests live in Python. @@ -56,7 +59,7 @@ cpjson () { cd "$SPECS"/source/$1 find . -name '*.json' | grep -Ev "${BLOCKLIST}" | cpio -pdm \ $PYMONGO/test/$2 - printf "\nIgnored files for ${PWD}\n" + printf "\nIgnored files for ${PWD}:\n" IGNORED_FILES="$(printf "\n%s\n" "$(diff <(find . -name '*.json' | sort) \ <(find . -name '*.json' | grep -Ev "${BLOCKLIST}" | sort))" | \ sed -e '/^[0-9]/d' | sed -e 's|< ./||g' )" @@ -70,8 +73,11 @@ for spec in "$@" do # Match the spec dir name, the python test dir name, and/or common abbreviations. 
case "$spec" in - atlas-data-lake-testing|data_lake) - cpjson atlas-data-lake-testing/tests/ data_lake + auth) + cpjson auth/tests/ auth + ;; + bson-binary-vector|bson_binary_vector) + cpjson bson-binary-vector/tests/ bson_binary_vector ;; bson-corpus|bson_corpus) cpjson bson-corpus/tests/ bson_corpus @@ -95,12 +101,25 @@ do cpjson client-side-encryption/limits/ client-side-encryption/limits cpjson client-side-encryption/etc/data client-side-encryption/etc/data ;; + connection-monitoring|connection_monitoring) + cpjson connection-monitoring-and-pooling/tests/cmap-format connection_monitoring + ;; + connection-logging|connection_logging) + cpjson connection-monitoring-and-pooling/tests/logging connection_logging + ;; cmap|CMAP|connection-monitoring-and-pooling) - cpjson connection-monitoring-and-pooling/tests cmap - rm $PYMONGO/test/cmap/wait-queue-fairness.json # PYTHON-1873 + cpjson connection-monitoring-and-pooling/tests/logging connection_logging + cpjson connection-monitoring-and-pooling/tests/cmap-format connection_monitoring ;; apm|APM|command-monitoring|command_monitoring) - cpjson command-monitoring/tests command_monitoring + cpjson command-logging-and-monitoring/tests/monitoring command_monitoring + ;; + command-logging|command_logging) + cpjson command-logging-and-monitoring/tests/logging command_logging + ;; + clam|CLAM|command-logging-and-monitoring|command_logging_and_monitoring) + cpjson command-logging-and-monitoring/tests/logging command_logging + cpjson command-logging-and-monitoring/tests/monitoring command_monitoring ;; crud|CRUD) cpjson crud/tests/ crud @@ -111,18 +130,30 @@ do gridfs) cpjson gridfs/tests gridfs ;; + handshake) + cpjson mongodb-handshake/tests handshake + ;; + index|index-management) + cpjson index-management/tests index_management + ;; load-balancers|load_balancer) cpjson load-balancers/tests load_balancer ;; srv|SRV|initial-dns-seedlist-discovery|srv_seedlist) cpjson initial-dns-seedlist-discovery/tests/ srv_seedlist ;; + read-write-concern|read_write_concern) + cpjson read-write-concern/tests/operation read_write_concern/operation + ;; retryable-reads|retryable_reads) cpjson retryable-reads/tests/ retryable_reads ;; retryable-writes|retryable_writes) cpjson retryable-writes/tests/ retryable_writes ;; + run-command|run_command) + cpjson run-command/tests/ run_command + ;; sdam|SDAM|server-discovery-and-monitoring|discovery_and_monitoring) cpjson server-discovery-and-monitoring/tests/errors \ discovery_and_monitoring/errors @@ -132,8 +163,8 @@ do discovery_and_monitoring/sharded cpjson server-discovery-and-monitoring/tests/single \ discovery_and_monitoring/single - cpjson server-discovery-and-monitoring/tests/integration \ - discovery_and_monitoring_integration + cpjson server-discovery-and-monitoring/tests/unified \ + discovery_and_monitoring/unified cpjson server-discovery-and-monitoring/tests/load-balanced \ discovery_and_monitoring/load-balanced ;; @@ -142,6 +173,11 @@ do ;; server-selection|server_selection) cpjson server-selection/tests/ server_selection + rm -rf $PYMONGO/test/server_selection/logging # these tests live in server_selection_logging + cpjson server-selection/tests/logging server_selection_logging + ;; + server-selection-logging|server_selection_logging) + cpjson server-selection/tests/logging server_selection_logging ;; sessions) cpjson sessions/tests/ sessions @@ -149,7 +185,6 @@ do transactions|transactions-convenient-api) cpjson transactions/tests/ transactions cpjson transactions-convenient-api/tests/ 
transactions-convenient-api - rm $PYMONGO/test/transactions/legacy/errors-client.json # PYTHON-1894 ;; unified|unified-test-format) cpjson unified-test-format/tests/ unified-test-format/ diff --git a/.evergreen/run-atlas-tests.sh b/.evergreen/run-atlas-tests.sh deleted file mode 100644 index 3f8a1b45f0..0000000000 --- a/.evergreen/run-atlas-tests.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -# Exit on error and enable trace. -set -o errexit -set -o xtrace - -if [ -z "$PYTHON_BINARY" ]; then - echo "No python binary specified" - PYTHON_BINARY=$(command -v python3) || true - if [ -z "$PYTHON_BINARY" ]; then - echo "Cannot test without python3 installed!" - exit 1 - fi -fi - -. .evergreen/utils.sh -createvirtualenv $PYTHON_BINARY atlastest -trap "deactivate; rm -rf atlastest" EXIT HUP - -echo "Running tests without dnspython" -python test/atlas/test_connection.py - -python -m pip install dnspython -echo "Running tests with dnspython" -MUST_TEST_SRV="1" python test/atlas/test_connection.py diff --git a/.evergreen/run-doctests.sh b/.evergreen/run-doctests.sh deleted file mode 100644 index eebb0f784c..0000000000 --- a/.evergreen/run-doctests.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -${PYTHON_BINARY} setup.py clean -${PYTHON_BINARY} setup.py doc -t diff --git a/.evergreen/run-enterprise-auth-tests.sh b/.evergreen/run-enterprise-auth-tests.sh deleted file mode 100644 index e86e489d0d..0000000000 --- a/.evergreen/run-enterprise-auth-tests.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash - -# Don't trace to avoid secrets showing up in the logs -set -o errexit - -echo "Running enterprise authentication tests" - -export DB_USER="bob" -export DB_PASSWORD="pwd123" -if [ "Windows_NT" = "$OS" ]; then - echo "Setting GSSAPI_PASS" - export GSSAPI_PASS=${SASL_PASS} - export GSSAPI_CANONICALIZE="true" -else - # BUILD-3830 - touch ${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty - export KRB5_CONFIG=${PROJECT_DIRECTORY}/.evergreen/krb5.conf.empty - - echo "Writing keytab" - echo ${KEYTAB_BASE64} | base64 -d > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab - echo "Running kinit" - kinit -k -t ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab -p ${PRINCIPAL} -fi -echo "Setting GSSAPI variables" -export GSSAPI_HOST=${SASL_HOST} -export GSSAPI_PORT=${SASL_PORT} -export GSSAPI_PRINCIPAL=${PRINCIPAL} - - -echo "Running tests" -${PYTHON_BINARY} setup.py clean -${PYTHON_BINARY} setup.py test --xunit-output=xunit-results diff --git a/.evergreen/run-mockupdb-tests.sh b/.evergreen/run-mockupdb-tests.sh deleted file mode 100755 index a76ed6316f..0000000000 --- a/.evergreen/run-mockupdb-tests.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Must be run from pymongo repo root -set -o xtrace -set -o errexit - -. .evergreen/utils.sh - -${PYTHON_BINARY} setup.py clean - -createvirtualenv ${PYTHON_BINARY} mockuptests -trap "deactivate; rm -rf mockuptests" EXIT HUP - -# Install PyMongo from git clone so mockup-tests don't -# download it from pypi. -python -m pip install . 
-python -m pip install --upgrade 'https://github.com/ajdavis/mongo-mockup-db/archive/master.zip' -cd ./test/mockupdb -python -m unittest discover -v diff --git a/.evergreen/run-mod-wsgi-tests.sh b/.evergreen/run-mod-wsgi-tests.sh deleted file mode 100644 index 9a167895f8..0000000000 --- a/.evergreen/run-mod-wsgi-tests.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -set -o xtrace -set -o errexit - -APACHE=$(command -v apache2 || command -v /usr/lib/apache2/mpm-prefork/apache2) || true -if [ -n "$APACHE" ]; then - APACHE_CONFIG=apache24ubuntu161404.conf -else - APACHE=$(command -v httpd) || true - if [ -z "$APACHE" ]; then - echo "Could not find apache2 binary" - exit 1 - else - APACHE_CONFIG=apache22amazon.conf - fi -fi - - -PYTHON_VERSION=$(${PYTHON_BINARY} -c "import sys; sys.stdout.write('.'.join(str(val) for val in sys.version_info[:2]))") - -export MOD_WSGI_SO=/opt/python/mod_wsgi/python_version/$PYTHON_VERSION/mod_wsgi_version/$MOD_WSGI_VERSION/mod_wsgi.so -export PYTHONHOME=/opt/python/$PYTHON_VERSION - -cd .. -$APACHE -k start -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG} -trap '$APACHE -k stop -f ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${APACHE_CONFIG}' EXIT HUP - -set +e -wget -t 1 -T 10 -O - "http://localhost:8080${PROJECT_DIRECTORY}" -STATUS=$? -set -e - -# Debug -cat error_log - -if [ $STATUS != 0 ]; then - exit $STATUS -fi - -${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 -t 100 parallel http://localhost:8080${PROJECT_DIRECTORY} - -${PYTHON_BINARY} ${PROJECT_DIRECTORY}/test/mod_wsgi_test/test_client.py -n 25000 serial http://localhost:8080${PROJECT_DIRECTORY} diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh index 83f3975e9e..b8330de511 100755 --- a/.evergreen/run-mongodb-aws-ecs-test.sh +++ b/.evergreen/run-mongodb-aws-ecs-test.sh @@ -1,7 +1,6 @@ #!/bin/bash - -# Don't trace since the URI contains a password that shouldn't show up in the logs -set -o errexit # Exit the script with error if any of the commands fail +# Script run on an ECS host to test MONGODB-AWS. +set -eu ############################################ # Main Program # @@ -20,18 +19,20 @@ fi # Now we can safely enable xtrace set -o xtrace -# Install python3.7 with pip. -apt-get update -apt install python3.7 python3-pip -y - -authtest () { - echo "Running MONGODB-AWS ECS authentication tests with $PYTHON" - $PYTHON --version - $PYTHON -m pip install --upgrade wheel setuptools pip - cd src - $PYTHON -m pip install '.[aws]' - $PYTHON test/auth_aws/test_auth_aws.py - cd - -} +# Install python with pip. +PYTHON_VER="python3.10" +apt-get -qq update < /dev/null > /dev/null +apt-get -q install -y software-properties-common +# Use openpgp to avoid gpg key timeout. 
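+# (The deadsnakes PPA added below provides $PYTHON_VER builds for Ubuntu.)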
+mkdir -p $HOME/.gnupg +echo "keyserver keys.openpgp.org" >> $HOME/.gnupg/gpg.conf +add-apt-repository -y 'ppa:deadsnakes/ppa' +apt-get -qq install $PYTHON_VER $PYTHON_VER-venv build-essential $PYTHON_VER-dev -y < /dev/null > /dev/null -PYTHON="python3.7" authtest +export PYTHON_BINARY=$PYTHON_VER +export SET_XTRACE_ON=1 +cd src +rm -rf .venv +rm -f .evergreen/scripts/test-env.sh || true +bash ./.evergreen/just.sh setup-tests auth_aws ecs-remote +bash .evergreen/just.sh run-tests diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh deleted file mode 100755 index 9a33507cc8..0000000000 --- a/.evergreen/run-mongodb-aws-test.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit # Exit the script with error if any of the commands fail - -############################################ -# Main Program # -############################################ - -# Supported/used environment variables: -# MONGODB_URI Set the URI, including an optional username/password to use -# to connect to the server via MONGODB-AWS authentication -# mechanism. -# PYTHON_BINARY The Python version to use. - -echo "Running MONGODB-AWS authentication tests" -# ensure no secrets are printed in log files -set +x - -# load the script -shopt -s expand_aliases # needed for `urlencode` alias -[ -s "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" ] && source "${PROJECT_DIRECTORY}/prepare_mongodb_aws.sh" - -MONGODB_URI=${MONGODB_URI:-"mongodb://localhost"} -MONGODB_URI="${MONGODB_URI}/aws?authMechanism=MONGODB-AWS" -if [[ -n ${SESSION_TOKEN} ]]; then - MONGODB_URI="${MONGODB_URI}&authMechanismProperties=AWS_SESSION_TOKEN:${SESSION_TOKEN}" -fi - -export MONGODB_URI="$MONGODB_URI" - -if [ "$ASSERT_NO_URI_CREDS" = "true" ]; then - if echo "$MONGODB_URI" | grep -q "@"; then - echo "MONGODB_URI unexpectedly contains user credentials!"; - exit 1 - fi -fi - -# show test output -set -x - -# Workaround macOS python 3.9 incompatibility with system virtualenv. -if [ "$(uname -s)" = "Darwin" ]; then - VIRTUALENV="/Library/Frameworks/Python.framework/Versions/3.9/bin/python3 -m virtualenv" -else - VIRTUALENV=$(command -v virtualenv) -fi - -authtest () { - if [ "Windows_NT" = "$OS" ]; then - PYTHON=$(cygpath -m $PYTHON) - fi - - echo "Running MONGODB-AWS authentication tests with $PYTHON" - $PYTHON --version - - $VIRTUALENV -p $PYTHON --never-download venvaws - if [ "Windows_NT" = "$OS" ]; then - . venvaws/Scripts/activate - else - . venvaws/bin/activate - fi - python -m pip install '.[aws]' - python test/auth_aws/test_auth_aws.py - deactivate - rm -rf venvaws -} - -PYTHON=${PYTHON_BINARY:-} -if [ -z "$PYTHON" ]; then - echo "Cannot test without specifying PYTHON_BINARY" - exit 1 -fi - -authtest diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh new file mode 100755 index 0000000000..b34013a6ac --- /dev/null +++ b/.evergreen/run-mongodb-oidc-test.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Script run on a remote host to test MONGODB-OIDC. +set -eu + +echo "Running MONGODB-OIDC authentication tests on ${OIDC_ENV}..." + +if [ ${OIDC_ENV} == "k8s" ]; then + SUB_TEST_NAME=$K8S_VARIANT-remote +else + SUB_TEST_NAME=$OIDC_ENV-remote + sudo apt-get install -y python3-dev build-essential +fi + +bash ./.evergreen/just.sh setup-tests auth_oidc $SUB_TEST_NAME +bash ./.evergreen/just.sh run-tests "${@:1}" + +echo "Running MONGODB-OIDC authentication tests on ${OIDC_ENV}... done." 
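A minimal invocation sketch for the script above, assuming the calling Evergreen
task exports OIDC_ENV (and K8S_VARIANT when OIDC_ENV is k8s, matching the
eks/aks/gke sub-tests defined in generate_config.py); any extra arguments are
forwarded verbatim to the run-tests step. The values shown are illustrative only:

    export OIDC_ENV=k8s
    export K8S_VARIANT=eks
    bash .evergreen/run-mongodb-oidc-test.sh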
diff --git a/.evergreen/run-ocsp-tests.sh b/.evergreen/run-ocsp-tests.sh deleted file mode 100644 index 0eb101aaa2..0000000000 --- a/.evergreen/run-ocsp-tests.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -# For createvirtualenv. -. .evergreen/utils.sh - -if [ -z "$PYTHON_BINARY" ]; then - echo "No python binary specified" - PYTHON=$(command -v python3) || true - if [ -z "$PYTHON" ]; then - echo "Cannot test without python3 installed!" - exit 1 - fi -else - PYTHON="$PYTHON_BINARY" -fi - -createvirtualenv $PYTHON ocsptest -trap "deactivate; rm -rf ocsptest" EXIT HUP - -python -m pip install --prefer-binary pyopenssl requests service_identity - -OCSP_TLS_SHOULD_SUCCEED=${OCSP_TLS_SHOULD_SUCCEED} CA_FILE=${CA_FILE} python test/ocsp/test_ocsp.py diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh deleted file mode 100644 index bc447a9569..0000000000 --- a/.evergreen/run-perf-tests.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -set -o xtrace -set -o errexit - -git clone https://github.com/mongodb-labs/driver-performance-test-data.git -cd driver-performance-test-data -tar xf extended_bson.tgz -tar xf parallel.tgz -tar xf single_and_multi_document.tgz -cd .. - -export TEST_PATH="${PROJECT_DIRECTORY}/driver-performance-test-data" -export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" - -MTCBIN=/opt/mongodbtoolchain/v3/bin -VIRTUALENV="$MTCBIN/virtualenv -p $MTCBIN/python3" - -$VIRTUALENV pyperftest -. pyperftest/bin/activate -python -m pip install simplejson - -python setup.py build_ext -i -start_time=$(date +%s) -python test/performance/perf_test.py --locals -end_time=$(date +%s) -elapsed_secs=$((end_time-start_time)) - -cat results.json - -echo "{\"failures\": 0, \"results\": [{\"status\": \"pass\", \"exit_code\": 0, \"test_file\": \"BenchMarkTests\", \"start\": $start_time, \"end\": $end_time, \"elapsed\": $elapsed_secs}]}" > report.json - -cat report.json diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh index 4367bad246..c14215244e 100755 --- a/.evergreen/run-tests.sh +++ b/.evergreen/run-tests.sh @@ -1,218 +1,42 @@ #!/bin/bash -set -o errexit # Exit the script with error if any of the commands fail +# Run a test suite that was configured with setup-tests.sh. +set -eu -# Supported/used environment variables: -# SET_XTRACE_ON Set to non-empty to write all commands first to stderr. -# AUTH Set to enable authentication. Defaults to "noauth" -# SSL Set to enable SSL. Defaults to "nossl" -# PYTHON_BINARY The Python version to use. Defaults to whatever is available -# GREEN_FRAMEWORK The green framework to test with, if any. -# C_EXTENSIONS Pass --no_ext to setup.py, or not. -# COVERAGE If non-empty, run the test suite with coverage. -# TEST_ENCRYPTION If non-empty, install pymongocrypt. -# LIBMONGOCRYPT_URL The URL to download libmongocrypt. -# TEST_CRYPT_SHARED If non-empty, install crypt_shared lib. 
+SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0}) +SCRIPT_DIR="$( cd -- "$SCRIPT_DIR" > /dev/null 2>&1 && pwd )" +ROOT_DIR="$(dirname $SCRIPT_DIR)" -if [ -n "${SET_XTRACE_ON}" ]; then - set -o xtrace -else - set +x -fi - -AUTH=${AUTH:-noauth} -SSL=${SSL:-nossl} -PYTHON_BINARY=${PYTHON_BINARY:-} -GREEN_FRAMEWORK=${GREEN_FRAMEWORK:-} -C_EXTENSIONS=${C_EXTENSIONS:-} -COVERAGE=${COVERAGE:-} -COMPRESSORS=${COMPRESSORS:-} -MONGODB_API_VERSION=${MONGODB_API_VERSION:-} -TEST_ENCRYPTION=${TEST_ENCRYPTION:-} -TEST_CRYPT_SHARED=${TEST_CRYPT_SHARED:-} -LIBMONGOCRYPT_URL=${LIBMONGOCRYPT_URL:-} -DATA_LAKE=${DATA_LAKE:-} -TEST_ARGS="" - -if [ -n "$COMPRESSORS" ]; then - export COMPRESSORS=$COMPRESSORS -fi - -if [ -n "$MONGODB_API_VERSION" ]; then - export MONGODB_API_VERSION=$MONGODB_API_VERSION -fi +PREV_DIR=$(pwd) +cd $ROOT_DIR -if [ "$AUTH" != "noauth" ]; then - if [ ! -z "$DATA_LAKE" ]; then - export DB_USER="mhuser" - export DB_PASSWORD="pencil" - elif [ ! -z "$TEST_SERVERLESS" ]; then - export DB_USER=$SERVERLESS_ATLAS_USER - export DB_PASSWORD=$SERVERLESS_ATLAS_PASSWORD - else - export DB_USER="bob" - export DB_PASSWORD="pwd123" - fi -fi - -if [ "$SSL" != "nossl" ]; then - export CLIENT_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/client.pem" - export CA_PEM="$DRIVERS_TOOLS/.evergreen/x509gen/ca.pem" - - if [ -n "$TEST_LOADBALANCER" ]; then - export SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}&tls=true" - export MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}&tls=true" - fi -fi - -# For createvirtualenv. -. .evergreen/utils.sh - -if [ -z "$PYTHON_BINARY" ]; then - # Use Python 3 from the server toolchain to test on ARM, POWER or zSeries if a - # system python3 doesn't exist or exists but is older than 3.7. - if is_python_37 "$(command -v python3)"; then - PYTHON=$(command -v python3) - elif is_python_37 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then - PYTHON=$(command -v /opt/mongodbtoolchain/v3/bin/python3) - else - echo "Cannot test without python3.7+ installed!" - fi -elif [ "$COMPRESSORS" = "snappy" ]; then - createvirtualenv $PYTHON_BINARY snappytest - trap "deactivate; rm -rf snappytest" EXIT HUP - python -m pip install python-snappy - PYTHON=python -elif [ "$COMPRESSORS" = "zstd" ]; then - createvirtualenv $PYTHON_BINARY zstdtest - trap "deactivate; rm -rf zstdtest" EXIT HUP - python -m pip install zstandard - PYTHON=python +# Try to source the env file. +if [ -f $SCRIPT_DIR/scripts/env.sh ]; then + echo "Sourcing env inputs" + . $SCRIPT_DIR/scripts/env.sh else - PYTHON="$PYTHON_BINARY" -fi - -# PyOpenSSL test setup. -if [ -n "$TEST_PYOPENSSL" ]; then - createvirtualenv $PYTHON pyopenssltest - trap "deactivate; rm -rf pyopenssltest" EXIT HUP - PYTHON=python - - python -m pip install --prefer-binary pyopenssl requests service_identity -fi - -if [ -n "$TEST_ENCRYPTION" ]; then - createvirtualenv $PYTHON venv-encryption - trap "deactivate; rm -rf venv-encryption" EXIT HUP - PYTHON=python - - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - # PYTHON-2808 Ensure this machine has the CA cert for google KMS. - powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/" > /dev/null || true - fi - - if [ -z "$LIBMONGOCRYPT_URL" ]; then - echo "Cannot test client side encryption without LIBMONGOCRYPT_URL!" - exit 1 - fi - curl -O "$LIBMONGOCRYPT_URL" - mkdir libmongocrypt - tar xzf libmongocrypt.tar.gz -C ./libmongocrypt - ls -la libmongocrypt - ls -la libmongocrypt/nocrypto - # Use the nocrypto build to avoid dependency issues with older windows/python versions. 
- BASE=$(pwd)/libmongocrypt/nocrypto - if [ -f "${BASE}/lib/libmongocrypt.so" ]; then - PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.so - elif [ -f "${BASE}/lib/libmongocrypt.dylib" ]; then - PYMONGOCRYPT_LIB=${BASE}/lib/libmongocrypt.dylib - elif [ -f "${BASE}/bin/mongocrypt.dll" ]; then - PYMONGOCRYPT_LIB=${BASE}/bin/mongocrypt.dll - # libmongocrypt's windows dll is not marked executable. - chmod +x $PYMONGOCRYPT_LIB - PYMONGOCRYPT_LIB=$(cygpath -m $PYMONGOCRYPT_LIB) - elif [ -f "${BASE}/lib64/libmongocrypt.so" ]; then - PYMONGOCRYPT_LIB=${BASE}/lib64/libmongocrypt.so - else - echo "Cannot find libmongocrypt shared object file" - exit 1 - fi - export PYMONGOCRYPT_LIB - - # TODO: Test with 'pip install pymongocrypt' - git clone https://github.com/mongodb/libmongocrypt.git libmongocrypt_git - python -m pip install --prefer-binary -r .evergreen/test-encryption-requirements.txt - python -m pip install ./libmongocrypt_git/bindings/python - python -c "import pymongocrypt; print('pymongocrypt version: '+pymongocrypt.__version__)" - python -c "import pymongocrypt; print('libmongocrypt version: '+pymongocrypt.libmongocrypt_version())" - # PATH is updated by PREPARE_SHELL for access to mongocryptd. - - # Get access to the AWS temporary credentials: - # CSFLE_AWS_TEMP_ACCESS_KEY_ID, CSFLE_AWS_TEMP_SECRET_ACCESS_KEY, CSFLE_AWS_TEMP_SESSION_TOKEN - . $DRIVERS_TOOLS/.evergreen/csfle/set-temp-creds.sh - - if [ -n "$TEST_CRYPT_SHARED" ]; then - echo "Testing CSFLE with crypt_shared lib" - $PYTHON $DRIVERS_TOOLS/.evergreen/mongodl.py --component crypt_shared \ - --version latest --out ../crypt_shared/ - export DYLD_FALLBACK_LIBRARY_PATH=../crypt_shared/lib/:$DYLD_FALLBACK_LIBRARY_PATH - export LD_LIBRARY_PATH=../crypt_shared/lib:$LD_LIBRARY_PATH - export PATH=../crypt_shared/bin:$PATH - fi - # Only run the encryption tests. - TEST_ARGS="-s test.test_encryption" + echo "Not sourcing env inputs" fi -if [ -n "$DATA_LAKE" ]; then - TEST_ARGS="-s test.test_data_lake" -fi - -# Don't download unittest-xml-reporting from pypi, which often fails. -if $PYTHON -c "import xmlrunner"; then - # The xunit output dir must be a Python style absolute path. - XUNIT_DIR="$(pwd)/xunit-results" - if [ "Windows_NT" = "$OS" ]; then # Magic variable in cygwin - XUNIT_DIR=$(cygpath -m $XUNIT_DIR) - fi - OUTPUT="--xunit-output=${XUNIT_DIR}" +# Handle test inputs. +if [ -f $SCRIPT_DIR/scripts/test-env.sh ]; then + echo "Sourcing test inputs" + . $SCRIPT_DIR/scripts/test-env.sh else - OUTPUT="" + echo "Missing test inputs, please run 'just setup-tests'" + exit 1 fi -echo "Running $AUTH tests over $SSL with python $PYTHON" -$PYTHON -c 'import sys; print(sys.version)' +cleanup_tests() { + # Avoid leaving the lock file in a changed state when we change the resolution type. + if [ -n "${TEST_MIN_DEPS:-}" ]; then + git checkout uv.lock || true + fi + cd $PREV_DIR +} -# Run the tests, and store the results in Evergreen compatible XUnit XML -# files in the xunit-results/ directory. - -# Run the tests with coverage if requested and coverage is installed. -# Only cover CPython. PyPy reports suspiciously low coverage. -PYTHON_IMPL=$($PYTHON -c "import platform; print(platform.python_implementation())") -COVERAGE_ARGS="" -if [ -n "$COVERAGE" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - if $PYTHON -m coverage --version; then - echo "INFO: coverage is installed, running tests with coverage..." - COVERAGE_ARGS="-m coverage run --branch" - else - echo "INFO: coverage is not installed, running tests without coverage..." 
- fi -fi +trap "cleanup_tests" SIGINT ERR -$PYTHON setup.py clean -if [ -z "$GREEN_FRAMEWORK" ]; then - if [ -z "$C_EXTENSIONS" ] && [ "$PYTHON_IMPL" = "CPython" ]; then - # Fail if the C extensions fail to build. +# Start the test runner. +uv run ${UV_ARGS} --reinstall-package pymongo .evergreen/scripts/run_tests.py "$@" - # This always sets 0 for exit status, even if the build fails, due - # to our hack to install PyMongo without C extensions when build - # deps aren't available. - $PYTHON setup.py build_ext -i - # This will set a non-zero exit status if either import fails, - # causing this script to exit. - $PYTHON -c "from bson import _cbson; from pymongo import _cmessage" - fi - - $PYTHON $COVERAGE_ARGS setup.py $C_EXTENSIONS test $TEST_ARGS $OUTPUT -else - # --no_ext has to come before "test" so there is no way to toggle extensions here. - $PYTHON green_framework_test.py $GREEN_FRAMEWORK $OUTPUT -fi +cleanup_tests diff --git a/.evergreen/scripts/__init__.py b/.evergreen/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.evergreen/scripts/check-import-time.sh b/.evergreen/scripts/check-import-time.sh new file mode 100755 index 0000000000..f7a1117b97 --- /dev/null +++ b/.evergreen/scripts/check-import-time.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Check for regressions in the import time of pymongo. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) + +source $HERE/env.sh + +pushd $HERE/../.. >/dev/null + +BASE_SHA="$1" +HEAD_SHA="$2" + +. .evergreen/utils.sh + +if [ -z "${PYTHON_BINARY:-}" ]; then + PYTHON_BINARY=$(find_python3) +fi + +# Use the previous commit if this was not a PR run. +if [ "$BASE_SHA" == "$HEAD_SHA" ]; then + BASE_SHA=$(git rev-parse HEAD~1) +fi + +function get_import_time() { + local log_file + createvirtualenv "$PYTHON_BINARY" import-venv + python -m pip install -q ".[aws,encryption,gssapi,ocsp,snappy,zstd]" + # Import once to cache modules + python -c "import pymongo" + log_file="pymongo-$1.log" + python -X importtime -c "import pymongo" 2> $log_file +} + +get_import_time $HEAD_SHA +git stash || true +git checkout $BASE_SHA +get_import_time $BASE_SHA +git checkout $HEAD_SHA +git stash apply || true +python tools/compare_import_time.py $HEAD_SHA $BASE_SHA + +popd >/dev/null diff --git a/.evergreen/scripts/cleanup.sh b/.evergreen/scripts/cleanup.sh new file mode 100755 index 0000000000..f04a936fd2 --- /dev/null +++ b/.evergreen/scripts/cleanup.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Clean up resources at the end of an evergreen run. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) + +# Try to source the env file. +if [ -f $HERE/env.sh ]; then + echo "Sourcing env file" + source $HERE/env.sh +fi + +rm -rf "${DRIVERS_TOOLS}" || true +rm -f $HERE/../../secrets-export.sh || true diff --git a/.evergreen/scripts/configure-env.sh b/.evergreen/scripts/configure-env.sh new file mode 100755 index 0000000000..8dc328aab3 --- /dev/null +++ b/.evergreen/scripts/configure-env.sh @@ -0,0 +1,114 @@ +#!/bin/bash +# Configure an evergreen test environment. 
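+# It writes .evergreen/scripts/env.sh with the exported paths, clones
+# drivers-evergreen-tools, and emits an expansion.yml for the EVG config.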
+set -eu
+
+# Get the current unique version of this checkout.
+# shellcheck disable=SC2154
+if [ "${is_patch:-}" = "true" ]; then
+  # shellcheck disable=SC2154
+  CURRENT_VERSION="$(git describe)-patch-$version_id"
+else
+  CURRENT_VERSION=latest
+fi
+
+PROJECT_DIRECTORY="$(pwd)"
+DRIVERS_TOOLS="$(dirname $PROJECT_DIRECTORY)/drivers-tools"
+CARGO_HOME=${CARGO_HOME:-${DRIVERS_TOOLS}/.cargo}
+UV_TOOL_DIR=$PROJECT_DIRECTORY/.local/uv/tools
+UV_CACHE_DIR=$PROJECT_DIRECTORY/.local/uv/cache
+DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS/.bin"
+MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin"
+
+# On Evergreen jobs, "CI" will be set, and we don't want to write to $HOME.
+if [ "${CI:-}" == "true" ]; then
+  PYMONGO_BIN_DIR=${DRIVERS_TOOLS_BINARIES:-}
+# We want to use a path that's already on PATH on spawn hosts.
+else
+  PYMONGO_BIN_DIR=$HOME/cli_bin
+fi
+
+PATH_EXT="$MONGODB_BINARIES:$DRIVERS_TOOLS_BINARIES:$PYMONGO_BIN_DIR:\$PATH"
+
+# Python has cygwin path problems on Windows. Detect the prospective mongo-orchestration home directory.
+if [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin
+  DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS)
+  PROJECT_DIRECTORY=$(cygpath -m $PROJECT_DIRECTORY)
+  CARGO_HOME=$(cygpath -m $CARGO_HOME)
+  UV_TOOL_DIR=$(cygpath -m "$UV_TOOL_DIR")
+  UV_CACHE_DIR=$(cygpath -m "$UV_CACHE_DIR")
+  DRIVERS_TOOLS_BINARIES=$(cygpath -m "$DRIVERS_TOOLS_BINARIES")
+  MONGODB_BINARIES=$(cygpath -m "$MONGODB_BINARIES")
+  PYMONGO_BIN_DIR=$(cygpath -m "$PYMONGO_BIN_DIR")
+fi
+
+SCRIPT_DIR="$PROJECT_DIRECTORY/.evergreen/scripts"
+
+if [ -f "$SCRIPT_DIR/env.sh" ]; then
+  echo "Reading $SCRIPT_DIR/env.sh file"
+  . "$SCRIPT_DIR/env.sh"
+  exit 0
+fi
+
+export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration"
+export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin"
+
+cat <<EOT > "$SCRIPT_DIR"/env.sh
+export PROJECT_DIRECTORY="$PROJECT_DIRECTORY"
+export CURRENT_VERSION="$CURRENT_VERSION"
+export DRIVERS_TOOLS="$DRIVERS_TOOLS"
+export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME"
+export MONGODB_BINARIES="$MONGODB_BINARIES"
+export DRIVERS_TOOLS_BINARIES="$DRIVERS_TOOLS_BINARIES"
+
+export CARGO_HOME="$CARGO_HOME"
+export UV_TOOL_DIR="$UV_TOOL_DIR"
+export UV_CACHE_DIR="$UV_CACHE_DIR"
+export UV_TOOL_BIN_DIR="$DRIVERS_TOOLS_BINARIES"
+export PYMONGO_BIN_DIR="$PYMONGO_BIN_DIR"
+export PATH="$PATH_EXT"
+# shellcheck disable=SC2154
+export PROJECT="${project:-mongo-python-driver}"
+export PIP_QUIET=1
+EOT
+
+rm -rf $DRIVERS_TOOLS
+BRANCH=master
+ORG=mongodb-labs
+git clone --branch $BRANCH https://github.com/$ORG/drivers-evergreen-tools.git $DRIVERS_TOOLS
+
+# Write the .env file for drivers-tools.
+cat <<EOT > ${DRIVERS_TOOLS}/.env
+SKIP_LEGACY_SHELL=1
+DRIVERS_TOOLS="$DRIVERS_TOOLS"
+MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME"
+MONGODB_BINARIES="$MONGODB_BINARIES"
+EOT
+
+# Add these expansions to make it easier to call out to test scripts from the EVG yaml.
+cat <<EOT > expansion.yml
+DRIVERS_TOOLS: "$DRIVERS_TOOLS"
+PROJECT_DIRECTORY: "$PROJECT_DIRECTORY"
+EOT
+
+# If the toolchain is available, symlink binaries to the bin dir. This has to be done
+# after drivers-tools is cloned, since we might be using its binary dir.
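+# (just, uv, and uvx are the CLI entry points used by the other .evergreen scripts.)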
+_bin_path=""
+if [ "Windows_NT" == "${OS:-}" ]; then
+  _bin_path="/cygdrive/c/Python/Current/Scripts"
+elif [ "$(uname -s)" == "Darwin" ]; then
+  _bin_path="/Library/Frameworks/Python.Framework/Versions/Current/bin"
+else
+  _bin_path="/opt/python/Current/bin"
+fi
+if [ -d "${_bin_path}" ]; then
+  _suffix=""
+  if [ "Windows_NT" == "${OS:-}" ]; then
+    _suffix=".exe"
+  fi
+  echo "Symlinking binaries from toolchain"
+  mkdir -p $PYMONGO_BIN_DIR
+  ln -s ${_bin_path}/just${_suffix} $PYMONGO_BIN_DIR/just${_suffix}
+  ln -s ${_bin_path}/uv${_suffix} $PYMONGO_BIN_DIR/uv${_suffix}
+  ln -s ${_bin_path}/uvx${_suffix} $PYMONGO_BIN_DIR/uvx${_suffix}
+fi
diff --git a/.evergreen/scripts/create-spec-pr.sh b/.evergreen/scripts/create-spec-pr.sh
new file mode 100755
index 0000000000..a5e49bb211
--- /dev/null
+++ b/.evergreen/scripts/create-spec-pr.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+tools="$(realpath -s "../drivers-tools")"
+pushd $tools/.evergreen/github_app || exit
+
+owner="mongodb"
+repo="mongo-python-driver"
+
+# Bootstrap the app.
+echo "bootstrapping"
+source utils.sh
+bootstrap drivers/comment-bot
+
+# Run the app.
+source ./secrets-export.sh
+
+# Get a github access token for the git checkout.
+echo "Getting github token..."
+
+token=$(bash ./get-access-token.sh $repo $owner)
+if [ -z "${token}" ]; then
+  echo "Failed to get github access token!"
+  popd || exit
+  exit 1
+fi
+echo "Getting github token... done."
+popd || exit
+
+# Make the git checkout and create a new branch.
+echo "Creating the git checkout..."
+branch="spec-resync-"$(date '+%m-%d-%Y')
+
+git remote set-url origin https://x-access-token:${token}@github.com/$owner/$repo.git
+git checkout -b $branch "origin/master"
+git add ./test
+git commit -am "resyncing specs $(date '+%m-%d-%Y')"
+echo "Creating the git checkout... done."
+
+git push origin $branch
+resp=$(curl -L \
+  -X POST \
+  -H "Accept: application/vnd.github+json" \
+  -H "Authorization: Bearer $token" \
+  -H "X-GitHub-Api-Version: 2022-11-28" \
+  -d "{\"title\":\"[Spec Resync] $(date '+%m-%d-%Y')\",\"body\":\"$(cat "$1")\",\"head\":\"${branch}\",\"base\":\"master\"}" \
+  --url https://api.github.com/repos/$owner/$repo/pulls)
+echo $resp | jq '.html_url'
+echo "Creating the PR... done."
+
+rm -rf $tools
diff --git a/.evergreen/scripts/download-and-merge-coverage.sh b/.evergreen/scripts/download-and-merge-coverage.sh
new file mode 100755
index 0000000000..c006813ba9
--- /dev/null
+++ b/.evergreen/scripts/download-and-merge-coverage.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+# Download all the task coverage files.
+set -eu
+aws s3 cp --recursive s3://"$1"/coverage/"$2"/"$3"/coverage/ coverage/
diff --git a/.evergreen/scripts/generate-config.sh b/.evergreen/scripts/generate-config.sh
new file mode 100755
index 0000000000..70b4578cf9
--- /dev/null
+++ b/.evergreen/scripts/generate-config.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Entry point for the generate-config pre-commit hook.
+
+set -eu
+
+python .evergreen/scripts/generate_config.py
diff --git a/.evergreen/scripts/generate_config.py b/.evergreen/scripts/generate_config.py
new file mode 100644
index 0000000000..daec0841d5
--- /dev/null
+++ b/.evergreen/scripts/generate_config.py
@@ -0,0 +1,1177 @@
+# Note: See CONTRIBUTING.md for how to update/run this file.
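+# It defines the build variants, tasks, and functions with the shrub.py API
+# and writes them out via the write_*_to_file helpers imported below.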
+from __future__ import annotations + +import sys +from itertools import product + +from generate_config_utils import ( + ALL_PYTHONS, + ALL_VERSIONS, + BATCHTIME_DAY, + BATCHTIME_WEEK, + C_EXTS, + CPYTHONS, + DEFAULT_HOST, + HOSTS, + MIN_MAX_PYTHON, + OTHER_HOSTS, + PYPYS, + SYNCS, + TOPOLOGIES, + create_variant, + get_assume_role, + get_s3_put, + get_standard_auth_ssl, + get_subprocess_exec, + get_task_name, + get_variant_name, + get_versions_from, + handle_c_ext, + write_functions_to_file, + write_tasks_to_file, + write_variants_to_file, + zip_cycle, +) +from shrub.v3.evg_build_variant import BuildVariant +from shrub.v3.evg_command import ( + FunctionCall, + archive_targz_pack, + attach_results, + attach_xunit_results, + ec2_assume_role, + expansions_update, + git_get_project, +) +from shrub.v3.evg_task import EvgTask, EvgTaskDependency, EvgTaskRef + +############## +# Variants +############## + + +def create_ocsp_variants() -> list[BuildVariant]: + variants = [] + # OCSP tests on default host with all servers v4.4+. + # MongoDB servers on Windows and MacOS do not staple OCSP responses and only support RSA. + # Only test with MongoDB 4.4 and latest. + for host_name in ["rhel8", "win64", "macos"]: + host = HOSTS[host_name] + if host == DEFAULT_HOST: + tasks = [".ocsp"] + else: + tasks = [".ocsp-rsa !.ocsp-staple .latest", ".ocsp-rsa !.ocsp-staple .4.4"] + variant = create_variant( + tasks, + get_variant_name("OCSP", host), + host=host, + batchtime=BATCHTIME_WEEK, + ) + variants.append(variant) + return variants + + +def create_server_version_variants() -> list[BuildVariant]: + variants = [] + for version in ALL_VERSIONS: + display_name = get_variant_name("* MongoDB", version=version) + variant = create_variant( + [".server-version"], + display_name, + version=version, + host=DEFAULT_HOST, + tags=["coverage_tag"], + ) + variants.append(variant) + return variants + + +def create_standard_nonlinux_variants() -> list[BuildVariant]: + variants = [] + base_display_name = "* Test" + + # Test a subset on each of the other platforms. + for host_name in ("macos", "macos-arm64", "win64", "win32"): + tasks = [".test-standard !.pypy"] + # MacOS arm64 only works on server versions 6.0+ + if host_name == "macos-arm64": + tasks = [ + f".test-standard !.pypy .server-{version}" for version in get_versions_from("6.0") + ] + host = HOSTS[host_name] + tags = ["standard-non-linux"] + expansions = dict() + if host_name == "win32": + expansions["IS_WIN32"] = "1" + display_name = get_variant_name(base_display_name, host) + variant = create_variant(tasks, display_name, host=host, tags=tags, expansions=expansions) + variants.append(variant) + + return variants + + +def create_encryption_variants() -> list[BuildVariant]: + variants = [] + tags = ["encryption_tag"] + batchtime = BATCHTIME_DAY + + def get_encryption_expansions(encryption): + expansions = dict(TEST_NAME="encryption") + if "crypt_shared" in encryption: + expansions["TEST_CRYPT_SHARED"] = "true" + if "PyOpenSSL" in encryption: + expansions["SUB_TEST_NAME"] = "pyopenssl" + return expansions + + # Test encryption on all hosts. 
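+    # (Only the rhel8 host also runs the min-deps tasks; the other hosts skip PyPy.)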
+ for encryption, host in product( + ["Encryption", "Encryption crypt_shared"], ["rhel8", "macos", "win64"] + ): + expansions = get_encryption_expansions(encryption) + display_name = get_variant_name(encryption, host, **expansions) + tasks = [".test-non-standard", ".test-min-deps"] + if host != "rhel8": + tasks = [".test-non-standard !.pypy"] + variant = create_variant( + tasks, + display_name, + host=host, + expansions=expansions, + batchtime=batchtime, + tags=tags, + ) + variants.append(variant) + + # Test PyOpenSSL on linux. + host = DEFAULT_HOST + encryption = "Encryption PyOpenSSL" + expansions = get_encryption_expansions(encryption) + display_name = get_variant_name(encryption, host, **expansions) + variant = create_variant( + [".test-non-standard"], + display_name, + host=host, + expansions=expansions, + batchtime=batchtime, + tags=tags, + ) + variants.append(variant) + return variants + + +def create_load_balancer_variants(): + tasks = [ + f".test-non-standard .server-{v} .sharded_cluster-auth-ssl" + for v in get_versions_from("6.0") + ] + expansions = dict(TEST_NAME="load_balancer") + return [ + create_variant( + tasks, + "Load Balancer", + host=DEFAULT_HOST, + batchtime=BATCHTIME_DAY, + expansions=expansions, + ) + ] + + +def create_compression_variants(): + # Compression tests - use the standard linux tests. + host = DEFAULT_HOST + variants = [] + for compressor in "snappy", "zlib", "zstd": + expansions = dict(COMPRESSOR=compressor) + if compressor == "zstd": + tasks = [".test-standard !.server-4.2"] + else: + tasks = [".test-standard"] + display_name = get_variant_name(f"Compression {compressor}", host) + variants.append( + create_variant( + tasks, + display_name, + host=host, + expansions=expansions, + ) + ) + return variants + + +def create_enterprise_auth_variants(): + variants = [] + for host in ["rhel8", "macos", "win64"]: + expansions = dict(TEST_NAME="enterprise_auth", AUTH="auth") + display_name = get_variant_name("Auth Enterprise", host) + tasks = [".test-standard-auth .auth !.free-threaded"] + # https://jira.mongodb.org/browse/PYTHON-5586 + if host == "macos": + tasks = [".test-standard-auth !.pypy .auth !.free-threaded"] + if host == "win64": + tasks = [".test-standard-auth !.pypy .auth"] + variant = create_variant(tasks, display_name, host=host, expansions=expansions) + variants.append(variant) + return variants + + +def create_pyopenssl_variants(): + base_name = "PyOpenSSL" + batchtime = BATCHTIME_DAY + expansions = dict(SUB_TEST_NAME="pyopenssl") + variants = [] + + for host in ["rhel8", "macos", "win64"]: + display_name = get_variant_name(base_name, host) + base_task = ".test-standard" if host == "rhel8" else ".test-standard !.pypy" + # We only need to run a subset on async. 
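+    # (Sync tasks run for every topology; async is limited to replica_set-noauth-ssl.)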
+ tasks = [f"{base_task} .sync", f"{base_task} .async .replica_set-noauth-ssl"] + variants.append( + create_variant( + tasks, + display_name, + expansions=expansions, + batchtime=batchtime, + ) + ) + + return variants + + +def create_storage_engine_variants(): + host = DEFAULT_HOST + engines = ["InMemory"] + variants = [] + for engine in engines: + expansions = dict(STORAGE_ENGINE=engine.lower()) + tasks = [".test-standard .standalone-noauth-nossl"] + display_name = get_variant_name(f"Storage {engine}", host) + variant = create_variant(tasks, display_name, host=host, expansions=expansions) + variants.append(variant) + return variants + + +def create_stable_api_variants(): + host = DEFAULT_HOST + tags = ["versionedApi_tag"] + variants = [] + types = ["require v1", "accept v2"] + + # All python versions across platforms. + for test_type in types: + expansions = dict(AUTH="auth") + # Test against a cluster with requireApiVersion=1. + if test_type == types[0]: + # REQUIRE_API_VERSION is set to make drivers-evergreen-tools + # start a cluster with the requireApiVersion parameter. + expansions["REQUIRE_API_VERSION"] = "1" + # MONGODB_API_VERSION is the apiVersion to use in the test suite. + expansions["MONGODB_API_VERSION"] = "1" + tasks = [ + f".test-standard !.replica_set-noauth-ssl .server-{v}" + for v in get_versions_from("5.0") + ] + else: + # Test against a cluster with acceptApiVersion2 but without + # requireApiVersion, and don't automatically add apiVersion to + # clients created in the test suite. + expansions["ORCHESTRATION_FILE"] = "versioned-api-testing.json" + tasks = [ + f".test-standard .server-{v} .standalone-noauth-nossl" + for v in get_versions_from("5.0") + ] + base_display_name = f"Stable API {test_type}" + display_name = get_variant_name(base_display_name, host, **expansions) + variant = create_variant(tasks, display_name, host=host, tags=tags, expansions=expansions) + variants.append(variant) + + return variants + + +def create_green_framework_variants(): + variants = [] + host = DEFAULT_HOST + for framework in ["gevent"]: + tasks = [".test-standard .sync !.free-threaded"] + expansions = dict(GREEN_FRAMEWORK=framework) + display_name = get_variant_name(f"Green {framework.capitalize()}", host) + variant = create_variant(tasks, display_name, host=host, expansions=expansions) + variants.append(variant) + return variants + + +def create_no_c_ext_variants(): + host = DEFAULT_HOST + tasks = [".test-standard"] + expansions = dict() + handle_c_ext(C_EXTS[0], expansions) + display_name = get_variant_name("No C Ext", host) + return [create_variant(tasks, display_name, host=host)] + + +def create_mod_wsgi_variants(): + host = HOSTS["ubuntu22"] + tasks = [".mod_wsgi"] + expansions = dict(MOD_WSGI_VERSION="4") + display_name = get_variant_name("Mod_WSGI", host) + return [create_variant(tasks, display_name, host=host, expansions=expansions)] + + +def create_disable_test_commands_variants(): + host = DEFAULT_HOST + expansions = dict(AUTH="auth", SSL="ssl", DISABLE_TEST_COMMANDS="1") + display_name = get_variant_name("Disable test commands", host) + tasks = [".test-standard .server-latest"] + return [create_variant(tasks, display_name, host=host, expansions=expansions)] + + +def create_oidc_auth_variants(): + variants = [] + for host_name in ["ubuntu22", "macos", "win64"]: + if host_name == "ubuntu22": + tasks = [".auth_oidc_remote"] + else: + tasks = ["!.auth_oidc_remote .auth_oidc"] + host = HOSTS[host_name] + variants.append( + create_variant( + tasks, + get_variant_name("Auth OIDC", 
host), + host=host, + batchtime=BATCHTIME_DAY, + ) + ) + # Add a specific local test to run on PRs. + if host_name == "ubuntu22": + tasks = ["!.auth_oidc_remote .auth_oidc"] + variants.append( + create_variant( + tasks, + get_variant_name("Auth OIDC Local", host), + tags=["pr"], + host=host, + batchtime=BATCHTIME_DAY, + ) + ) + return variants + + +def create_search_index_variants(): + host = DEFAULT_HOST + python = CPYTHONS[0] + return [ + create_variant( + [".search_index"], + get_variant_name("Search Index Helpers", host, python=python), + python=python, + host=host, + ) + ] + + +def create_mockupdb_variants(): + host = DEFAULT_HOST + expansions = dict(TEST_NAME="mockupdb") + return [ + create_variant( + [".test-no-orchestration"], + get_variant_name("MockupDB", host), + host=host, + tags=["pr"], + expansions=expansions, + ) + ] + + +def create_doctests_variants(): + host = DEFAULT_HOST + expansions = dict(TEST_NAME="doctest") + return [ + create_variant( + [".test-non-standard .standalone-noauth-nossl"], + get_variant_name("Doctests", host), + host=host, + expansions=expansions, + ) + ] + + +def create_atlas_connect_variants(): + host = DEFAULT_HOST + return [ + create_variant( + [".test-no-orchestration"], + get_variant_name("Atlas connect", host), + tags=["pr"], + host=DEFAULT_HOST, + expansions=dict(TEST_NAME="atlas_connect"), + ) + ] + + +def create_coverage_report_variants(): + return [create_variant(["coverage-report"], "Coverage Report", host=DEFAULT_HOST)] + + +def create_kms_variants(): + tasks = [] + tasks.append(EvgTaskRef(name="test-gcpkms", batchtime=BATCHTIME_DAY)) + tasks.append("test-gcpkms-fail") + tasks.append(EvgTaskRef(name="test-azurekms", batchtime=BATCHTIME_DAY)) + tasks.append("test-azurekms-fail") + return [create_variant(tasks, "KMS", host=HOSTS["debian11"])] + + +def create_import_time_variants(): + return [create_variant(["check-import-time"], "Import Time", host=DEFAULT_HOST)] + + +def create_backport_pr_variants(): + return [create_variant(["backport-pr"], "Backport PR", host=DEFAULT_HOST)] + + +def create_perf_variants(): + host = HOSTS["perf"] + return [create_variant([".perf"], "Performance Benchmarks", host=host, batchtime=BATCHTIME_DAY)] + + +def create_aws_auth_variants(): + variants = [] + + for host_name in ["ubuntu20", "win64", "macos"]: + expansions = dict() + # PYTHON-5604 - we need to skip ECS tests for now. + tasks = [".auth-aws !.auth-aws-ecs"] + tags = [] + if host_name == "macos": + tasks = [".auth-aws !.auth-aws-web-identity !.auth-aws-ecs !.auth-aws-ec2"] + tags = ["pr"] + elif host_name == "win64": + tasks = [".auth-aws !.auth-aws-ecs"] + host = HOSTS[host_name] + variant = create_variant( + tasks, + get_variant_name("Auth AWS", host), + host=host, + tags=tags, + expansions=expansions, + ) + variants.append(variant) + return variants + + +def create_no_server_variants(): + host = HOSTS["rhel8"] + name = get_variant_name("No server", host=host) + return [create_variant([".test-no-orchestration"], name, host=host, tags=["pr"])] + + +def create_alternative_hosts_variants(): + batchtime = BATCHTIME_DAY + variants = [] + + version = "latest" + for host_name in OTHER_HOSTS: + expansions = dict(VERSION="latest") + handle_c_ext(C_EXTS[0], expansions) + host = HOSTS[host_name] + tags = [] + if "fips" in host_name.lower(): + expansions["REQUIRE_FIPS"] = "1" + # Use explicit Python 3.11 binary on the host since the default python3 is 3.9. 
+ expansions["PYTHON_BINARY"] = "/usr/bin/python3.11" + if "amazon" in host_name.lower(): + tags.append("pr") + variants.append( + create_variant( + [".test-no-toolchain"], + display_name=get_variant_name("Other hosts", host, version=version), + batchtime=batchtime, + host=host, + tags=tags, + expansions=expansions, + ) + ) + return variants + + +def create_aws_lambda_variants(): + host = HOSTS["rhel8"] + return [create_variant([".aws_lambda"], display_name="FaaS Lambda", host=host)] + + +############## +# Tasks +############## + + +def create_server_version_tasks(): + tasks = [] + task_combos = set() + # All combinations of topology, auth, ssl, and sync should be tested. + for (topology, auth, ssl, sync), python in zip_cycle( + list(product(TOPOLOGIES, ["auth", "noauth"], ["ssl", "nossl"], SYNCS)), ALL_PYTHONS + ): + task_combos.add((topology, auth, ssl, sync, python)) + + # Every python should be tested with sharded cluster, auth, ssl, with sync and async. + for python, sync in product(ALL_PYTHONS, SYNCS): + task_combos.add(("sharded_cluster", "auth", "ssl", sync, python)) + + # Assemble the tasks. + seen = set() + for topology, auth, ssl, sync, python in sorted(task_combos): + combo = f"{topology}-{auth}-{ssl}" + tags = ["server-version", f"python-{python}", combo, sync] + if combo in [ + "standalone-noauth-nossl", + "replica_set-noauth-nossl", + "sharded_cluster-auth-ssl", + ]: + combo = f"{combo}-{sync}" + if combo not in seen: + seen.add(combo) + tags.append("pr") + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) + if "t" in python: + tags.append("free-threaded") + if python not in PYPYS and "t" not in python: + expansions["COVERAGE"] = "1" + name = get_task_name( + "test-server-version", + python=python, + sync=sync, + **expansions, + ) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["PYTHON_VERSION"] = python + test_vars["TEST_NAME"] = f"default_{sync}" + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + +def create_no_toolchain_tasks(): + tasks = [] + + for topology, sync in zip_cycle(TOPOLOGIES, SYNCS): + auth, ssl = get_standard_auth_ssl(topology) + tags = [ + "test-no-toolchain", + f"{topology}-{auth}-{ssl}", + ] + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology) + name = get_task_name("test-no-toolchain", sync=sync, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["TEST_NAME"] = f"default_{sync}" + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + +def create_test_non_standard_tasks(): + """For variants that set a TEST_NAME.""" + tasks = [] + task_combos = set() + # For each version and topology, rotate through the CPythons. + for (version, topology), python in zip_cycle(list(product(ALL_VERSIONS, TOPOLOGIES)), CPYTHONS): + pr = version == "latest" + task_combos.add((version, topology, python, pr)) + # For each PyPy and topology, rotate through the MongoDB versions. 
+    for (python, topology), version in zip_cycle(list(product(PYPYS, TOPOLOGIES)), ALL_VERSIONS):
+        task_combos.add((version, topology, python, False))
+    for version, topology, python, pr in sorted(task_combos):
+        auth, ssl = get_standard_auth_ssl(topology)
+        tags = [
+            "test-non-standard",
+            f"server-{version}",
+            f"python-{python}",
+            f"{topology}-{auth}-{ssl}",
+            auth,
+        ]
+        if "t" in python:
+            tags.append("free-threaded")
+        if python in PYPYS:
+            tags.append("pypy")
+        if pr:
+            tags.append("pr")
+        expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version)
+        name = get_task_name("test-non-standard", python=python, **expansions)
+        server_func = FunctionCall(func="run server", vars=expansions)
+        test_vars = expansions.copy()
+        test_vars["PYTHON_VERSION"] = python
+        test_func = FunctionCall(func="run tests", vars=test_vars)
+        tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func]))
+    return tasks
+
+
+def create_test_standard_auth_tasks():
+    """We only use auth on sharded clusters."""
+    tasks = []
+    task_combos = set()
+    # Rotate through the CPython and MongoDB versions.
+    for (version, topology), python in zip_cycle(
+        list(product(ALL_VERSIONS, ["sharded_cluster"])), CPYTHONS
+    ):
+        pr = version == "latest"
+        task_combos.add((version, topology, python, pr))
+    # Rotate through the PyPy and MongoDB versions.
+    for (python, topology), version in zip_cycle(
+        list(product(PYPYS, ["sharded_cluster"])), ALL_VERSIONS
+    ):
+        task_combos.add((version, topology, python, False))
+    for version, topology, python, pr in sorted(task_combos):
+        auth, ssl = get_standard_auth_ssl(topology)
+        tags = [
+            "test-standard-auth",
+            f"server-{version}",
+            f"python-{python}",
+            f"{topology}-{auth}-{ssl}",
+            auth,
+        ]
+        if "t" in python:
+            tags.append("free-threaded")
+        if python in PYPYS:
+            tags.append("pypy")
+        if pr:
+            tags.append("pr")
+        expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version)
+        name = get_task_name("test-standard-auth", python=python, **expansions)
+        server_func = FunctionCall(func="run server", vars=expansions)
+        test_vars = expansions.copy()
+        test_vars["PYTHON_VERSION"] = python
+        test_func = FunctionCall(func="run tests", vars=test_vars)
+        tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func]))
+    return tasks
+
+
+def create_min_deps_tasks():
+    """For variants that support testing with minimum dependencies."""
+    tasks = []
+    for topology in TOPOLOGIES:
+        auth, ssl = get_standard_auth_ssl(topology)
+        tags = ["test-min-deps", f"{topology}-{auth}-{ssl}"]
+        expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology)
+        server_func = FunctionCall(func="run server", vars=expansions)
+        test_vars = expansions.copy()
+        test_vars["TEST_MIN_DEPS"] = "1"
+        name = get_task_name("test-min-deps", python=CPYTHONS[0], sync="sync", **test_vars)
+        test_func = FunctionCall(func="run tests", vars=test_vars)
+        tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func]))
+    return tasks
+
+
+def create_standard_tasks():
+    """For variants that do not set a TEST_NAME."""
+    tasks = []
+    task_combos = set()
+    # For each python and topology and sync/async, rotate through the versions.
+ for (python, topology, sync), version in zip_cycle( + list(product(CPYTHONS + PYPYS, TOPOLOGIES, SYNCS)), ALL_VERSIONS + ): + pr = version == "latest" and python not in PYPYS + task_combos.add((version, topology, python, sync, pr)) + + for version, topology, python, sync, pr in sorted(task_combos): + auth, ssl = get_standard_auth_ssl(topology) + tags = [ + "test-standard", + f"server-{version}", + f"python-{python}", + f"{topology}-{auth}-{ssl}", + sync, + ] + if "t" in python: + tags.append("free-threaded") + if python in PYPYS: + tags.append("pypy") + if pr: + tags.append("pr") + expansions = dict(AUTH=auth, SSL=ssl, TOPOLOGY=topology, VERSION=version) + name = get_task_name("test-standard", python=python, sync=sync, **expansions) + server_func = FunctionCall(func="run server", vars=expansions) + test_vars = expansions.copy() + test_vars["PYTHON_VERSION"] = python + test_vars["TEST_NAME"] = f"default_{sync}" + test_func = FunctionCall(func="run tests", vars=test_vars) + tasks.append(EvgTask(name=name, tags=tags, commands=[server_func, test_func])) + return tasks + + +def create_no_orchestration_tasks(): + tasks = [] + for python in [*MIN_MAX_PYTHON, PYPYS[-1]]: + tags = [ + "test-no-orchestration", + f"python-{python}", + ] + name = get_task_name("test-no-orchestration", python=python) + assume_func = FunctionCall(func="assume ec2 role") + test_vars = dict(PYTHON_VERSION=python) + test_func = FunctionCall(func="run tests", vars=test_vars) + commands = [assume_func, test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=commands)) + return tasks + + +def create_kms_tasks(): + tasks = [] + for kms_type in ["gcp", "azure"]: + for success in [True, False]: + name = f"test-{kms_type}kms" + sub_test_name = kms_type + tags = [] + if not success: + name += "-fail" + sub_test_name += "-fail" + tags.append("pr") + commands = [] + if not success: + commands.append(FunctionCall(func="run server")) + test_vars = dict(TEST_NAME="kms", SUB_TEST_NAME=sub_test_name) + test_func = FunctionCall(func="run tests", vars=test_vars) + commands.append(test_func) + tasks.append(EvgTask(name=name, tags=tags, commands=commands)) + return tasks + + +def create_aws_tasks(): + tasks = [] + aws_test_types = [ + "regular", + "assume-role", + "ec2", + "env-creds", + "session-creds", + "web-identity", + "ecs", + ] + for version, test_type, python in zip_cycle(get_versions_from("4.4"), aws_test_types, CPYTHONS): + base_name = f"test-auth-aws-{version}" + base_tags = ["auth-aws"] + server_vars = dict(AUTH_AWS="1", VERSION=version) + server_func = FunctionCall(func="run server", vars=server_vars) + assume_func = FunctionCall(func="assume ec2 role") + tags = [*base_tags, f"auth-aws-{test_type}"] + if "t" in python: + tags.append("free-threaded") + name = get_task_name(f"{base_name}-{test_type}", python=python) + test_vars = dict(TEST_NAME="auth_aws", SUB_TEST_NAME=test_type, PYTHON_VERSION=python) + test_func = FunctionCall(func="run tests", vars=test_vars) + funcs = [server_func, assume_func, test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) + + if test_type == "web-identity": + tags = [*base_tags, "auth-aws-web-identity"] + name = get_task_name(f"{base_name}-web-identity-session-name", python=python) + test_vars = dict( + TEST_NAME="auth_aws", + SUB_TEST_NAME="web-identity", + AWS_ROLE_SESSION_NAME="test", + PYTHON_VERSION=python, + ) + if "t" in python: + tags.append("free-threaded") + test_func = FunctionCall(func="run tests", vars=test_vars) + funcs = [server_func, assume_func, 
test_func] + tasks.append(EvgTask(name=name, tags=tags, commands=funcs)) + + return tasks + + +def create_oidc_tasks(): + tasks = [] + for sub_test in ["default", "azure", "gcp", "eks", "aks", "gke"]: + vars = dict(TEST_NAME="auth_oidc", SUB_TEST_NAME=sub_test) + test_func = FunctionCall(func="run tests", vars=vars) + task_name = f"test-auth-oidc-{sub_test}" + tags = ["auth_oidc"] + if sub_test != "default": + tags.append("auth_oidc_remote") + tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + + return tasks + + +def create_mod_wsgi_tasks(): + tasks = [] + for (test, topology), python in zip_cycle( + product(["standalone", "embedded-mode"], ["standalone", "replica_set"]), CPYTHONS + ): + if "t" in python: + continue + if test == "standalone": + task_name = "mod-wsgi-" + else: + task_name = "mod-wsgi-embedded-mode-" + task_name += topology.replace("_", "-") + task_name = get_task_name(task_name, python=python) + server_vars = dict(TOPOLOGY=topology, PYTHON_VERSION=python) + server_func = FunctionCall(func="run server", vars=server_vars) + vars = dict(TEST_NAME="mod_wsgi", SUB_TEST_NAME=test.split("-")[0], PYTHON_VERSION=python) + test_func = FunctionCall(func="run tests", vars=vars) + tags = ["mod_wsgi", "pr"] + commands = [server_func, test_func] + tasks.append(EvgTask(name=task_name, tags=tags, commands=commands)) + return tasks + + +def _create_ocsp_tasks(algo, variant, server_type, base_task_name): + tasks = [] + file_name = f"{algo}-basic-tls-ocsp-{variant}.json" + + for version in get_versions_from("4.4"): + if version == "latest": + python = MIN_MAX_PYTHON[-1] + else: + python = MIN_MAX_PYTHON[0] + + vars = dict( + ORCHESTRATION_FILE=file_name, + OCSP_SERVER_TYPE=server_type, + TEST_NAME="ocsp", + PYTHON_VERSION=python, + VERSION=version, + ) + test_func = FunctionCall(func="run tests", vars=vars) + + tags = ["ocsp", f"ocsp-{algo}", version] + if "disableStapling" not in variant: + tags.append("ocsp-staple") + if algo == "valid-cert-server-staples" and version == "latest": + tags.append("pr") + + task_name = get_task_name( + f"test-ocsp-{algo}-{base_task_name}", + python=python, + version=version, + ) + tasks.append(EvgTask(name=task_name, tags=tags, commands=[test_func])) + + return tasks + + +def create_aws_lambda_tasks(): + assume_func = FunctionCall(func="assume ec2 role") + vars = dict(TEST_NAME="aws_lambda") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = "test-aws-lambda-deployed" + tags = ["aws_lambda"] + commands = [assume_func, test_func] + return [EvgTask(name=task_name, tags=tags, commands=commands)] + + +def create_search_index_tasks(): + assume_func = FunctionCall(func="assume ec2 role") + server_func = FunctionCall(func="run server", vars=dict(TEST_NAME="search_index")) + vars = dict(TEST_NAME="search_index") + test_func = FunctionCall(func="run tests", vars=vars) + task_name = "test-search-index-helpers" + tags = ["search_index"] + commands = [assume_func, server_func, test_func] + return [EvgTask(name=task_name, tags=tags, commands=commands)] + + +def create_perf_tasks(): + tasks = [] + for version, ssl, sync in product(["8.0"], ["ssl", "nossl"], ["sync", "async"]): + vars = dict(VERSION=f"v{version}-perf", SSL=ssl) + server_func = FunctionCall(func="run server", vars=vars) + vars = dict(TEST_NAME="perf", SUB_TEST_NAME=sync) + test_func = FunctionCall(func="run tests", vars=vars) + attach_func = FunctionCall(func="attach benchmark test results") + send_func = FunctionCall(func="send dashboard data") + task_name = 
f"perf-{version}-standalone" + if ssl == "ssl": + task_name += "-ssl" + if sync == "async": + task_name += "-async" + tags = ["perf"] + commands = [server_func, test_func, attach_func, send_func] + tasks.append(EvgTask(name=task_name, tags=tags, commands=commands)) + return tasks + + +def create_getdata_tasks(): + # Wildcard task. Do you need to find out what tools are available and where? + # Throw it here, and execute this task on all buildvariants + cmd = get_subprocess_exec(args=[".evergreen/scripts/run-getdata.sh"]) + return [EvgTask(name="getdata", commands=[cmd])] + + +def create_coverage_report_tasks(): + tags = ["coverage", "pr"] + task_name = "coverage-report" + # BUILD-3165: We can't use "*" (all tasks) and specify "variant". + # Instead list out all coverage tasks using tags. + # Run the coverage task even if some tasks fail. + # Run the coverage task even if some tasks are not scheduled in a patch build. + task_deps = [ + EvgTaskDependency( + name=".server-version", variant=".coverage_tag", status="*", patch_optional=True + ) + ] + cmd = FunctionCall(func="download and merge coverage") + return [EvgTask(name=task_name, tags=tags, depends_on=task_deps, commands=[cmd])] + + +def create_import_time_tasks(): + name = "check-import-time" + tags = ["pr"] + args = [".evergreen/scripts/check-import-time.sh", "${revision}", "${github.amrom.workers.devmit}"] + cmd = get_subprocess_exec(args=args) + return [EvgTask(name=name, tags=tags, commands=[cmd])] + + +def create_backport_pr_tasks(): + name = "backport-pr" + args = [ + "${DRIVERS_TOOLS}/.evergreen/github_app/backport-pr.sh", + "mongodb", + "mongo-python-driver", + "${github.amrom.workers.devmit}", + ] + include_expansions = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + cmd = get_subprocess_exec(args=args, include_expansions_in_env=include_expansions) + assume_func = FunctionCall(func="assume ec2 role") + return [EvgTask(name=name, commands=[assume_func, cmd], allowed_requesters=["commit"])] + + +def create_ocsp_tasks(): + tasks = [] + tests = [ + ("disableStapling", "valid", "valid-cert-server-does-not-staple"), + ("disableStapling", "revoked", "invalid-cert-server-does-not-staple"), + ("disableStapling", "valid-delegate", "delegate-valid-cert-server-does-not-staple"), + ("disableStapling", "revoked-delegate", "delegate-invalid-cert-server-does-not-staple"), + ("disableStapling", "no-responder", "soft-fail"), + ("mustStaple", "valid", "valid-cert-server-staples"), + ("mustStaple", "revoked", "invalid-cert-server-staples"), + ("mustStaple", "valid-delegate", "delegate-valid-cert-server-staples"), + ("mustStaple", "revoked-delegate", "delegate-invalid-cert-server-staples"), + ( + "mustStaple-disableStapling", + "revoked", + "malicious-invalid-cert-mustStaple-server-does-not-staple", + ), + ( + "mustStaple-disableStapling", + "revoked-delegate", + "delegate-malicious-invalid-cert-mustStaple-server-does-not-staple", + ), + ( + "mustStaple-disableStapling", + "no-responder", + "malicious-no-responder-mustStaple-server-does-not-staple", + ), + ] + for algo in ["ecdsa", "rsa"]: + for variant, server_type, base_task_name in tests: + new_tasks = _create_ocsp_tasks(algo, variant, server_type, base_task_name) + tasks.extend(new_tasks) + + return tasks + + +############## +# Functions +############## + + +def create_upload_coverage_func(): + # Upload the coverage report for all tasks in a single build to the same directory. 
+ remote_file = ( + "coverage/${revision}/${version_id}/coverage/coverage.${build_variant}.${task_name}" + ) + display_name = "Raw Coverage Report" + cmd = get_s3_put( + local_file="src/.coverage", + remote_file=remote_file, + display_name=display_name, + content_type="text/html", + ) + return "upload coverage", [get_assume_role(), cmd] + + +def create_download_and_merge_coverage_func(): + include_expansions = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"] + args = [ + ".evergreen/scripts/download-and-merge-coverage.sh", + "${bucket_name}", + "${revision}", + "${version_id}", + ] + merge_cmd = get_subprocess_exec( + silent=True, include_expansions_in_env=include_expansions, args=args + ) + combine_cmd = get_subprocess_exec(args=[".evergreen/combine-coverage.sh"]) + # Upload the resulting html coverage report. + args = [ + ".evergreen/scripts/upload-coverage-report.sh", + "${bucket_name}", + "${revision}", + "${version_id}", + ] + upload_cmd = get_subprocess_exec( + silent=True, include_expansions_in_env=include_expansions, args=args + ) + display_name = "Coverage Report HTML" + remote_file = "coverage/${revision}/${version_id}/htmlcov/index.html" + put_cmd = get_s3_put( + local_file="src/htmlcov/index.html", + remote_file=remote_file, + display_name=display_name, + content_type="text/html", + ) + cmds = [get_assume_role(), merge_cmd, combine_cmd, upload_cmd, put_cmd] + return "download and merge coverage", cmds + + +def create_upload_mo_artifacts_func(): + include = ["./**.core", "./**.mdmp"] # Windows: minidumps + archive_cmd = archive_targz_pack(target="mongo-coredumps.tgz", source_dir="./", include=include) + display_name = "Core Dumps - Execution" + remote_file = "${build_variant}/${revision}/${version_id}/${build_id}/coredumps/${task_id}-${execution}-mongodb-coredumps.tar.gz" + s3_dumps = get_s3_put( + local_file="mongo-coredumps.tgz", remote_file=remote_file, display_name=display_name + ) + display_name = "drivers-tools-logs.tar.gz" + remote_file = "${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-drivers-tools-logs.tar.gz" + s3_logs = get_s3_put( + local_file="${DRIVERS_TOOLS}/.evergreen/test_logs.tar.gz", + remote_file=remote_file, + display_name=display_name, + ) + cmds = [get_assume_role(), archive_cmd, s3_dumps, s3_logs] + return "upload mo artifacts", cmds + + +def create_fetch_source_func(): + # Executes clone and applies the submitted patch, if any. + cmd = git_get_project(directory="src") + return "fetch source", [cmd] + + +def create_setup_system_func(): + # Make an evergreen expansion file with dynamic values. + includes = ["is_patch", "project", "version_id"] + args = [".evergreen/scripts/setup-system.sh"] + setup_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + # Load the expansion file to make an evergreen variable with the current unique version. 
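+    # (src/expansion.yml is produced by the setup-system.sh step invoked just above.)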
+ expansion_cmd = expansions_update(file="src/expansion.yml") + return "setup system", [setup_cmd, expansion_cmd] + + +def create_upload_test_results_func(): + results_cmd = attach_results(file_location="${DRIVERS_TOOLS}/results.json") + xresults_cmd = attach_xunit_results(file="src/xunit-results/TEST-*.xml") + return "upload test results", [results_cmd, xresults_cmd] + + +def create_run_server_func(): + includes = [ + "VERSION", + "TOPOLOGY", + "AUTH", + "SSL", + "ORCHESTRATION_FILE", + "PYTHON_BINARY", + "PYTHON_VERSION", + "STORAGE_ENGINE", + "REQUIRE_API_VERSION", + "DRIVERS_TOOLS", + "TEST_CRYPT_SHARED", + "AUTH_AWS", + "LOAD_BALANCER", + "LOCAL_ATLAS", + "NO_EXT", + ] + args = [".evergreen/just.sh", "run-server", "${TEST_NAME}"] + sub_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + expansion_cmd = expansions_update(file="${DRIVERS_TOOLS}/mo-expansion.yml") + return "run server", [sub_cmd, expansion_cmd] + + +def create_run_tests_func(): + includes = [ + "AUTH", + "SSL", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_SESSION_TOKEN", + "COVERAGE", + "PYTHON_BINARY", + "LIBMONGOCRYPT_URL", + "MONGODB_URI", + "PYTHON_VERSION", + "DISABLE_TEST_COMMANDS", + "GREEN_FRAMEWORK", + "NO_EXT", + "COMPRESSORS", + "MONGODB_API_VERSION", + "REQUIRE_API_VERSION", + "DEBUG_LOG", + "DISABLE_FLAKY", + "ORCHESTRATION_FILE", + "OCSP_SERVER_TYPE", + "VERSION", + "IS_WIN32", + "REQUIRE_FIPS", + "TEST_MIN_DEPS", + ] + args = [".evergreen/just.sh", "setup-tests", "${TEST_NAME}", "${SUB_TEST_NAME}"] + setup_cmd = get_subprocess_exec(include_expansions_in_env=includes, args=args) + test_cmd = get_subprocess_exec(args=[".evergreen/just.sh", "run-tests"]) + return "run tests", [setup_cmd, test_cmd] + + +def create_cleanup_func(): + cmd = get_subprocess_exec(args=[".evergreen/scripts/cleanup.sh"]) + return "cleanup", [cmd] + + +def create_teardown_system_func(): + tests_cmd = get_subprocess_exec(args=[".evergreen/just.sh", "teardown-tests"]) + drivers_cmd = get_subprocess_exec(args=["${DRIVERS_TOOLS}/.evergreen/teardown.sh"]) + return "teardown system", [tests_cmd, drivers_cmd] + + +def create_assume_ec2_role_func(): + cmd = ec2_assume_role(role_arn="${aws_test_secrets_role}", duration_seconds=3600) + return "assume ec2 role", [cmd] + + +def create_attach_benchmark_test_results_func(): + cmd = attach_results(file_location="src/report.json") + return "attach benchmark test results", [cmd] + + +def create_send_dashboard_data_func(): + includes = [ + "requester", + "revision_order_id", + "project_id", + "version_id", + "build_variant", + "parsed_order_id", + "task_name", + "task_id", + "execution", + "is_mainline", + ] + cmds = [ + get_subprocess_exec( + include_expansions_in_env=includes, args=[".evergreen/scripts/perf-submission-setup.sh"] + ), + expansions_update(file="src/expansion.yml"), + get_subprocess_exec( + include_expansions_in_env=includes, args=[".evergreen/scripts/perf-submission.sh"] + ), + ] + return "send dashboard data", cmds + + +mod = sys.modules[__name__] +write_variants_to_file(mod) +write_tasks_to_file(mod) +write_functions_to_file(mod) diff --git a/.evergreen/scripts/generate_config_utils.py b/.evergreen/scripts/generate_config_utils.py new file mode 100644 index 0000000000..4eb6bcb0dc --- /dev/null +++ b/.evergreen/scripts/generate_config_utils.py @@ -0,0 +1,374 @@ +from __future__ import annotations + +from dataclasses import dataclass +from inspect import getmembers, isfunction +from itertools import cycle, zip_longest +from pathlib import Path +from 
typing import Any + +from shrub.v3.evg_build_variant import BuildVariant +from shrub.v3.evg_command import ( + EvgCommandType, + ec2_assume_role, + s3_put, + subprocess_exec, +) +from shrub.v3.evg_project import EvgProject +from shrub.v3.evg_task import EvgTaskRef +from shrub.v3.shrub_service import ShrubService + +############## +# Globals +############## + +ALL_VERSIONS = ["4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "rapid", "latest"] +CPYTHONS = ["3.10", "3.11", "3.12", "3.13", "3.14t", "3.14"] +PYPYS = ["pypy3.10"] +ALL_PYTHONS = CPYTHONS + PYPYS +MIN_MAX_PYTHON = [CPYTHONS[0], CPYTHONS[-1]] +BATCHTIME_WEEK = 10080 +BATCHTIME_DAY = 1440 +AUTH_SSLS = [("auth", "ssl"), ("noauth", "ssl"), ("noauth", "nossl")] +TOPOLOGIES = ["standalone", "replica_set", "sharded_cluster"] +C_EXTS = ["without_ext", "with_ext"] +SYNCS = ["sync", "async"] +DISPLAY_LOOKUP = dict( + ssl=dict(ssl="SSL", nossl="NoSSL"), + auth=dict(auth="Auth", noauth="NoAuth"), + topology=dict( + standalone="Standalone", replica_set="Replica Set", sharded_cluster="Sharded Cluster" + ), + test_suites=dict(default="Sync", default_async="Async"), + sync={"sync": "Sync", "async": "Async"}, + coverage={"1": "cov"}, + no_ext={"1": "No C"}, + test_min_deps={True: "Min Deps"}, +) +HOSTS = dict() + + +@dataclass +class Host: + name: str + run_on: str + display_name: str + variables: dict[str, str] | None + + +# Hosts with toolchains. +HOSTS["rhel8"] = Host("rhel8", "rhel87-small", "RHEL8", dict()) +HOSTS["win64"] = Host("win64", "windows-64-vsMulti-small", "Win64", dict()) +HOSTS["win32"] = Host("win32", "windows-64-vsMulti-small", "Win32", dict()) +HOSTS["macos"] = Host("macos", "macos-14", "macOS", dict()) +HOSTS["macos-arm64"] = Host("macos-arm64", "macos-14-arm64", "macOS Arm64", dict()) +HOSTS["ubuntu20"] = Host("ubuntu20", "ubuntu2004-small", "Ubuntu-20", dict()) +HOSTS["ubuntu22"] = Host("ubuntu22", "ubuntu2204-small", "Ubuntu-22", dict()) +HOSTS["rhel7"] = Host("rhel7", "rhel79-small", "RHEL7", dict()) +HOSTS["perf"] = Host("perf", "rhel90-dbx-perf-large", "", dict()) +HOSTS["debian11"] = Host("debian11", "debian11-small", "Debian11", dict()) +DEFAULT_HOST = HOSTS["rhel8"] + +# Other hosts +OTHER_HOSTS = ["RHEL9-FIPS", "RHEL8-zseries", "RHEL8-POWER8", "RHEL8-arm64", "Amazon2023"] +for name, run_on in zip( + OTHER_HOSTS, + [ + "rhel92-fips", + "rhel8-zseries-small", + "rhel8-power-small", + "rhel82-arm64-small", + "amazon2023-arm64-latest-large-m8g", + ], +): + HOSTS[name] = Host(name, run_on, name, dict()) + +############## +# Helpers +############## + + +def create_variant_generic( + tasks: list[str | EvgTaskRef], + display_name: str, + *, + host: Host | str | None = None, + default_run_on="rhel87-small", + expansions: dict | None = None, + **kwargs: Any, +) -> BuildVariant: + """Create a build variant for the given inputs.""" + task_refs = [] + if isinstance(host, str): + host = HOSTS[host] + for t in tasks: + if isinstance(t, EvgTaskRef): + task_refs.append(t) + else: + task_refs.append(EvgTaskRef(name=t)) + expansions = expansions and expansions.copy() or dict() + if "run_on" in kwargs: + run_on = kwargs.pop("run_on") + elif host: + run_on = [host.run_on] + if host.variables: + expansions.update(host.variables) + else: + run_on = [default_run_on] + if isinstance(run_on, str): + run_on = [run_on] + name = display_name.replace(" ", "-").replace("*-", "").lower() + return BuildVariant( + name=name, + display_name=display_name, + tasks=task_refs, + expansions=expansions or None, + run_on=run_on, + **kwargs, + ) + + +def 
create_variant(
+    tasks: list[str | EvgTaskRef],
+    display_name: str,
+    *,
+    version: str | None = None,
+    host: Host | str | None = None,
+    python: str | None = None,
+    expansions: dict | None = None,
+    **kwargs: Any,
+) -> BuildVariant:
+    expansions = expansions and expansions.copy() or dict()
+    if version:
+        expansions["VERSION"] = version
+    if python:
+        expansions["PYTHON_BINARY"] = get_python_binary(python, host)
+    return create_variant_generic(
+        tasks, display_name, version=version, host=host, expansions=expansions, **kwargs
+    )
+
+
+def get_python_binary(python: str, host: Host) -> str:
+    """Get the appropriate python binary given a python version and host."""
+    name = host.name
+    if name in ["win64", "win32"]:
+        if name == "win32":
+            base = "C:/python/32"
+        else:
+            base = "C:/python"
+        python_dir = python.replace(".", "").replace("t", "")
+        return f"{base}/Python{python_dir}/python{python}.exe"
+
+    if name in ["rhel8", "ubuntu22", "ubuntu20", "rhel7"]:
+        return f"/opt/python/{python}/bin/python3"
+
+    if name in ["macos", "macos-arm64"]:
+        bin_name = "python3t" if "t" in python else "python3"
+        python_dir = python.replace("t", "")
+        framework_dir = "PythonT" if "t" in python else "Python"
+        return f"/Library/Frameworks/{framework_dir}.Framework/Versions/{python_dir}/bin/{bin_name}"
+
+    raise ValueError(f"no match found for python {python} on {name}")
+
+
+def get_versions_from(min_version: str) -> list[str]:
+    """Get all server versions starting from a minimum version."""
+    min_version_float = float(min_version)
+    rapid_latest = ["rapid", "latest"]
+    versions = [v for v in ALL_VERSIONS if v not in rapid_latest]
+    return [v for v in versions if float(v) >= min_version_float] + rapid_latest
+
+
+def get_versions_until(max_version: str) -> list[str]:
+    """Get all server versions up to a max version."""
+    max_version_float = float(max_version)
+    versions = [v for v in ALL_VERSIONS if v not in ["rapid", "latest"]]
+    versions = [v for v in versions if float(v) <= max_version_float]
+    if not len(versions):
+        raise ValueError(f"No server versions found <= {max_version}")
+    return versions
+
+
+def get_common_name(base: str, sep: str, **kwargs) -> str:
+    display_name = base
+    version = kwargs.pop("VERSION", None)
+    version = version or kwargs.pop("version", None)
+    if version:
+        if version not in ["rapid", "latest"]:
+            version = f"v{version}"
+        display_name = f"{display_name}{sep}{version}"
+    for key, value in kwargs.items():
+        name = value
+        if key.lower() == "python":
+            if not value.startswith("pypy"):
+                name = f"Python{value}"
+            else:
+                name = f"PyPy{value.replace('pypy', '')}"
+        elif key.lower() in DISPLAY_LOOKUP and value in DISPLAY_LOOKUP[key.lower()]:
+            name = DISPLAY_LOOKUP[key.lower()][value]
+        else:
+            continue
+        display_name = f"{display_name}{sep}{name}"
+    return display_name
+
+
+def get_variant_name(base: str, host: str | Host | None = None, **kwargs) -> str:
+    """Get the display name of a variant."""
+    display_name = base
+    if isinstance(host, str):
+        host = HOSTS[host]
+    if host is not None:
+        display_name += f" {host.display_name}"
+    return get_common_name(display_name, " ", **kwargs)
+
+
+def get_task_name(base: str, **kwargs):
+    return get_common_name(base, "-", **kwargs).replace(" ", "-").lower()
+
+
+def zip_cycle(*iterables, empty_default=None):
+    """Zip the inputs, cycling the shorter iterable(s) until the longest input is exhausted."""
+    cycles = [cycle(i) for i in iterables]
+    for _ in zip_longest(*iterables):
+        yield tuple(next(i, empty_default) for i in cycles)
+
+
+def 
handle_c_ext(c_ext, expansions) -> None: + """Handle c extension option.""" + if c_ext == C_EXTS[0]: + expansions["NO_EXT"] = "1" + + +def get_standard_auth_ssl(topology): + auth = "auth" if topology == "sharded_cluster" else "noauth" + ssl = "nossl" if topology == "standalone" else "ssl" + return auth, ssl + + +def get_assume_role(**kwargs): + kwargs.setdefault("command_type", EvgCommandType.SETUP) + kwargs.setdefault("role_arn", "${assume_role_arn}") + return ec2_assume_role(**kwargs) + + +def get_subprocess_exec(**kwargs): + kwargs.setdefault("binary", "bash") + kwargs.setdefault("working_dir", "src") + kwargs.setdefault("command_type", EvgCommandType.TEST) + return subprocess_exec(**kwargs) + + +def get_s3_put(**kwargs): + kwargs["aws_key"] = "${AWS_ACCESS_KEY_ID}" + kwargs["aws_secret"] = "${AWS_SECRET_ACCESS_KEY}" # noqa:S105 + kwargs["aws_session_token"] = "${AWS_SESSION_TOKEN}" # noqa:S105 + kwargs["bucket"] = "${bucket_name}" + kwargs.setdefault("optional", "true") + kwargs.setdefault("permissions", "public-read") + kwargs.setdefault("content_type", "${content_type|application/x-gzip}") + kwargs.setdefault("command_type", EvgCommandType.SETUP) + return s3_put(**kwargs) + + +def generate_yaml(tasks=None, variants=None): + """Generate the yaml for a given set of tasks and variants.""" + project = EvgProject(tasks=tasks, buildvariants=variants) + out = ShrubService.generate_yaml(project) + # Dedent by two spaces to match what we use in config.yml + lines = [line[2:] for line in out.splitlines()] + print("\n".join(lines)) + + +################## +# Generate Config +################## + + +def write_variants_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "variants.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("buildvariants:\n") + + for name, func in sorted(getmembers(mod, isfunction)): + if not name.endswith("_variants"): + continue + if not name.startswith("create_"): + raise ValueError("Variant creators must start with create_") + title = name.replace("create_", "").replace("_variants", "").replace("_", " ").capitalize() + project = EvgProject(tasks=None, buildvariants=func()) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title} tests\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + +def write_tasks_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "tasks.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("tasks:\n") + + for name, func in sorted(getmembers(mod, isfunction)): + if name.startswith("_") or not name.endswith("_tasks"): + continue + if not name.startswith("create_"): + raise ValueError("Task creators must start with create_") + title = name.replace("create_", "").replace("_tasks", "").replace("_", " ").capitalize() + project = EvgProject(tasks=func(), buildvariants=None) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title} tests\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") + + +def 
write_functions_to_file(mod): + here = Path(__file__).absolute().parent + target = here.parent / "generated_configs" / "functions.yml" + if target.exists(): + target.unlink() + with target.open("w") as fid: + fid.write("functions:\n") + + functions = dict() + for name, func in sorted(getmembers(mod, isfunction)): + if name.startswith("_") or not name.endswith("_func"): + continue + if not name.startswith("create_"): + raise ValueError("Function creators must start with create_") + title = name.replace("create_", "").replace("_func", "").replace("_", " ").capitalize() + func_name, cmds = func() + functions = dict() + functions[func_name] = cmds + project = EvgProject(functions=functions, tasks=None, buildvariants=None) + out = ShrubService.generate_yaml(project).splitlines() + with target.open("a") as fid: + fid.write(f" # {title}\n") + for line in out[1:]: + fid.write(f"{line}\n") + fid.write("\n") + + # Remove extra trailing newline: + data = target.read_text().splitlines() + with target.open("w") as fid: + for line in data[:-1]: + fid.write(f"{line}\n") diff --git a/.evergreen/scripts/install-dependencies.sh b/.evergreen/scripts/install-dependencies.sh new file mode 100755 index 0000000000..23d865d0d8 --- /dev/null +++ b/.evergreen/scripts/install-dependencies.sh @@ -0,0 +1,77 @@ +#!/bin/bash +# Install the dependencies needed for an evergreen run. +set -eu + +HERE=$(dirname ${BASH_SOURCE:-$0}) +pushd "$(dirname "$(dirname $HERE)")" > /dev/null + +# Source the env files to pick up common variables. +if [ -f $HERE/env.sh ]; then + . $HERE/env.sh +fi + +# Set up the default bin directory. +if [ -z "${PYMONGO_BIN_DIR:-}" ]; then + PYMONGO_BIN_DIR="$HOME/.local/bin" + export PATH="$PYMONGO_BIN_DIR:$PATH" +fi + +# Helper function to pip install a dependency using a temporary python env. +function _pip_install() { + _HERE=$(dirname ${BASH_SOURCE:-$0}) + . $_HERE/../utils.sh + _VENV_PATH=$(mktemp -d) + if [ "Windows_NT" = "${OS:-}" ]; then + _VENV_PATH=$(cygpath -m $_VENV_PATH) + fi + echo "Installing $2 using pip..." + createvirtualenv "$(find_python3)" $_VENV_PATH + python -m pip install $1 + _suffix="" + if [ "Windows_NT" = "${OS:-}" ]; then + _suffix=".exe" + fi + ln -s "$(which $2)" $PYMONGO_BIN_DIR/${2}${_suffix} + # uv also comes with a uvx binary. + if [ $2 == "uv" ]; then + ln -s "$(which uvx)" $PYMONGO_BIN_DIR/uvx${_suffix} + fi + echo "Installed to ${PYMONGO_BIN_DIR}" + echo "Installing $2 using pip... done." +} + +# Ensure just is installed. +if ! command -v just &>/dev/null; then + # On most systems we can install directly. + _TARGET="" + if [ "Windows_NT" = "${OS:-}" ]; then + _TARGET="--target x86_64-pc-windows-msvc" + fi + _BIN_DIR=$PYMONGO_BIN_DIR + mkdir -p ${_BIN_DIR} + echo "Installing just..." + mkdir -p "$_BIN_DIR" 2>/dev/null || true + curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- $_TARGET --to "$_BIN_DIR" || { + # Remove just file if it exists (can be created if there was an install error). + rm -f ${_BIN_DIR}/just + _pip_install rust-just just + } + echo "Installing just... done." +fi + +# Ensure uv is installed. +if ! command -v uv &>/dev/null; then + _BIN_DIR=$PYMONGO_BIN_DIR + mkdir -p ${_BIN_DIR} + echo "Installing uv..." + # On most systems we can install directly. + curl -LsSf https://astral.sh/uv/install.sh | env UV_INSTALL_DIR="$_BIN_DIR" INSTALLER_NO_MODIFY_PATH=1 sh || { + _pip_install uv uv + } + if [ "Windows_NT" = "${OS:-}" ]; then + chmod +x "$(cygpath -u $_BIN_DIR)/uv.exe" + fi + echo "Installing uv... done." 
+fi
+
+popd > /dev/null
diff --git a/.evergreen/scripts/kms_tester.py b/.evergreen/scripts/kms_tester.py
new file mode 100644
index 0000000000..e3833ae63a
--- /dev/null
+++ b/.evergreen/scripts/kms_tester.py
@@ -0,0 +1,144 @@
+from __future__ import annotations
+
+import os
+
+from utils import (
+    DRIVERS_TOOLS,
+    LOGGER,
+    TMP_DRIVER_FILE,
+    create_archive,
+    read_env,
+    run_command,
+    write_env,
+)
+
+DIRS = dict(
+    gcp=f"{DRIVERS_TOOLS}/.evergreen/csfle/gcpkms",
+    azure=f"{DRIVERS_TOOLS}/.evergreen/csfle/azurekms",
+)
+
+
+def _setup_azure_vm(base_env: dict[str, str]) -> None:
+    LOGGER.info("Setting up Azure VM...")
+    azure_dir = DIRS["azure"]
+    env = base_env.copy()
+    env["AZUREKMS_SRC"] = TMP_DRIVER_FILE
+    env["AZUREKMS_DST"] = "~/"
+    run_command(f"{azure_dir}/copy-file.sh", env=env)
+
+    env = base_env.copy()
+    env["AZUREKMS_CMD"] = "tar xf mongo-python-driver.tgz"
+    run_command(f"{azure_dir}/run-command.sh", env=env)
+
+    env["AZUREKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential"
+    run_command(f"{azure_dir}/run-command.sh", env=env)
+
+    env["AZUREKMS_CMD"] = "NO_EXT=1 bash .evergreen/just.sh setup-tests kms azure-remote"
+    run_command(f"{azure_dir}/run-command.sh", env=env)
+    LOGGER.info("Setting up Azure VM... done.")
+
+
+def _setup_gcp_vm(base_env: dict[str, str]) -> None:
+    LOGGER.info("Setting up GCP VM...")
+    gcp_dir = DIRS["gcp"]
+    env = base_env.copy()
+    env["GCPKMS_SRC"] = TMP_DRIVER_FILE
+    env["GCPKMS_DST"] = f"{env['GCPKMS_INSTANCENAME']}:"
+    run_command(f"{gcp_dir}/copy-file.sh", env=env)
+
+    env = base_env.copy()
+    env["GCPKMS_CMD"] = "tar xf mongo-python-driver.tgz"
+    run_command(f"{gcp_dir}/run-command.sh", env=env)
+
+    env["GCPKMS_CMD"] = "sudo apt-get install -y python3-dev build-essential"
+    run_command(f"{gcp_dir}/run-command.sh", env=env)
+
+    env["GCPKMS_CMD"] = "NO_EXT=1 bash ./.evergreen/just.sh setup-tests kms gcp-remote"
+    run_command(f"{gcp_dir}/run-command.sh", env=env)
+    LOGGER.info("Setting up GCP VM... done.")
+
+
+def _load_kms_config(sub_test_target: str) -> dict[str, str]:
+    target_dir = DIRS[sub_test_target]
+    config = read_env(f"{target_dir}/secrets-export.sh")
+    base_env = os.environ.copy()
+    for key, value in config.items():
+        base_env[key] = str(value)
+    return base_env
+
+
+def setup_kms(sub_test_name: str) -> None:
+    if "-" in sub_test_name:
+        sub_test_target, sub_test_type = sub_test_name.split("-")
+    else:
+        sub_test_target = sub_test_name
+        sub_test_type = ""
+
+    assert sub_test_target in ["azure", "gcp"], sub_test_target
+    assert sub_test_type in ["", "remote", "fail"], sub_test_type
+    success = sub_test_type != "fail"
+    kms_dir = DIRS[sub_test_target]
+
+    if sub_test_target == "azure":
+        write_env("TEST_FLE_AZURE_AUTO")
+    else:
+        write_env("TEST_FLE_GCP_AUTO")
+
+    write_env("SUCCESS", success)
+
+    # For remote tests, there is no further work required.
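+    # (The "azure-remote"/"gcp-remote" sub-tests run on the VM itself, which the
+    # non-remote setup pass above has already provisioned and copied the driver to.)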
+ if sub_test_type == "remote": + return + + if sub_test_target == "azure": + run_command("./setup-secrets.sh", cwd=kms_dir) + + if success: + create_archive() + if sub_test_target == "azure": + os.environ["AZUREKMS_VMNAME_PREFIX"] = "PYTHON_DRIVER" + + # Found using "az vm image list --output table" + os.environ[ + "AZUREKMS_IMAGE" + ] = "Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest" + else: + os.environ["GCPKMS_IMAGEFAMILY"] = "debian-12" + + run_command("./setup.sh", cwd=kms_dir) + base_env = _load_kms_config(sub_test_target) + + if sub_test_target == "azure": + _setup_azure_vm(base_env) + else: + _setup_gcp_vm(base_env) + + if sub_test_target == "azure": + config = read_env(f"{kms_dir}/secrets-export.sh") + if success: + write_env("AZUREKMS_VMNAME", config["AZUREKMS_VMNAME"]) + + write_env("KEY_NAME", config["AZUREKMS_KEYNAME"]) + write_env("KEY_VAULT_ENDPOINT", config["AZUREKMS_KEYVAULTENDPOINT"]) + + +def test_kms_send_to_remote(sub_test_name: str) -> None: + env = _load_kms_config(sub_test_name) + if sub_test_name == "azure": + key_name = os.environ["KEY_NAME"] + key_vault_endpoint = os.environ["KEY_VAULT_ENDPOINT"] + env[ + "AZUREKMS_CMD" + ] = f'KEY_NAME="{key_name}" KEY_VAULT_ENDPOINT="{key_vault_endpoint}" bash ./.evergreen/just.sh run-tests' + else: + env["GCPKMS_CMD"] = "./.evergreen/just.sh run-tests" + cmd = f"{DIRS[sub_test_name]}/run-command.sh" + run_command(cmd, env=env) + + +def teardown_kms(sub_test_name: str) -> None: + run_command(f"{DIRS[sub_test_name]}/teardown.sh") + + +if __name__ == "__main__": + setup_kms() diff --git a/.evergreen/scripts/mod_wsgi_tester.py b/.evergreen/scripts/mod_wsgi_tester.py new file mode 100644 index 0000000000..5968849068 --- /dev/null +++ b/.evergreen/scripts/mod_wsgi_tester.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +import os +import sys +import time +import urllib.error +import urllib.request +from pathlib import Path +from shutil import which + +from utils import LOGGER, ROOT, run_command, write_env + + +def make_request(url, timeout=10): + for _ in range(int(timeout)): + try: + urllib.request.urlopen(url) # noqa: S310 + return + except urllib.error.HTTPError: + pass + time.sleep(1) + raise TimeoutError(f"Failed to access {url}") + + +def setup_mod_wsgi(sub_test_name: str) -> None: + env = os.environ.copy() + if sub_test_name == "embedded": + env["MOD_WSGI_CONF"] = "mod_wsgi_test_embedded.conf" + elif sub_test_name == "standalone": + env["MOD_WSGI_CONF"] = "mod_wsgi_test.conf" + else: + raise ValueError("mod_wsgi sub test must be either 'standalone' or 'embedded'") + write_env("MOD_WSGI_CONF", env["MOD_WSGI_CONF"]) + apache = which("apache2") + if not apache and Path("/usr/lib/apache2/mpm-prefork/apache2").exists(): + apache = "/usr/lib/apache2/mpm-prefork/apache2" + if apache: + apache_config = "apache24ubuntu161404.conf" + else: + apache = which("httpd") + if not apache: + raise ValueError("Could not find apache2 or httpd") + apache_config = "apache22amazon.conf" + python_version = ".".join(str(val) for val in sys.version_info[:2]) + mod_wsgi_version = 4 + so_file = f"/opt/python/mod_wsgi/python_version/{python_version}/mod_wsgi_version/{mod_wsgi_version}/mod_wsgi.so" + write_env("MOD_WSGI_SO", so_file) + env["MOD_WSGI_SO"] = so_file + env["PYTHONHOME"] = f"/opt/python/{python_version}" + env["PROJECT_DIRECTORY"] = project_directory = str(ROOT) + write_env("APACHE_BINARY", apache) + write_env("APACHE_CONFIG", apache_config) + uri1 = f"http://localhost:8080/interpreter1{project_directory}" + 
write_env("TEST_URI1", uri1) + uri2 = f"http://localhost:8080/interpreter2{project_directory}" + write_env("TEST_URI2", uri2) + run_command(f"{apache} -k start -f {ROOT}/test/mod_wsgi_test/{apache_config}", env=env) + + # Wait for the endpoints to be available. + try: + make_request(uri1, 10) + make_request(uri2, 10) + except Exception as e: + LOGGER.error(Path("error_log").read_text()) + raise e + + +def test_mod_wsgi() -> None: + sys.path.insert(0, ROOT) + from test.mod_wsgi_test.test_client import main, parse_args + + uri1 = os.environ["TEST_URI1"] + uri2 = os.environ["TEST_URI2"] + args = f"-n 25000 -t 100 parallel {uri1} {uri2}" + try: + main(*parse_args(args.split())) + + args = f"-n 25000 serial {uri1} {uri2}" + main(*parse_args(args.split())) + except Exception as e: + LOGGER.error(Path("error_log").read_text()) + raise e + + +def teardown_mod_wsgi() -> None: + apache = os.environ["APACHE_BINARY"] + apache_config = os.environ["APACHE_CONFIG"] + + run_command(f"{apache} -k stop -f {ROOT}/test/mod_wsgi_test/{apache_config}") + + +if __name__ == "__main__": + setup_mod_wsgi() diff --git a/.evergreen/scripts/oidc_tester.py b/.evergreen/scripts/oidc_tester.py new file mode 100644 index 0000000000..ac2960371e --- /dev/null +++ b/.evergreen/scripts/oidc_tester.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +import os + +from utils import ( + DRIVERS_TOOLS, + TMP_DRIVER_FILE, + create_archive, + read_env, + run_command, + write_env, +) + +K8S_NAMES = ["aks", "gke", "eks"] +K8S_REMOTE_NAMES = [f"{n}-remote" for n in K8S_NAMES] + + +def _get_target_dir(sub_test_name: str) -> str: + if sub_test_name == "default": + target_dir = "auth_oidc" + elif sub_test_name.startswith("azure"): + target_dir = "auth_oidc/azure" + elif sub_test_name.startswith("gcp"): + target_dir = "auth_oidc/gcp" + elif sub_test_name in K8S_NAMES + K8S_REMOTE_NAMES: + target_dir = "auth_oidc/k8s" + else: + raise ValueError(f"Invalid sub test name '{sub_test_name}'") + return f"{DRIVERS_TOOLS}/.evergreen/{target_dir}" + + +def setup_oidc(sub_test_name: str) -> dict[str, str] | None: + target_dir = _get_target_dir(sub_test_name) + env = os.environ.copy() + + if sub_test_name == "eks" and "AWS_ACCESS_KEY_ID" in os.environ: + # Store AWS creds for kubectl access. 
+        for key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"]:
+            if key in os.environ:
+                write_env(key, os.environ[key])
+
+    if sub_test_name == "azure":
+        env["AZUREOIDC_VMNAME_PREFIX"] = "PYTHON_DRIVER"
+    if "-remote" not in sub_test_name:
+        if sub_test_name == "azure":
+            # Found using "az vm image list --output table"
+            env["AZUREOIDC_IMAGE"] = "Canonical:0001-com-ubuntu-server-jammy:22_04-lts-gen2:latest"
+        else:
+            env["GCPKMS_IMAGEFAMILY"] = "debian-12"
+        run_command(f"bash {target_dir}/setup.sh", env=env)
+    if sub_test_name in K8S_NAMES:
+        run_command(f"bash {target_dir}/setup-pod.sh {sub_test_name}")
+        run_command(f"bash {target_dir}/run-self-test.sh")
+        return None
+
+    source_file = None
+    if sub_test_name == "default":
+        source_file = f"{target_dir}/secrets-export.sh"
+    elif sub_test_name in ["azure-remote", "gcp-remote"]:
+        source_file = "./secrets-export.sh"
+    if sub_test_name in K8S_REMOTE_NAMES:
+        return os.environ.copy()
+    if source_file is None:
+        return None
+
+    config = read_env(source_file)
+    write_env("MONGODB_URI_SINGLE", config["MONGODB_URI_SINGLE"])
+    write_env("MONGODB_URI", config["MONGODB_URI"])
+    write_env("DB_IP", config["MONGODB_URI"])
+
+    if sub_test_name == "default":
+        write_env("OIDC_TOKEN_FILE", config["OIDC_TOKEN_FILE"])
+        write_env("OIDC_TOKEN_DIR", config["OIDC_TOKEN_DIR"])
+        if "OIDC_DOMAIN" in config:
+            write_env("OIDC_DOMAIN", config["OIDC_DOMAIN"])
+    elif sub_test_name == "azure-remote":
+        write_env("AZUREOIDC_RESOURCE", config["AZUREOIDC_RESOURCE"])
+    elif sub_test_name == "gcp-remote":
+        write_env("GCPOIDC_AUDIENCE", config["GCPOIDC_AUDIENCE"])
+    return config
+
+
+def test_oidc_send_to_remote(sub_test_name: str) -> None:
+    env = os.environ.copy()
+    target_dir = _get_target_dir(sub_test_name)
+    create_archive()
+    if sub_test_name in ["azure", "gcp"]:
+        upper_name = sub_test_name.upper()
+        env[f"{upper_name}OIDC_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE
+        env[
+            f"{upper_name}OIDC_TEST_CMD"
+        ] = f"NO_EXT=1 OIDC_ENV={sub_test_name} ./.evergreen/run-mongodb-oidc-test.sh"
+    elif sub_test_name in K8S_NAMES:
+        env["K8S_DRIVERS_TAR_FILE"] = TMP_DRIVER_FILE
+        env["K8S_TEST_CMD"] = "OIDC_ENV=k8s ./.evergreen/run-mongodb-oidc-test.sh"
+    run_command(f"bash {target_dir}/run-driver-test.sh", env=env)
+
+
+def teardown_oidc(sub_test_name: str) -> None:
+    target_dir = _get_target_dir(sub_test_name)
+    # For k8s, make sure an error while tearing down the pod doesn't prevent
+    # the Atlas server teardown.
+ error = None + if sub_test_name in K8S_NAMES: + try: + run_command(f"bash {target_dir}/teardown-pod.sh") + except Exception as e: + error = e + run_command(f"bash {target_dir}/teardown.sh") + if error: + raise error diff --git a/.evergreen/scripts/perf-submission-setup.sh b/.evergreen/scripts/perf-submission-setup.sh new file mode 100755 index 0000000000..ecb38751a5 --- /dev/null +++ b/.evergreen/scripts/perf-submission-setup.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# We use the requester expansion to determine whether the data is from a mainline evergreen run or not + +set -eu + +# shellcheck disable=SC2154 +if [ "${requester}" == "commit" ]; then + echo "is_mainline: true" >> expansion.yml +else + echo "is_mainline: false" >> expansion.yml +fi + +# We parse the username out of the order_id as patches append that in and SPS does not need that information +# shellcheck disable=SC2154 +echo "parsed_order_id: $(echo "${revision_order_id}" | awk -F'_' '{print $NF}')" >> expansion.yml diff --git a/.evergreen/scripts/perf-submission.sh b/.evergreen/scripts/perf-submission.sh new file mode 100755 index 0000000000..f7c3ea6664 --- /dev/null +++ b/.evergreen/scripts/perf-submission.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# We use the requester expansion to determine whether the data is from a mainline evergreen run or not + +set -eu + +# Submit the performance data to the SPS endpoint +# shellcheck disable=SC2154 +response=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X 'POST' \ + "https://performance-monitoring-api.corp.mongodb.com/raw_perf_results/cedar_report?project=${project_id}&version=${version_id}&variant=${build_variant}&order=${parsed_order_id}&task_name=${task_name}&task_id=${task_id}&execution=${execution}&mainline=${is_mainline}" \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d @results.json) + +http_status=$(echo "$response" | grep "HTTP_STATUS" | awk -F':' '{print $2}') +response_body=$(echo "$response" | sed '/HTTP_STATUS/d') + +# We want to throw an error if the data was not successfully submitted +if [ "$http_status" -ne 200 ]; then + echo "Error: Received HTTP status $http_status" + echo "Response Body: $response_body" + exit 1 +fi + +echo "Response Body: $response_body" +echo "HTTP Status: $http_status" diff --git a/.evergreen/scripts/resync-all-specs.py b/.evergreen/scripts/resync-all-specs.py new file mode 100644 index 0000000000..8e58e56da2 --- /dev/null +++ b/.evergreen/scripts/resync-all-specs.py @@ -0,0 +1,126 @@ +from __future__ import annotations + +import argparse +import os +import pathlib +import subprocess +from argparse import Namespace +from subprocess import CalledProcessError +from typing import Optional + + +def resync_specs(directory: pathlib.Path, errored: dict[str, str]) -> None: + """Actually sync the specs""" + print("Beginning to sync specs") + for spec in os.scandir(directory): + if not spec.is_dir(): + continue + + if spec.name in ["asynchronous"]: + continue + try: + subprocess.run( + ["bash", "./.evergreen/resync-specs.sh", spec.name], # noqa: S603, S607 + capture_output=True, + text=True, + check=True, + ) + except CalledProcessError as exc: + errored[spec.name] = exc.stderr + print("Done syncing specs") + + +def apply_patches(errored): + print("Beginning to apply patches") + subprocess.run(["bash", "./.evergreen/remove-unimplemented-tests.sh"], check=True) # noqa: S603, S607 + try: + subprocess.run( + ["git apply -R --allow-empty --whitespace=fix ./.evergreen/spec-patch/*"], # noqa: S607 + shell=True, # noqa: S602 + check=True, + 
stderr=subprocess.PIPE,
+            text=True,
+        )
+    except CalledProcessError as exc:
+        errored["applying patches"] = exc.stderr
+
+
+def check_new_spec_directories(directory: pathlib.Path) -> list[str]:
+    """Check to see if there are any directories in the spec repo that don't exist in pymongo/test"""
+    spec_dir = pathlib.Path(os.environ["MDB_SPECS"]) / "source"
+    spec_set = {
+        entry.name.replace("-", "_")
+        for entry in os.scandir(spec_dir)
+        if entry.is_dir()
+        and (pathlib.Path(entry.path) / "tests").is_dir()
+        and len(list(os.scandir(pathlib.Path(entry.path) / "tests"))) > 1
+    }
+    test_set = {entry.name.replace("-", "_") for entry in os.scandir(directory) if entry.is_dir()}
+    known_mappings = {
+        "ocsp_support": "ocsp",
+        "client_side_operations_timeout": "csot",
+        "mongodb_handshake": "handshake",
+        "load_balancers": "load_balancer",
+        "connection_monitoring_and_pooling": "connection_monitoring",
+        "command_logging_and_monitoring": "command_logging",
+        "initial_dns_seedlist_discovery": "srv_seedlist",
+        "server_discovery_and_monitoring": "sdam_monitoring",
+    }
+
+    for k, v in known_mappings.items():
+        if k in spec_set:
+            spec_set.remove(k)
+            spec_set.add(v)
+    return list(spec_set - test_set)
+
+
+def write_summary(errored: dict[str, str], new: list[str], filename: Optional[str]) -> None:
+    """Generate the PR description"""
+    pr_body = ""
+    process = subprocess.run(
+        ["git diff --name-only | awk -F'/' '{print $2}' | sort | uniq"],  # noqa: S607
+        shell=True,  # noqa: S602
+        capture_output=True,
+        text=True,
+        check=True,
+    )
+    succeeded = process.stdout.strip().split()
+    if len(succeeded) > 0:
+        pr_body += "The following specs were changed:\n -"
+        pr_body += "\n -".join(succeeded)
+        pr_body += "\n"
+    if len(errored) > 0:
+        pr_body += "\n\nThe following spec syncs encountered errors:"
+        for k, v in errored.items():
+            pr_body += f"\n -{k}\n```{v}\n```"
+        pr_body += "\n"
+    if len(new) > 0:
+        pr_body += "\n\nThe following directories are in the specification repository and not in our test directory:\n -"
+        pr_body += "\n -".join(new)
+        pr_body += "\n"
+    if pr_body != "":
+        if filename is None:
+            print(f"\n{pr_body}")
+        else:
+            with open(filename, "w") as f:
+                # replacements made for proper json
+                f.write(pr_body.replace("\n", "\\n").replace("\t", "\\t"))
+
+
+def main(args: Namespace):
+    directory = pathlib.Path("./test")
+    errored: dict[str, str] = {}
+    resync_specs(directory, errored)
+    apply_patches(errored)
+    new = check_new_spec_directories(directory)
+    write_summary(errored, new, args.filename)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="Python Script to resync all specs and generate summary for PR."
+    )
+    parser.add_argument(
+        "--filename", help="Name of file for the summary to be written into.", default=None
+    )
+    args = parser.parse_args()
+    main(args)
diff --git a/.evergreen/scripts/resync-all-specs.sh b/.evergreen/scripts/resync-all-specs.sh
new file mode 100755
index 0000000000..41e4a2bc73
--- /dev/null
+++ b/.evergreen/scripts/resync-all-specs.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+# Run spec syncing script and create PR
+set -eu
+
+# SETUP
+SRC_URL="https://github.com/mongodb/specifications.git"
+# needs to be set for resync-specs.sh
+SPEC_SRC="$(realpath "../specifications")"
+SCRIPT="$(realpath "./.evergreen/resync-specs.sh")"
+
+# Clone the spec repo if the directory does not exist
+if [[ ! -d $SPEC_SRC ]]; then
+    git clone $SRC_URL $SPEC_SRC
+    if [[ $? -ne 0 ]]; then
+        echo "Error: Failed to clone repository."
+        exit 1
+    fi
+fi
+
+# Set environment variable to the cloned spec repo for resync-specs.sh
+export MDB_SPECS="$SPEC_SRC"
+
+# Check that resync-specs.sh exists and is executable
+if [[ ! -x $SCRIPT ]]; then
+    echo "Error: $SCRIPT not found or is not executable."
+    exit 1
+fi
+
+PR_DESC="spec_sync.txt"
+
+# Run the python script that actually does all the resyncing.
+if [ -z "${CI:-}" ]
+then
+    # We're running locally.
+    python3 ./.evergreen/scripts/resync-all-specs.py
+else
+    /opt/devtools/bin/python3.11 ./.evergreen/scripts/resync-all-specs.py --filename "$PR_DESC"
+    if [[ -f $PR_DESC ]]; then
+        # Changes were made -> call the script to create a PR for us.
+        .evergreen/scripts/create-spec-pr.sh "$PR_DESC"
+        rm "$PR_DESC"
+    fi
+fi
diff --git a/.evergreen/scripts/run-getdata.sh b/.evergreen/scripts/run-getdata.sh
new file mode 100755
index 0000000000..9435a5fcc3
--- /dev/null
+++ b/.evergreen/scripts/run-getdata.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Get the debug data for an evergreen task.
+set -eu
+
+. ${DRIVERS_TOOLS}/.evergreen/get-distro.sh || true
+get_distro || true
+echo $DISTRO
+echo $MARCH
+echo $OS
+
+set -x
+uname -a || true
+ls /etc/*release* || true
+cc --version || true
+gcc --version || true
+clang --version || true
+gcov --version || true
+lcov --version || true
+llvm-cov --version || true
+echo $PATH
+ls -la /usr/local/Cellar/llvm/*/bin/ || true
+ls -la /usr/local/Cellar/ || true
+scan-build --version || true
+genhtml --version || true
+valgrind --version || true
+set +x
diff --git a/.evergreen/scripts/run-server.sh b/.evergreen/scripts/run-server.sh
new file mode 100755
index 0000000000..298eedcd3e
--- /dev/null
+++ b/.evergreen/scripts/run-server.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -eu
+
+HERE=$(dirname ${BASH_SOURCE:-$0})
+
+# Try to source the env file.
+if [ -f $HERE/env.sh ]; then
+    echo "Sourcing env file"
+    source $HERE/env.sh
+fi
+
+uv run $HERE/run_server.py "$@"
diff --git a/.evergreen/scripts/run_server.py b/.evergreen/scripts/run_server.py
new file mode 100644
index 0000000000..a35fbb57a8
--- /dev/null
+++ b/.evergreen/scripts/run_server.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import os
+from typing import Any
+
+from utils import DRIVERS_TOOLS, ROOT, get_test_options, run_command
+
+
+def set_env(name: str, value: Any = "1") -> None:
+    os.environ[name] = str(value)
+
+
+def start_server():
+    opts, extra_opts = get_test_options(
+        "Run a MongoDB server. All given flags will be passed to run-orchestration.sh in DRIVERS_TOOLS.",
+        require_sub_test_name=False,
+        allow_extra_opts=True,
+    )
+    test_name = opts.test_name
+
+    # drivers-evergreen-tools expects the version variable to be named MONGODB_VERSION.
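+    # e.g. a task expansion of VERSION=8.0 is forwarded as MONGODB_VERSION=8.0 below.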
+ if "VERSION" in os.environ: + os.environ["MONGODB_VERSION"] = os.environ["VERSION"] + + if test_name == "auth_aws": + set_env("AUTH_AWS") + + elif test_name == "load_balancer": + set_env("LOAD_BALANCER") + + elif test_name == "search_index": + os.environ["TOPOLOGY"] = "replica_set" + os.environ["MONGODB_VERSION"] = "7.0" + + if not os.environ.get("TEST_CRYPT_SHARED"): + set_env("SKIP_CRYPT_SHARED") + + if opts.ssl: + extra_opts.append("--ssl") + if test_name != "ocsp": + certs = ROOT / "test/certificates" + set_env("TLS_CERT_KEY_FILE", certs / "client.pem") + set_env("TLS_PEM_KEY_FILE", certs / "server.pem") + set_env("TLS_CA_FILE", certs / "ca.pem") + + if opts.auth: + extra_opts.append("--auth") + + if opts.verbose: + extra_opts.append("-v") + elif opts.quiet: + extra_opts.append("-q") + + cmd = ["bash", f"{DRIVERS_TOOLS}/.evergreen/run-orchestration.sh", *extra_opts] + run_command(cmd, cwd=DRIVERS_TOOLS) + + +if __name__ == "__main__": + start_server() diff --git a/.evergreen/scripts/run_tests.py b/.evergreen/scripts/run_tests.py new file mode 100644 index 0000000000..c1c29c58bc --- /dev/null +++ b/.evergreen/scripts/run_tests.py @@ -0,0 +1,219 @@ +from __future__ import annotations + +import json +import logging +import os +import platform +import shutil +import sys +from datetime import datetime +from pathlib import Path +from shutil import which + +try: + import importlib_metadata +except ImportError: + from importlib import metadata as importlib_metadata + + +import pytest +from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command + +AUTH = os.environ.get("AUTH", "noauth") +SSL = os.environ.get("SSL", "nossl") +UV_ARGS = os.environ.get("UV_ARGS", "") +TEST_PERF = os.environ.get("TEST_PERF") +GREEN_FRAMEWORK = os.environ.get("GREEN_FRAMEWORK") +TEST_ARGS = os.environ.get("TEST_ARGS", "").split() +TEST_NAME = os.environ.get("TEST_NAME") +SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME") + + +def list_packages(): + packages = set() + for distribution in importlib_metadata.distributions(): + if distribution.name: + packages.add(distribution.name) + print("Package Version URL") + print("------------------- ----------- ----------------------------------------------------") + for name in sorted(packages): + distribution = importlib_metadata.distribution(name) + url = "" + if distribution.origin is not None: + url = distribution.origin.url + print(f"{name:20s}{distribution.version:12s}{url}") + print("------------------- ----------- ----------------------------------------------------\n") + + +def handle_perf(start_time: datetime): + end_time = datetime.now() + elapsed_secs = (end_time - start_time).total_seconds() + with open("results.json") as fid: + results = json.load(fid) + LOGGER.info("results.json:\n%s", json.dumps(results, indent=2)) + + results = dict( + status="PASS", + exit_code=0, + test_file="BenchMarkTests", + start=int(start_time.timestamp()), + end=int(end_time.timestamp()), + elapsed=elapsed_secs, + ) + report = dict(failures=0, results=[results]) + LOGGER.info("report.json\n%s", json.dumps(report, indent=2)) + + with open("report.json", "w", newline="\n") as fid: + json.dump(report, fid) + + +def handle_green_framework() -> None: + if GREEN_FRAMEWORK == "gevent": + from gevent import monkey + + monkey.patch_all() + + # Never run async tests with a framework. 
+    if len(TEST_ARGS) <= 1:
+        TEST_ARGS.extend(["-m", "not default_async and default"])
+    else:
+        for i in range(len(TEST_ARGS) - 1):
+            if "-m" in TEST_ARGS[i]:
+                TEST_ARGS[i + 1] = f"not default_async and {TEST_ARGS[i + 1]}"
+
+    LOGGER.info(f"Running tests with {GREEN_FRAMEWORK}...")
+
+
+def handle_c_ext() -> None:
+    if platform.python_implementation() != "CPython":
+        return
+    sys.path.insert(0, str(ROOT / "tools"))
+    from fail_if_no_c import main as fail_if_no_c
+
+    fail_if_no_c()
+
+
+def handle_pymongocrypt() -> None:
+    import pymongocrypt
+
+    LOGGER.info(f"pymongocrypt version: {pymongocrypt.__version__}")
+    LOGGER.info(f"libmongocrypt version: {pymongocrypt.libmongocrypt_version()}")
+
+
+def handle_aws_lambda() -> None:
+    env = os.environ.copy()
+    target_dir = ROOT / "test/lambda"
+    env["TEST_LAMBDA_DIRECTORY"] = str(target_dir)
+    env.setdefault("AWS_REGION", "us-east-1")
+    dirs = ["pymongo", "gridfs", "bson"]
+    # Store the original .so files.
+    before_sos = []
+    for dname in dirs:
+        before_sos.extend(f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so"))
+    # Build the c extensions.
+    docker = which("docker") or which("podman")
+    if not docker:
+        raise ValueError("Could not find docker!")
+    image = "quay.io/pypa/manylinux2014_x86_64:latest"
+    run_command(
+        f'{docker} run --rm -v "{ROOT}:/src" --platform linux/amd64 {image} /src/test/lambda/build_internal.sh'
+    )
+    for dname in dirs:
+        target = ROOT / "test/lambda/mongodb" / dname
+        shutil.rmtree(target, ignore_errors=True)
+        shutil.copytree(ROOT / dname, target)
+    # Remove the original so files from the lambda directory.
+    for so_path in before_sos:
+        (ROOT / "test/lambda/mongodb" / so_path).unlink()
+    # Remove the new so files from the ROOT directory.
+    for dname in dirs:
+        so_paths = [f"{f.parent.name}/{f.name}" for f in (ROOT / dname).glob("*.so")]
+        for so_path in list(so_paths):
+            if so_path not in before_sos:
+                Path(so_path).unlink()
+
+    script_name = "run-deployed-lambda-aws-tests.sh"
+    run_command(f"bash {DRIVERS_TOOLS}/.evergreen/aws_lambda/{script_name}", env=env)
+
+
+def run() -> None:
+    # Add diagnostic for python version.
+    print("Running with python", sys.version)
+
+    # List the installed packages.
+    list_packages()
+
+    # Handle the green framework first so it can patch modules.
+    if GREEN_FRAMEWORK:
+        handle_green_framework()
+
+    # Ensure C extensions if applicable.
+    if not os.environ.get("NO_EXT"):
+        handle_c_ext()
+
+    if os.environ.get("PYMONGOCRYPT_LIB"):
+        handle_pymongocrypt()
+
+    LOGGER.info(f"Test setup:\n{AUTH=}\n{SSL=}\n{UV_ARGS=}\n{TEST_ARGS=}")
+
+    # Record the start time for a perf test.
+    if TEST_PERF:
+        start_time = datetime.now()
+
+    # Run mod_wsgi tests using the helper.
+    if TEST_NAME == "mod_wsgi":
+        from mod_wsgi_tester import test_mod_wsgi
+
+        test_mod_wsgi()
+        return
+
+    # Send kms tests to run remotely.
+    if TEST_NAME == "kms" and SUB_TEST_NAME in ["azure", "gcp"]:
+        from kms_tester import test_kms_send_to_remote
+
+        test_kms_send_to_remote(SUB_TEST_NAME)
+        return
+
+    # Handle doctests.
+    if TEST_NAME == "doctest":
+        from sphinx.cmd.build import main
+
+        result = main("-E -b doctest doc ./doc/_build/doctest".split())
+        sys.exit(result)
+
+    # Send ecs tests to run remotely.
+    if TEST_NAME == "auth_aws" and SUB_TEST_NAME == "ecs":
+        run_command(f"{DRIVERS_TOOLS}/.evergreen/auth_aws/aws_setup.sh ecs")
+        return
+
+    # Send OIDC tests to run remotely.
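+    # e.g. SUB_TEST_NAME=azure archives the driver (create_archive) and runs the
+    # suite on the remote VM via oidc_tester.test_oidc_send_to_remote.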
+    if (
+        TEST_NAME == "auth_oidc"
+        and SUB_TEST_NAME != "default"
+        and not SUB_TEST_NAME.endswith("-remote")
+    ):
+        from oidc_tester import test_oidc_send_to_remote
+
+        test_oidc_send_to_remote(SUB_TEST_NAME)
+        return
+
+    # Run deployed aws lambda tests.
+    if TEST_NAME == "aws_lambda":
+        handle_aws_lambda()
+        return
+
+    if os.environ.get("DEBUG_LOG"):
+        TEST_ARGS.extend(f"-o log_cli_level={logging.DEBUG}".split())
+
+    # Run local tests.
+    ret = pytest.main(TEST_ARGS + sys.argv[1:])
+    if ret != 0:
+        sys.exit(ret)
+
+    # Handle perf test post actions.
+    if TEST_PERF:
+        handle_perf(start_time)
+
+
+if __name__ == "__main__":
+    run()
diff --git a/.evergreen/scripts/setup-dev-env.sh b/.evergreen/scripts/setup-dev-env.sh
new file mode 100755
index 0000000000..209857d542
--- /dev/null
+++ b/.evergreen/scripts/setup-dev-env.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+# Set up a development environment on an evergreen host.
+set -eu
+
+HERE=$(dirname ${BASH_SOURCE:-$0})
+HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )"
+ROOT=$(dirname "$(dirname $HERE)")
+pushd $ROOT > /dev/null
+
+# Bail early if running on GitHub Actions.
+if [ -n "${GITHUB_ACTION:-}" ]; then
+    exit 0
+fi
+
+# Source the env files to pick up common variables.
+if [ -f $HERE/env.sh ]; then
+    . $HERE/env.sh
+fi
+# PYTHON_BINARY or PYTHON_VERSION may be defined in test-env.sh.
+if [ -f $HERE/test-env.sh ]; then
+    . $HERE/test-env.sh
+fi
+
+# Ensure dependencies are installed.
+bash $HERE/install-dependencies.sh
+
+# Get the appropriate UV_PYTHON.
+. $ROOT/.evergreen/utils.sh
+
+if [ -z "${PYTHON_BINARY:-}" ]; then
+    if [ -n "${PYTHON_VERSION:-}" ]; then
+        PYTHON_BINARY=$(get_python_binary $PYTHON_VERSION)
+    else
+        PYTHON_BINARY=$(find_python3)
+    fi
+fi
+export UV_PYTHON=${PYTHON_BINARY}
+echo "Using python $UV_PYTHON"
+
+# Add the default install path to the path if needed.
+if [ -z "${PYMONGO_BIN_DIR:-}" ]; then
+    export PATH="$PATH:$HOME/.local/bin"
+fi
+
+# Set up venv, making sure c extensions build unless disabled.
+if [ -z "${NO_EXT:-}" ]; then
+    export PYMONGO_C_EXT_MUST_BUILD=1
+fi
+# Set up visual studio env on Windows spawn hosts.
+if [ -f $HOME/.visualStudioEnv.sh ]; then
+    set +u
+    SSH_TTY=1 source $HOME/.visualStudioEnv.sh
+    set -u
+fi
+uv sync
+
+echo "Setting up python environment... done."
+
+popd > /dev/null
diff --git a/.evergreen/scripts/setup-system.sh b/.evergreen/scripts/setup-system.sh
new file mode 100755
index 0000000000..9158414cce
--- /dev/null
+++ b/.evergreen/scripts/setup-system.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+# Set up the system on an evergreen host.
+set -eu
+
+HERE=$(dirname ${BASH_SOURCE:-$0})
+pushd "$(dirname "$(dirname $HERE)")"
+echo "Setting up system..."
+bash .evergreen/scripts/configure-env.sh
+source .evergreen/scripts/env.sh
+bash $DRIVERS_TOOLS/.evergreen/setup.sh
+bash .evergreen/scripts/install-dependencies.sh
+popd
+
+# Enable core dumps if enabled on the machine
+# Copied from https://github.com/mongodb/mongo/blob/master/etc/evergreen.yml
+if [ -f /proc/self/coredump_filter ]; then
+    # Set the shell process (and its children processes) to dump ELF headers (bit 4),
+    # anonymous shared mappings (bit 1), and anonymous private mappings (bit 0).
+    echo 0x13 >/proc/self/coredump_filter
+
+    if [ -f /sbin/sysctl ]; then
+        # Check that the core pattern is set explicitly on our distro image instead
+        # of being the OS's default value. This ensures that coredump names are consistent
+        # across distros and can be picked up by Evergreen.
+        core_pattern=$(/sbin/sysctl -n "kernel.core_pattern")
+        if [ "$core_pattern" = "dump_%e.%p.core" ]; then
+            echo "Enabling coredumps"
+            ulimit -c unlimited
+        fi
+    fi
+fi
+
+if [ "$(uname -s)" = "Darwin" ]; then
+    core_pattern_mac=$(/usr/sbin/sysctl -n "kern.corefile")
+    if [ "$core_pattern_mac" = "dump_%N.%P.core" ]; then
+        echo "Enabling coredumps"
+        ulimit -c unlimited
+    fi
+fi
+
+if [ -w /etc/hosts ]; then
+    SUDO=""
+else
+    SUDO="sudo"
+fi
+
+# Add 'server' and 'hostname_not_in_cert' as hostnames
+echo "127.0.0.1 server" | $SUDO tee -a /etc/hosts
+echo "127.0.0.1 hostname_not_in_cert" | $SUDO tee -a /etc/hosts
+
+echo "Setting up system... done."
diff --git a/.evergreen/scripts/setup-tests.sh b/.evergreen/scripts/setup-tests.sh
new file mode 100755
index 0000000000..1074c7eaaf
--- /dev/null
+++ b/.evergreen/scripts/setup-tests.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Set up the test environment, including secrets and services.
+set -eu
+
+# Supported/used environment variables:
+#  AUTH                 Set to enable authentication. Defaults to "noauth".
+#  SSL                  Set to enable SSL. Defaults to "nossl".
+#  GREEN_FRAMEWORK      The green framework to test with, if any.
+#  COVERAGE             If non-empty, run the test suite with coverage.
+#  COMPRESSORS          If non-empty, install appropriate compressor.
+#  LIBMONGOCRYPT_URL    The URL to download libmongocrypt.
+#  TEST_CRYPT_SHARED    If non-empty, install crypt_shared lib.
+#  MONGODB_API_VERSION  The mongodb api version to use in tests.
+#  MONGODB_URI          If non-empty, use as the MONGODB_URI in tests.
+
+SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0})
+
+# Try to source the env file.
+if [ -f $SCRIPT_DIR/env.sh ]; then
+    source $SCRIPT_DIR/env.sh
+fi
+
+echo "Setting up tests with args \"$*\"..."
+uv run $SCRIPT_DIR/setup_tests.py "$@"
+echo "Setting up tests with args \"$*\"... done."
diff --git a/.evergreen/scripts/setup_tests.py b/.evergreen/scripts/setup_tests.py
new file mode 100644
index 0000000000..3f0a8cc7f9
--- /dev/null
+++ b/.evergreen/scripts/setup_tests.py
@@ -0,0 +1,483 @@
+from __future__ import annotations
+
+import base64
+import io
+import os
+import platform
+import shutil
+import stat
+import tarfile
+from pathlib import Path
+from urllib import request
+
+from utils import (
+    DRIVERS_TOOLS,
+    ENV_FILE,
+    HERE,
+    LOGGER,
+    PLATFORM,
+    ROOT,
+    TEST_SUITE_MAP,
+    Distro,
+    get_test_options,
+    read_env,
+    run_command,
+    write_env,
+)
+
+# Passthrough environment variables.
+PASS_THROUGH_ENV = [
+    "GREEN_FRAMEWORK",
+    "NO_EXT",
+    "MONGODB_API_VERSION",
+    "DEBUG_LOG",
+    "PYTHON_BINARY",
+    "PYTHON_VERSION",
+    "REQUIRE_FIPS",
+    "IS_WIN32",
+]
+
+# Map the test name to test extra.
+EXTRAS_MAP = {
+    "auth_aws": "aws",
+    "auth_oidc": "aws",
+    "encryption": "encryption",
+    "enterprise_auth": "gssapi",
+    "kms": "encryption",
+    "ocsp": "ocsp",
+    "pyopenssl": "ocsp",
+}
+
+
+# Map the test name to test group.
+GROUP_MAP = dict(mockupdb="mockupdb", perf="perf")
+
+# The python version used for perf tests.
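+# (Pinning a full patch release keeps benchmark results comparable between runs.)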
+PERF_PYTHON_VERSION = "3.10.11" + + +def is_set(var: str) -> bool: + value = os.environ.get(var, "") + return len(value.strip()) > 0 + + +def get_distro() -> Distro: + name = "" + version_id = "" + arch = platform.machine() + with open("/etc/os-release") as fid: + for line in fid.readlines(): + line = line.replace('"', "") # noqa: PLW2901 + if line.startswith("NAME="): + _, _, name = line.strip().partition("=") + if line.startswith("VERSION_ID="): + _, _, version_id = line.strip().partition("=") + return Distro(name=name, version_id=version_id, arch=arch) + + +def setup_libmongocrypt(): + target = "" + if PLATFORM == "windows": + # PYTHON-2808 Ensure this machine has the CA cert for google KMS. + if is_set("TEST_FLE_GCP_AUTO"): + run_command('powershell.exe "Invoke-WebRequest -URI https://oauth2.googleapis.com/"') + target = "windows-test" + + elif PLATFORM == "darwin": + target = "macos" + + else: + distro = get_distro() + if distro.name.startswith("Debian"): + target = f"debian{distro.version_id}" + elif distro.name.startswith("Ubuntu"): + if distro.version_id == "20.04": + target = "debian11" + elif distro.version_id == "22.04": + target = "debian12" + elif distro.version_id == "24.04": + target = "debian13" + elif distro.name.startswith("Red Hat"): + if distro.version_id.startswith("7"): + target = "rhel-70-64-bit" + elif distro.version_id.startswith("8"): + if distro.arch == "aarch64": + target = "rhel-82-arm64" + else: + target = "rhel-80-64-bit" + + if not is_set("LIBMONGOCRYPT_URL"): + if not target: + raise ValueError("Cannot find libmongocrypt target for current platform!") + url = f"https://s3.amazonaws.com/mciuploads/libmongocrypt/{target}/master/latest/libmongocrypt.tar.gz" + else: + url = os.environ["LIBMONGOCRYPT_URL"] + + shutil.rmtree(HERE / "libmongocrypt", ignore_errors=True) + + LOGGER.info(f"Fetching {url}...") + with request.urlopen(request.Request(url), timeout=15.0) as response: # noqa: S310 + if response.status == 200: + fileobj = io.BytesIO(response.read()) + with tarfile.open("libmongocrypt.tar.gz", fileobj=fileobj) as fid: + fid.extractall(Path.cwd() / "libmongocrypt") + LOGGER.info(f"Fetching {url}... done.") + + run_command("ls -la libmongocrypt") + run_command("ls -la libmongocrypt/nocrypto") + + if PLATFORM == "windows": + # libmongocrypt's windows dll is not marked executable. + run_command("chmod +x libmongocrypt/nocrypto/bin/mongocrypt.dll") + + +def load_config_from_file(path: str | Path) -> dict[str, str]: + config = read_env(path) + for key, value in config.items(): + write_env(key, value) + return config + + +def get_secrets(name: str) -> dict[str, str]: + secrets_dir = Path(f"{DRIVERS_TOOLS}/.evergreen/secrets_handling") + run_command(f"bash {secrets_dir.as_posix()}/setup-secrets.sh {name}", cwd=secrets_dir) + return load_config_from_file(secrets_dir / "secrets-export.sh") + + +def handle_test_env() -> None: + opts, _ = get_test_options("Set up the test environment and services.") + test_name = opts.test_name + sub_test_name = opts.sub_test_name + AUTH = "auth" if opts.auth else "noauth" + SSL = "ssl" if opts.ssl else "nossl" + TEST_ARGS = "" + + # Start compiling the args we'll pass to uv. + UV_ARGS = ["--extra test --no-group dev"] + + test_title = test_name + if sub_test_name: + test_title += f" {sub_test_name}" + + # Create the test env file with the initial set of values. 
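# For a sketch of the end result: write_env() in utils.py appends one
# `export KEY="value"` line per call, so the finished test-env.sh is plain
# bash that later steps can simply source, e.g. (hypothetical values):
#   export PIP_QUIET="1"
#   export TEST_NAME="encryption"
#   export AUTH="noauth"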
+ with ENV_FILE.open("w", newline="\n") as fid: + fid.write("#!/usr/bin/env bash\n") + fid.write("set +x\n") + ENV_FILE.chmod(ENV_FILE.stat().st_mode | stat.S_IEXEC) + + write_env("PIP_QUIET") # Quiet by default. + write_env("PIP_PREFER_BINARY") # Prefer binary dists by default. + + # Set an environment variable for the test name and sub test name. + write_env(f"TEST_{test_name.upper()}") + write_env("TEST_NAME", test_name) + write_env("SUB_TEST_NAME", sub_test_name) + + # Handle pass through env vars. + for var in PASS_THROUGH_ENV: + if is_set(var) or getattr(opts, var.lower(), ""): + write_env(var, os.environ.get(var, getattr(opts, var.lower(), ""))) + + if extra := EXTRAS_MAP.get(test_name, ""): + UV_ARGS.append(f"--extra {extra}") + + if group := GROUP_MAP.get(test_name, ""): + UV_ARGS.append(f"--group {group}") + + if opts.test_min_deps: + UV_ARGS.append("--resolution=lowest-direct") + + if test_name == "auth_oidc": + from oidc_tester import setup_oidc + + config = setup_oidc(sub_test_name) + if not config: + AUTH = "noauth" + + if test_name in ["aws_lambda", "search_index"]: + env = os.environ.copy() + env["MONGODB_VERSION"] = "7.0" + env["LAMBDA_STACK_NAME"] = "dbx-python-lambda" + write_env("LAMBDA_STACK_NAME", env["LAMBDA_STACK_NAME"]) + run_command( + f"bash {DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh", + env=env, + cwd=DRIVERS_TOOLS, + ) + + if test_name == "search_index": + AUTH = "auth" + + if test_name == "ocsp": + SSL = "ssl" + + write_env("AUTH", AUTH) + write_env("SSL", SSL) + LOGGER.info(f"Setting up '{test_title}' with {AUTH=} and {SSL=}...") + + if test_name == "aws_lambda": + UV_ARGS.append("--group pip") + # Store AWS creds if they were given. + if "AWS_ACCESS_KEY_ID" in os.environ: + for key in ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN"]: + if key in os.environ: + write_env(key, os.environ[key]) + + if AUTH != "noauth": + if test_name == "auth_oidc": + DB_USER = config["OIDC_ADMIN_USER"] + DB_PASSWORD = config["OIDC_ADMIN_PWD"] + elif test_name == "search_index": + config = read_env(f"{DRIVERS_TOOLS}/.evergreen/atlas/secrets-export.sh") + DB_USER = config["DRIVERS_ATLAS_LAMBDA_USER"] + DB_PASSWORD = config["DRIVERS_ATLAS_LAMBDA_PASSWORD"] + write_env("MONGODB_URI", config["MONGODB_URI"]) + else: + DB_USER = "bob" + DB_PASSWORD = "pwd123" # noqa: S105 + write_env("DB_USER", DB_USER) + write_env("DB_PASSWORD", DB_PASSWORD) + LOGGER.info("Added auth, DB_USER: %s", DB_USER) + + if is_set("MONGODB_URI"): + write_env("PYMONGO_MUST_CONNECT", "true") + + if opts.disable_test_commands: + write_env("PYMONGO_DISABLE_TEST_COMMANDS", "1") + + if test_name == "enterprise_auth": + config = get_secrets("drivers/enterprise_auth") + if PLATFORM == "windows": + LOGGER.info("Setting GSSAPI_PASS") + write_env("GSSAPI_PASS", config["SASL_PASS"]) + write_env("GSSAPI_CANONICALIZE", "true") + else: + # BUILD-3830 + krb_conf = ROOT / ".evergreen/krb5.conf.empty" + krb_conf.touch() + write_env("KRB5_CONFIG", krb_conf) + LOGGER.info("Writing keytab") + keytab = base64.b64decode(config["KEYTAB_BASE64"]) + keytab_file = ROOT / ".evergreen/drivers.keytab" + with keytab_file.open("wb") as fid: + fid.write(keytab) + principal = config["PRINCIPAL"] + LOGGER.info("Running kinit") + os.environ["KRB5_CONFIG"] = str(krb_conf) + cmd = f"kinit -k -t {keytab_file} -p {principal}" + run_command(cmd) + + LOGGER.info("Setting GSSAPI variables") + write_env("GSSAPI_HOST", config["SASL_HOST"]) + write_env("GSSAPI_PORT", config["SASL_PORT"]) + write_env("GSSAPI_PRINCIPAL", 
config["PRINCIPAL"]) + + if test_name == "doctest": + UV_ARGS.append("--extra docs") + + if test_name == "load_balancer": + SINGLE_MONGOS_LB_URI = os.environ.get( + "SINGLE_MONGOS_LB_URI", "mongodb://127.0.0.1:8000/?loadBalanced=true" + ) + MULTI_MONGOS_LB_URI = os.environ.get( + "MULTI_MONGOS_LB_URI", "mongodb://127.0.0.1:8001/?loadBalanced=true" + ) + if SSL != "nossl": + SINGLE_MONGOS_LB_URI += "&tls=true" + MULTI_MONGOS_LB_URI += "&tls=true" + write_env("SINGLE_MONGOS_LB_URI", SINGLE_MONGOS_LB_URI) + write_env("MULTI_MONGOS_LB_URI", MULTI_MONGOS_LB_URI) + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + cmd = f'bash "{DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh" start' + run_command(cmd) + + if test_name == "mod_wsgi": + from mod_wsgi_tester import setup_mod_wsgi + + setup_mod_wsgi(sub_test_name) + + if test_name == "ocsp": + if sub_test_name: + os.environ["OCSP_SERVER_TYPE"] = sub_test_name + for name in ["OCSP_SERVER_TYPE", "ORCHESTRATION_FILE"]: + if name not in os.environ: + raise ValueError(f"Please set {name}") + + server_type = os.environ["OCSP_SERVER_TYPE"] + orch_file = os.environ["ORCHESTRATION_FILE"] + ocsp_algo = orch_file.split("-")[0] + if server_type == "no-responder": + tls_should_succeed = "false" if "mustStaple-disableStapling" in orch_file else "true" + else: + tls_should_succeed = "true" if "valid" in server_type else "false" + + write_env("OCSP_TLS_SHOULD_SUCCEED", tls_should_succeed) + write_env("CA_FILE", f"{DRIVERS_TOOLS}/.evergreen/ocsp/{ocsp_algo}/ca.pem") + + if server_type != "no-responder": + env = os.environ.copy() + env["SERVER_TYPE"] = server_type + env["OCSP_ALGORITHM"] = ocsp_algo + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/ocsp/setup.sh", env=env) + + # The mock OCSP responder MUST BE started before the mongod as the mongod expects that + # a responder will be available upon startup. + version = os.environ.get("VERSION", "latest") + cmd = [ + "bash", + f"{DRIVERS_TOOLS}/.evergreen/run-orchestration.sh", + "--ssl", + "--version", + version, + ] + if opts.verbose: + cmd.append("-v") + elif opts.quiet: + cmd.append("-q") + run_command(cmd, cwd=DRIVERS_TOOLS) + + if SSL != "nossl": + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + write_env("CLIENT_PEM", f"{DRIVERS_TOOLS}/.evergreen/x509gen/client.pem") + write_env("CA_PEM", f"{DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem") + + compressors = os.environ.get("COMPRESSORS") or opts.compressor + if compressors == "snappy": + UV_ARGS.append("--extra snappy") + elif compressors == "zstd": + UV_ARGS.append("--extra zstd") + + if test_name in ["encryption", "kms"]: + # Check for libmongocrypt download. + if not (ROOT / "libmongocrypt").exists(): + setup_libmongocrypt() + + if not opts.test_min_deps: + UV_ARGS.append( + "--with pymongocrypt@git+https://github.com/mongodb/libmongocrypt@master#subdirectory=bindings/python" + ) + + # Use the nocrypto build to avoid dependency issues with older windows/python versions. 
+ BASE = ROOT / "libmongocrypt/nocrypto" + if PLATFORM == "linux": + if (BASE / "lib/libmongocrypt.so").exists(): + PYMONGOCRYPT_LIB = BASE / "lib/libmongocrypt.so" + else: + PYMONGOCRYPT_LIB = BASE / "lib64/libmongocrypt.so" + elif PLATFORM == "darwin": + PYMONGOCRYPT_LIB = BASE / "lib/libmongocrypt.dylib" + else: + PYMONGOCRYPT_LIB = BASE / "bin/mongocrypt.dll" + if not PYMONGOCRYPT_LIB.exists(): + raise RuntimeError("Cannot find libmongocrypt shared object file") + write_env("PYMONGOCRYPT_LIB", PYMONGOCRYPT_LIB.as_posix()) + # PATH is updated by configure-env.sh for access to mongocryptd. + + if test_name == "encryption": + if not DRIVERS_TOOLS: + raise RuntimeError("Missing DRIVERS_TOOLS") + csfle_dir = Path(f"{DRIVERS_TOOLS}/.evergreen/csfle") + run_command(f"bash {csfle_dir.as_posix()}/setup-secrets.sh", cwd=csfle_dir) + load_config_from_file(csfle_dir / "secrets-export.sh") + run_command(f"bash {csfle_dir.as_posix()}/start-servers.sh") + + if sub_test_name == "pyopenssl": + UV_ARGS.append("--extra ocsp") + + if opts.crypt_shared: + config = read_env(f"{DRIVERS_TOOLS}/mo-expansion.sh") + CRYPT_SHARED_DIR = Path(config["CRYPT_SHARED_LIB_PATH"]).parent.as_posix() + LOGGER.info("Using crypt_shared_dir %s", CRYPT_SHARED_DIR) + if PLATFORM == "windows": + write_env("PATH", f"{CRYPT_SHARED_DIR}:$PATH") + else: + write_env( + "DYLD_FALLBACK_LIBRARY_PATH", + f"{CRYPT_SHARED_DIR}:${{DYLD_FALLBACK_LIBRARY_PATH:-}}", + ) + write_env("LD_LIBRARY_PATH", f"{CRYPT_SHARED_DIR}:${{LD_LIBRARY_PATH:-}}") + + if test_name == "kms": + from kms_tester import setup_kms + + setup_kms(sub_test_name) + + if test_name == "auth_aws" and sub_test_name != "ecs-remote": + auth_aws_dir = f"{DRIVERS_TOOLS}/.evergreen/auth_aws" + if "AWS_ROLE_SESSION_NAME" in os.environ: + write_env("AWS_ROLE_SESSION_NAME") + if sub_test_name != "ecs": + aws_setup = f"{auth_aws_dir}/aws_setup.sh" + run_command(f"bash {aws_setup} {sub_test_name}") + creds = read_env(f"{auth_aws_dir}/test-env.sh") + for name, value in creds.items(): + write_env(name, value) + else: + run_command(f"bash {auth_aws_dir}/setup-secrets.sh") + + if test_name == "atlas_connect": + secrets = get_secrets("drivers/atlas_connect") + + # Write file with Atlas X509 client certificate: + decoded = base64.b64decode(secrets["ATLAS_X509_DEV_CERT_BASE64"]).decode("utf8") + cert_file = ROOT / ".evergreen/atlas_x509_dev_client_certificate.pem" + with cert_file.open("w") as file: + file.write(decoded) + write_env( + "ATLAS_X509_DEV_WITH_CERT", + secrets["ATLAS_X509_DEV"] + "&tlsCertificateKeyFile=" + str(cert_file), + ) + + # We do not want the default client_context to be initialized. + write_env("DISABLE_CONTEXT") + + if test_name == "perf": + data_dir = ROOT / "specifications/source/benchmarking/data" + if not data_dir.exists(): + run_command("git clone --depth 1 https://github.com/mongodb/specifications.git") + run_command("tar xf extended_bson.tgz", cwd=data_dir) + run_command("tar xf parallel.tgz", cwd=data_dir) + run_command("tar xf single_and_multi_document.tgz", cwd=data_dir) + write_env("TEST_PATH", str(data_dir)) + write_env("OUTPUT_FILE", str(ROOT / "results.json")) + # Overwrite the UV_PYTHON from the env.sh file. + write_env("UV_PYTHON", "") + + UV_ARGS.append(f"--python={PERF_PYTHON_VERSION}") + + # PYTHON-4769 Run perf_test.py directly otherwise pytest's test collection negatively + # affects the benchmark results. 
+        if sub_test_name == "sync":
+            TEST_ARGS = f"test/performance/perf_test.py {TEST_ARGS}"
+        else:
+            TEST_ARGS = f"test/performance/async_perf_test.py {TEST_ARGS}"
+
+    # Add coverage if requested.
+    # Only cover CPython. PyPy reports suspiciously low coverage.
+    if opts.cov and platform.python_implementation() == "CPython":
+        # Keep in sync with combine-coverage.sh.
+        # coverage >=5 is needed for relative_files=true.
+        UV_ARGS.append("--group coverage")
+        TEST_ARGS = f"{TEST_ARGS} --cov"
+        write_env("COVERAGE")
+
+    if opts.green_framework:
+        framework = opts.green_framework or os.environ["GREEN_FRAMEWORK"]
+        UV_ARGS.append(f"--group {framework}")
+
+    else:
+        TEST_ARGS = f"-v --durations=5 {TEST_ARGS}"
+        TEST_SUITE = TEST_SUITE_MAP.get(test_name)
+        if TEST_SUITE:
+            TEST_ARGS = f"-m {TEST_SUITE} {TEST_ARGS}"
+
+    write_env("TEST_ARGS", TEST_ARGS)
+    write_env("UV_ARGS", " ".join(UV_ARGS))
+
+    LOGGER.info(f"Setting up test '{test_title}' with {AUTH=} and {SSL=}... done.")
+
+
+if __name__ == "__main__":
+    handle_test_env()
diff --git a/.evergreen/scripts/stop-server.sh b/.evergreen/scripts/stop-server.sh
new file mode 100755
index 0000000000..7599387f5f
--- /dev/null
+++ b/.evergreen/scripts/stop-server.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Stop a server that was started using run-orchestration.sh in DRIVERS_TOOLS.
+set -eu
+
+HERE=$(dirname ${BASH_SOURCE:-$0})
+HERE="$( cd -- "$HERE" > /dev/null 2>&1 && pwd )"
+
+# Try to source the env file.
+if [ -f $HERE/env.sh ]; then
+  echo "Sourcing env file"
+  source $HERE/env.sh
+fi
+
+bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh
diff --git a/.evergreen/scripts/teardown-tests.sh b/.evergreen/scripts/teardown-tests.sh
new file mode 100755
index 0000000000..898425b6cf
--- /dev/null
+++ b/.evergreen/scripts/teardown-tests.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Tear down any services that were used by tests.
+set -eu
+
+SCRIPT_DIR=$(dirname ${BASH_SOURCE:-$0})
+
+# Try to source the env file.
+if [ -f $SCRIPT_DIR/env.sh ]; then
+  echo "Sourcing env inputs"
+  . $SCRIPT_DIR/env.sh
+else
+  echo "Not sourcing env inputs"
+fi
+
+# Handle test inputs.
+if [ -f $SCRIPT_DIR/test-env.sh ]; then
+  echo "Sourcing test inputs"
+  . $SCRIPT_DIR/test-env.sh
+else
+  echo "Missing test inputs, please run 'just setup-tests'"
+fi
+
+# Tear down the test runner.
+uv run $SCRIPT_DIR/teardown_tests.py
diff --git a/.evergreen/scripts/teardown_tests.py b/.evergreen/scripts/teardown_tests.py
new file mode 100644
index 0000000000..7da0b60815
--- /dev/null
+++ b/.evergreen/scripts/teardown_tests.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import os
+import shutil
+import sys
+from pathlib import Path
+
+from utils import DRIVERS_TOOLS, LOGGER, ROOT, run_command
+
+TEST_NAME = os.environ.get("TEST_NAME", "unconfigured")
+SUB_TEST_NAME = os.environ.get("SUB_TEST_NAME")
+
+LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'...")
+
+# Shut down csfle servers if applicable.
+if TEST_NAME == "encryption":
+    run_command(f"bash {DRIVERS_TOOLS}/.evergreen/csfle/stop-servers.sh")
+
+# Shut down load balancer if applicable.
+elif TEST_NAME == "load_balancer":
+    run_command(f"bash {DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop")
+
+# Tear down kms VM if applicable.
+elif TEST_NAME == "kms" and SUB_TEST_NAME in ["azure", "gcp"]:
+    from kms_tester import teardown_kms
+
+    teardown_kms(SUB_TEST_NAME)
+
+# Tear down OIDC if applicable.
+elif TEST_NAME == "auth_oidc":
+    from oidc_tester import teardown_oidc
+
+    teardown_oidc(SUB_TEST_NAME)
+
+# Tear down ocsp if applicable.
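# (presumably stops the mock OCSP responder that setup_tests.py started via
# $DRIVERS_TOOLS/.evergreen/ocsp/setup.sh)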
+elif TEST_NAME == "ocsp": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/ocsp/teardown.sh") + +# Tear down atlas cluster if applicable. +if TEST_NAME in ["aws_lambda", "search_index"]: + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh") + +# Tear down auth_aws if applicable. +# We do not run web-identity hosts on macos, because the hosts lack permissions, +# so there is no reason to run the teardown, which would error with a 401. +elif TEST_NAME == "auth_aws" and sys.platform != "darwin": + run_command(f"bash {DRIVERS_TOOLS}/.evergreen/auth_aws/teardown.sh") + +# Tear down perf if applicable. +elif TEST_NAME == "perf": + shutil.rmtree(ROOT / "specifications", ignore_errors=True) + Path(os.environ["OUTPUT_FILE"]).unlink(missing_ok=True) + +# Tear down mog_wsgi if applicable. +elif TEST_NAME == "mod_wsgi": + from mod_wsgi_tester import teardown_mod_wsgi + + teardown_mod_wsgi() + +# Tear down coverage if applicable. +if os.environ.get("COVERAGE"): + shutil.rmtree(".pytest_cache", ignore_errors=True) + +LOGGER.info(f"Tearing down tests of type '{TEST_NAME}'... done.") diff --git a/.evergreen/scripts/upload-coverage-report.sh b/.evergreen/scripts/upload-coverage-report.sh new file mode 100755 index 0000000000..895664cbf2 --- /dev/null +++ b/.evergreen/scripts/upload-coverage-report.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Upload a coverate report to s3. +set -eu +aws s3 cp htmlcov/ s3://"$1"/coverage/"$2"/"$3"/htmlcov/ --recursive --acl public-read --region us-east-1 diff --git a/.evergreen/scripts/utils.py b/.evergreen/scripts/utils.py new file mode 100644 index 0000000000..2bc9c720d2 --- /dev/null +++ b/.evergreen/scripts/utils.py @@ -0,0 +1,227 @@ +from __future__ import annotations + +import argparse +import dataclasses +import logging +import os +import shlex +import subprocess +import sys +from pathlib import Path +from typing import Any + +HERE = Path(__file__).absolute().parent +ROOT = HERE.parent.parent +DRIVERS_TOOLS = os.environ.get("DRIVERS_TOOLS", "").replace(os.sep, "/") +TMP_DRIVER_FILE = "/tmp/mongo-python-driver.tgz" # noqa: S108 + +LOGGER = logging.getLogger("test") +logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") +ENV_FILE = HERE / "test-env.sh" +PLATFORM = "windows" if os.name == "nt" else sys.platform.lower() + + +@dataclasses.dataclass +class Distro: + name: str + version_id: str + arch: str + + +# Map the test name to a test suite. +TEST_SUITE_MAP = { + "atlas_connect": "atlas_connect", + "auth_aws": "auth_aws", + "auth_oidc": "auth_oidc", + "default": "", + "default_async": "default_async", + "default_sync": "default", + "encryption": "encryption", + "enterprise_auth": "auth", + "search_index": "search_index", + "kms": "kms", + "load_balancer": "load_balancer", + "mockupdb": "mockupdb", + "ocsp": "ocsp", + "perf": "perf", +} + +# Tests that require a sub test suite. +SUB_TEST_REQUIRED = ["auth_aws", "auth_oidc", "kms", "mod_wsgi", "perf"] + +EXTRA_TESTS = ["mod_wsgi", "aws_lambda", "doctest"] + +# Tests that do not use run-orchestration directly. 
+NO_RUN_ORCHESTRATION = [ + "auth_oidc", + "atlas_connect", + "aws_lambda", + "mockupdb", + "ocsp", +] + +# Mapping of env variables to options +OPTION_TO_ENV_VAR = {"cov": "COVERAGE", "crypt_shared": "TEST_CRYPT_SHARED"} + + +def get_test_options( + description, require_sub_test_name=True, allow_extra_opts=False +) -> tuple[argparse.Namespace, list[str]]: + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawDescriptionHelpFormatter + ) + if require_sub_test_name: + parser.add_argument( + "test_name", + choices=sorted(list(TEST_SUITE_MAP) + EXTRA_TESTS), + nargs="?", + default="default", + help="The optional name of the test suite to set up, typically the same name as a pytest marker.", + ) + parser.add_argument( + "sub_test_name", nargs="?", help="The optional sub test name, for example 'azure'." + ) + else: + parser.add_argument( + "test_name", + choices=set(list(TEST_SUITE_MAP) + EXTRA_TESTS) - set(NO_RUN_ORCHESTRATION), + nargs="?", + default="default", + help="The optional name of the test suite to be run, which informs the server configuration.", + ) + parser.add_argument( + "--verbose", "-v", action="store_true", help="Whether to log at the DEBUG level." + ) + parser.add_argument( + "--quiet", "-q", action="store_true", help="Whether to log at the WARNING level." + ) + parser.add_argument("--auth", action="store_true", help="Whether to add authentication.") + parser.add_argument("--ssl", action="store_true", help="Whether to add TLS configuration.") + parser.add_argument( + "--test-min-deps", action="store_true", help="Test against minimum dependency versions" + ) + + # Add the test modifiers. + if require_sub_test_name: + parser.add_argument( + "--debug-log", action="store_true", help="Enable pymongo standard logging." + ) + parser.add_argument("--cov", action="store_true", help="Add test coverage.") + parser.add_argument( + "--green-framework", + nargs=1, + choices=["gevent"], + help="Optional green framework to test against.", + ) + parser.add_argument( + "--compressor", + nargs=1, + choices=["zlib", "zstd", "snappy"], + help="Optional compression algorithm.", + ) + parser.add_argument("--crypt-shared", action="store_true", help="Test with crypt_shared.") + parser.add_argument("--no-ext", action="store_true", help="Run without c extensions.") + parser.add_argument( + "--mongodb-api-version", choices=["1"], help="MongoDB stable API version to use." + ) + parser.add_argument( + "--disable-test-commands", action="store_true", help="Disable test commands." + ) + + # Get the options. + if not allow_extra_opts: + opts, extra_opts = parser.parse_args(), [] + else: + opts, extra_opts = parser.parse_known_args() + + # Convert list inputs to strings. + for name in vars(opts): + value = getattr(opts, name) + if isinstance(value, list): + setattr(opts, name, value[0]) + + # Handle validation and environment variable overrides. + test_name = opts.test_name + sub_test_name = opts.sub_test_name if require_sub_test_name else "" + if require_sub_test_name and test_name in SUB_TEST_REQUIRED and not sub_test_name: + raise ValueError(f"Test '{test_name}' requires a sub_test_name") + handle_env_overrides(parser, opts) + if "auth" in test_name: + opts.auth = True + # 'auth_aws ecs' shouldn't have extra auth set. 
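# (presumably because the ECS task role supplies the AWS credentials itself,
# so no extra user/password auth should be configured)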
+ if test_name == "auth_aws" and sub_test_name == "ecs": + opts.auth = False + if opts.verbose: + LOGGER.setLevel(logging.DEBUG) + elif opts.quiet: + LOGGER.setLevel(logging.WARNING) + return opts, extra_opts + + +def handle_env_overrides(parser: argparse.ArgumentParser, opts: argparse.Namespace) -> None: + # Get the options, and then allow environment variable overrides. + for key in vars(opts): + if key in OPTION_TO_ENV_VAR: + env_var = OPTION_TO_ENV_VAR[key] + else: + env_var = key.upper() + if env_var in os.environ: + if parser.get_default(key) != getattr(opts, key): + LOGGER.info("Overriding env var '%s' with cli option", env_var) + elif env_var == "AUTH": + opts.auth = os.environ.get("AUTH") == "auth" + elif env_var == "SSL": + ssl_opt = os.environ.get("SSL", "") + opts.ssl = ssl_opt and ssl_opt.lower() != "nossl" + elif isinstance(getattr(opts, key), bool): + if os.environ[env_var]: + setattr(opts, key, True) + else: + setattr(opts, key, os.environ[env_var]) + + +def read_env(path: Path | str) -> dict[str, str]: + config = dict() + with Path(path).open() as fid: + for line in fid.readlines(): + if "=" not in line: + continue + name, _, value = line.strip().partition("=") + if value.startswith(('"', "'")): + value = value[1:-1] + name = name.replace("export ", "") + config[name] = value + return config + + +def write_env(name: str, value: Any = "1") -> None: + with ENV_FILE.open("a", newline="\n") as fid: + # Remove any existing quote chars. + value = str(value).replace('"', "") + fid.write(f'export {name}="{value}"\n') + + +def run_command(cmd: str | list[str], **kwargs: Any) -> None: + if isinstance(cmd, list): + cmd = " ".join(cmd) + LOGGER.info("Running command '%s'...", cmd) + kwargs.setdefault("check", True) + # Prevent overriding the python used by other tools. + env = kwargs.pop("env", os.environ).copy() + if "UV_PYTHON" in env: + del env["UV_PYTHON"] + kwargs["env"] = env + try: + subprocess.run(shlex.split(cmd), **kwargs) # noqa: PLW1510, S603 + except subprocess.CalledProcessError as e: + LOGGER.error(e.output) + LOGGER.error(str(e)) + sys.exit(e.returncode) + LOGGER.info("Running command '%s'... done.", cmd) + + +def create_archive() -> str: + run_command("git add .", cwd=ROOT) + run_command('git commit --no-verify -m "add files"', check=False, cwd=ROOT) + run_command(f"git archive -o {TMP_DRIVER_FILE} HEAD", cwd=ROOT) + return TMP_DRIVER_FILE diff --git a/.evergreen/setup-spawn-host.sh b/.evergreen/setup-spawn-host.sh new file mode 100755 index 0000000000..bada61e568 --- /dev/null +++ b/.evergreen/setup-spawn-host.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Set up a remote evergreen spawn host. +set -eu + +if [ -z "$1" ] + then + echo "Must supply a spawn host URL!" +fi + +target=$1 +user=${target%@*} +remote_dir=/home/$user/mongo-python-driver + +echo "Copying files to $target..." +rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_dir +echo "Copying files to $target... 
done" + +ssh $target $remote_dir/.evergreen/scripts/setup-system.sh +ssh $target "cd $remote_dir && PYTHON_BINARY=${PYTHON_BINARY:-} .evergreen/scripts/setup-dev-env.sh" diff --git a/.evergreen/spec-patch/PYTHON-2673.patch b/.evergreen/spec-patch/PYTHON-2673.patch new file mode 100644 index 0000000000..868538f7b7 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-2673.patch @@ -0,0 +1,64 @@ +diff --git a/test/load_balancer/cursors.json b/test/load_balancer/cursors.json +index 43e4fbb4f..4e2a55fd4 100644 +--- a/test/load_balancer/cursors.json ++++ b/test/load_balancer/cursors.json +@@ -376,7 +376,7 @@ + ] + }, + { ++ "description": "pinned connections are not returned after an network error during getMore", +- "description": "pinned connections are returned after an network error during getMore", + "operations": [ + { + "name": "failPoint", +@@ -440,7 +440,7 @@ + "object": "testRunner", + "arguments": { + "client": "client0", ++ "connections": 1 +- "connections": 0 + } + }, + { +@@ -659,7 +659,7 @@ + ] + }, + { ++ "description": "pinned connections are not returned to the pool after a non-network error on getMore", +- "description": "pinned connections are returned to the pool after a non-network error on getMore", + "operations": [ + { + "name": "failPoint", +@@ -715,7 +715,7 @@ + "object": "testRunner", + "arguments": { + "client": "client0", ++ "connections": 1 +- "connections": 0 + } + }, + { +diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json +index 63aabc04d..462fa0aac 100644 +--- a/test/load_balancer/sdam-error-handling.json ++++ b/test/load_balancer/sdam-error-handling.json +@@ -366,6 +366,9 @@ + { + "connectionCreatedEvent": {} + }, ++ { ++ "poolClearedEvent": {} ++ }, + { + "connectionClosedEvent": { + "reason": "error" +@@ -378,9 +375,6 @@ + "connectionCheckOutFailedEvent": { + "reason": "connectionError" + } +- }, +- { +- "poolClearedEvent": {} + } + ] + } diff --git a/.evergreen/spec-patch/PYTHON-3712.patch b/.evergreen/spec-patch/PYTHON-3712.patch new file mode 100644 index 0000000000..b48c05124c --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-3712.patch @@ -0,0 +1,14 @@ +diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json +index e44fad1b..4b492f7d 100644 +--- a/test/discovery_and_monitoring/unified/serverMonitoringMode.json ++++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json +@@ -5,7 +5,8 @@ + { + "topologies": [ + "single", +- "sharded" ++ "sharded", ++ "sharded-replicaset" + ], + "serverless": "forbid" + } diff --git a/.evergreen/spec-patch/PYTHON-4261.patch b/.evergreen/spec-patch/PYTHON-4261.patch new file mode 100644 index 0000000000..e4ffc5ce9f --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-4261.patch @@ -0,0 +1,61 @@ +diff --git a/test/server_selection_logging/replica-set.json b/test/server_selection_logging/replica-set.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/replica-set.json ++++ b/test/server_selection_logging/replica-set.json +@@ -184,7 +184,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/standalone.json ++++ b/test/server_selection_logging/standalone.json +@@ -191,7 +191,7 
@@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/sharded.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/sharded.json ++++ b/test/server_selection_logging/sharded.json +@@ -193,7 +193,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/operation-id.json +index 830b1ea51..5eba784bf 100644 +--- a/test/server_selection_logging/operation-id.json ++++ b/test/server_selection_logging/operation-id.json +@@ -197,7 +197,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", +@@ -383,7 +383,7 @@ + } + }, + { +- "level": "debug", ++ "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", diff --git a/.evergreen/spec-patch/PYTHON-4918.patch b/.evergreen/spec-patch/PYTHON-4918.patch new file mode 100644 index 0000000000..5f409c5870 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-4918.patch @@ -0,0 +1,24 @@ +diff --git a/test/connection_monitoring/pool-create-min-size-error.json b/test/connection_monitoring/pool-create-min-size-error.json +index 1c744b85..509b2a23 100644 +--- a/test/connection_monitoring/pool-create-min-size-error.json ++++ b/test/connection_monitoring/pool-create-min-size-error.json +@@ -49,15 +49,15 @@ + "type": "ConnectionCreated", + "address": 42 + }, ++ { ++ "type": "ConnectionPoolCleared", ++ "address": 42 ++ }, + { + "type": "ConnectionClosed", + "address": 42, + "connectionId": 42, + "reason": "error" +- }, +- { +- "type": "ConnectionPoolCleared", +- "address": 42 + } + ], + "ignore": [ diff --git a/.evergreen/spec-patch/PYTHON-5052.patch b/.evergreen/spec-patch/PYTHON-5052.patch new file mode 100644 index 0000000000..01cbc00116 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5052.patch @@ -0,0 +1,440 @@ +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json +new file mode 100644 +index 00000000..aa8046d2 +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalProperties.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "entity-client-observeTracingMessages-additionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": { ++ "foo": "bar" ++ } ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages must not have additional properties'", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json +new file mode 100644 +index 00000000..0b3a65f5 +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-additionalPropertyType.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "entity-client-observeTracingMessages-additionalPropertyType", ++ "schemaVersion": "1.26", ++ 
"createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": { ++ "enableCommandPayload": 0 ++ } ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages enableCommandPayload must be boolean", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json b/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json +new file mode 100644 +index 00000000..de3ef39a +--- /dev/null ++++ b/test/unified-test-format/invalid/entity-client-observeTracingMessages-type.json +@@ -0,0 +1,18 @@ ++{ ++ "description": "entity-client-observeTracingMessages-type", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0", ++ "observeTracingMessages": "foo" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "observeTracingMessages must be an object", ++ "operations": [] ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json b/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json +new file mode 100644 +index 00000000..5947a286 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-additionalProperties.json +@@ -0,0 +1,30 @@ ++{ ++ "description": "expectedTracingSpans-additionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "additional property foo not allowed in expectTracingMessages", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "ignoreExtraSpans": false, ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ], ++ "foo": 0 ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-clientType.json b/test/unified-test-format/invalid/expectedTracingSpans-clientType.json +new file mode 100644 +index 00000000..2fe7faea +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-clientType.json +@@ -0,0 +1,28 @@ ++{ ++ "description": "expectedTracingSpans-clientType", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "client type must be string", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": 0, ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json b/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json +new file mode 100644 +index 00000000..8a98d5ba +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-emptyNestedSpan.json +@@ -0,0 +1,29 @@ ++{ ++ "description": "expectedTracingSpans-emptyNestedSpan", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested spans must not have fewer than 1 items'", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ }, ++ "nested": [] ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json b/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json +new file mode 100644 +index 00000000..79a86744 +--- /dev/null ++++ 
b/test/unified-test-format/invalid/expectedTracingSpans-invalidNestedSpan.json +@@ -0,0 +1,31 @@ ++{ ++ "description": "expectedTracingSpans-invalidNestedSpan", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested span must have required property name", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ }, ++ "nested": [ ++ {} ++ ] ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json +new file mode 100644 +index 00000000..2fb1cd5b +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertyClient.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-missingPropertyClient", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required property client", ++ "operations": [], ++ "expectTracingMessages": { ++ "spans": [ ++ { ++ "name": "command", ++ "tags": { ++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json +new file mode 100644 +index 00000000..acd10307 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-missingPropertySpans.json +@@ -0,0 +1,20 @@ ++{ ++ "description": "expectedTracingSpans-missingPropertySpans", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required property spans", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0" ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json +new file mode 100644 +index 00000000..17299f86 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedAdditionalProperties.json +@@ -0,0 +1,28 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedAdditionalProperties", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "Span must not have additional properties", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": {}, ++ "nested": [], ++ "foo": "bar" ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json +new file mode 100644 +index 00000000..0257cd9b +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingName.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedMissingName", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required span name", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "tags": { 
++ "db.system": "mongodb" ++ } ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json +new file mode 100644 +index 00000000..a09ca31c +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedMissingTags.json +@@ -0,0 +1,25 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedMissingTags", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "missing required span tags", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo" ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json +new file mode 100644 +index 00000000..ccff0410 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedNestedMustBeArray.json +@@ -0,0 +1,27 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedNestedMustBeArray", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "nested spans must be an array", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": {}, ++ "nested": {} ++ } ++ ] ++ } ++ } ++ ] ++} +diff --git a/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json +new file mode 100644 +index 00000000..72af1c29 +--- /dev/null ++++ b/test/unified-test-format/invalid/expectedTracingSpans-spanMalformedTagsMustBeObject.json +@@ -0,0 +1,26 @@ ++{ ++ "description": "expectedTracingSpans-spanMalformedNestedMustBeObject", ++ "schemaVersion": "1.26", ++ "createEntities": [ ++ { ++ "client": { ++ "id": "client0" ++ } ++ } ++ ], ++ "tests": [ ++ { ++ "description": "span tags must be an object", ++ "operations": [], ++ "expectTracingMessages": { ++ "client": "client0", ++ "spans": [ ++ { ++ "name": "foo", ++ "tags": [] ++ } ++ ] ++ } ++ } ++ ] ++} diff --git a/.evergreen/spec-patch/PYTHON-5493.patch b/.evergreen/spec-patch/PYTHON-5493.patch new file mode 100644 index 0000000000..99c105dcef --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5493.patch @@ -0,0 +1,50 @@ +diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json +index 5799e834..72103b3c 100644 +--- a/test/connection_logging/connection-logging.json ++++ b/test/connection_logging/connection-logging.json +@@ -446,6 +446,22 @@ + } + } + }, ++ { ++ "level": "debug", ++ "component": "connection", ++ "data": { ++ "message": "Connection pool cleared", ++ "serverHost": { ++ "$$type": "string" ++ }, ++ "serverPort": { ++ "$$type": [ ++ "int", ++ "long" ++ ] ++ } ++ } ++ }, + { + "level": "debug", + "component": "connection", +@@ -498,22 +514,6 @@ + ] + } + } +- }, +- { +- "level": "debug", +- "component": "connection", +- "data": { +- "message": "Connection pool cleared", +- "serverHost": { +- "$$type": "string" +- }, +- "serverPort": { +- "$$type": [ +- "int", +- "long" +- ] +- } +- } + } + ] + } diff --git a/.evergreen/spec-patch/PYTHON-5529.patch b/.evergreen/spec-patch/PYTHON-5529.patch new 
file mode 100644 index 0000000000..a97602e055 --- /dev/null +++ b/.evergreen/spec-patch/PYTHON-5529.patch @@ -0,0 +1,587 @@ +diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json +index aa9c3eb2..212cd410 100644 +--- a/test/csot/command-execution.json ++++ b/test/csot/command-execution.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly during command execution", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", +@@ -69,8 +69,10 @@ + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, +- "heartbeatFrequencyMS": 500 ++ "heartbeatFrequencyMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +@@ -185,8 +187,10 @@ + "appName": "rttTooHighTest", + "w": 1, + "timeoutMS": 10, +- "heartbeatFrequencyMS": 500 ++ "heartbeatFrequencyMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +@@ -316,8 +320,10 @@ + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, +- "heartbeatFrequencyMS": 100000 ++ "heartbeatFrequencyMS": 100000, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] +diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json +index 3868b302..f9d03429 100644 +--- a/test/csot/convenient-transactions.json ++++ b/test/csot/convenient-transactions.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for the withTransaction API", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -21,8 +21,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 500 ++ "timeoutMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/error-transformations.json b/test/csot/error-transformations.json +index 4889e395..89be49f0 100644 +--- a/test/csot/error-transformations.json ++++ b/test/csot/error-transformations.json +@@ -1,6 +1,6 @@ + { + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.0", +@@ -26,8 +26,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json +index f1edbe68..9d8046d1 100644 +--- a/test/csot/global-timeoutMS.json ++++ b/test/csot/global-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS can be configured on a MongoClient", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -38,8 +38,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -217,8 +219,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -390,8 
+394,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -569,8 +575,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -762,8 +770,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -941,8 +951,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1120,8 +1132,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1305,8 +1319,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1484,8 +1500,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1663,8 +1681,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -1842,8 +1862,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2021,8 +2043,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2194,8 +2218,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2375,8 +2401,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2554,8 +2582,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2733,8 +2763,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -2906,8 +2938,10 @@ + "client": { + "id": "client", + "uriOptions": { +- 
"timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3079,8 +3113,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3258,8 +3294,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3441,8 +3479,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3628,8 +3668,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3807,8 +3849,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -3986,8 +4030,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4171,8 +4217,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4360,8 +4408,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4549,8 +4599,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4728,8 +4780,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -4913,8 +4967,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5102,8 +5158,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5297,8 +5355,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5482,8 +5542,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ 
"awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +@@ -5677,8 +5739,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 250 ++ "timeoutMS": 250, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json +index 291c6e72..58c59cb3 100644 +--- a/test/csot/non-tailable-cursors.json ++++ b/test/csot/non-tailable-cursors.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for non-tailable cursors", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4" +@@ -17,8 +17,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 200 ++ "timeoutMS": 200, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json +index 9daad260..5a0c9f36 100644 +--- a/test/csot/retryability-timeoutMS.json ++++ b/test/csot/retryability-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "timeoutMS behaves correctly for retryable operations", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.0", +@@ -26,8 +26,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 100 ++ "timeoutMS": 100, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" +diff --git a/test/csot/runCursorCommand.json b/test/csot/runCursorCommand.json +index 36f774fb..e5182e33 100644 +--- a/test/csot/runCursorCommand.json ++++ b/test/csot/runCursorCommand.json +@@ -1,6 +1,6 @@ + { + "description": "runCursorCommand", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4" +@@ -16,6 +16,10 @@ + { + "client": { + "id": "commandClient", ++ "uriOptions": { ++ "minPoolSize": 1 ++ }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", +diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json +index 13ea91c7..dbf163e4 100644 +--- a/test/csot/sessions-inherit-timeoutMS.json ++++ b/test/csot/sessions-inherit-timeoutMS.json +@@ -1,6 +1,6 @@ + { + "description": "sessions inherit timeoutMS from their parent MongoClient", +- "schemaVersion": "1.9", ++ "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", +@@ -21,8 +21,10 @@ + "client": { + "id": "client", + "uriOptions": { +- "timeoutMS": 500 ++ "timeoutMS": 500, ++ "minPoolSize": 1 + }, ++ "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", diff --git a/.evergreen/sync-spawn-host.sh b/.evergreen/sync-spawn-host.sh new file mode 100755 index 0000000000..61dd84ec22 --- /dev/null +++ b/.evergreen/sync-spawn-host.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Synchronize local files to a remote Evergreen spawn host. +set -eu + +if [ -z "$1" ] + then + echo "Must supply a spawn host URL!" +fi + +target=$1 +user=${target%@*} +remote_dir=/home/$user/mongo-python-driver + +echo "Copying files to $target..." +rsync -az -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:$remote_dir +echo "Copying files to $target... done." 
+echo "Syncing files to $target..." +# shellcheck disable=SC2034 +fswatch -o . | while read f; do rsync -hazv -e ssh --exclude '.git' --filter=':- .gitignore' -r . $target:/home/$user/mongo-python-driver; done +echo "Syncing files to $target... done." diff --git a/.evergreen/test-encryption-requirements.txt b/.evergreen/test-encryption-requirements.txt deleted file mode 100644 index d02df867ad..0000000000 --- a/.evergreen/test-encryption-requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -cffi>=1.12.0,<2 -cryptography>=2 -# boto3 is required by drivers-evergreen-tools/.evergreen/csfle/set-temp-creds.sh -boto3<2 diff --git a/.evergreen/utils.sh b/.evergreen/utils.sh index 67fa272683..dadb7db084 100755 --- a/.evergreen/utils.sh +++ b/.evergreen/utils.sh @@ -1,6 +1,38 @@ -#!/bin/bash -ex +#!/bin/bash +# Utility functions used by pymongo evergreen scripts. +set -eu -set -o xtrace +find_python3() { + PYTHON="" + # Find a suitable toolchain version, if available. + if [ "$(uname -s)" = "Darwin" ]; then + PYTHON="/Library/Frameworks/Python.Framework/Versions/3.10/bin/python3" + elif [ "Windows_NT" = "${OS:-}" ]; then # Magic variable in cygwin + PYTHON="C:/python/Python310/python.exe" + else + # Prefer our own toolchain, fall back to mongodb toolchain if it has Python 3.10+. + if [ -f "/opt/python/3.10/bin/python3" ]; then + PYTHON="/opt/python/Current/bin/python3" + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v5/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v5/bin/python3" + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v4/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v4/bin/python3" + elif is_python_310 "$(command -v /opt/mongodbtoolchain/v3/bin/python3)"; then + PYTHON="/opt/mongodbtoolchain/v3/bin/python3" + fi + fi + # Add a fallback system python3 if it is available and Python 3.10+. + if [ -z "$PYTHON" ]; then + if is_python_310 "$(command -v python3)"; then + PYTHON="$(command -v python3)" + fi + fi + if [ -z "$PYTHON" ]; then + echo "Cannot test without python3.10+ installed!" + exit 1 + fi + echo "$PYTHON" +} # Usage: # createvirtualenv /path/to/python /output/path/for/venv @@ -9,17 +41,20 @@ set -o xtrace createvirtualenv () { PYTHON=$1 VENVPATH=$2 - if $PYTHON -m virtualenv --version; then + + # Prefer venv + VENV="$PYTHON -m venv" + if [ "$(uname -s)" = "Darwin" ]; then VIRTUALENV="$PYTHON -m virtualenv" - elif $PYTHON -m venv -h>/dev/null; then - # System virtualenv might not be compatible with the python3 on our path - VIRTUALENV="$PYTHON -m venv" else - echo "Cannot test without virtualenv" - exit 1 + VIRTUALENV=$(command -v virtualenv 2>/dev/null || echo "$PYTHON -m virtualenv") + VIRTUALENV="$VIRTUALENV -p $PYTHON" fi - $VIRTUALENV $VENVPATH - if [ "Windows_NT" = "$OS" ]; then + if ! $VENV $VENVPATH 2>/dev/null; then + # Workaround for bug in older versions of virtualenv. + $VIRTUALENV $VENVPATH 2>/dev/null || $VIRTUALENV $VENVPATH + fi + if [ "Windows_NT" = "${OS:-}" ]; then # Workaround https://bugs.python.org/issue32451: # mongovenv/Scripts/activate: line 3: $'\r': command not found dos2unix $VENVPATH/Scripts/activate || true @@ -28,8 +63,8 @@ createvirtualenv () { . 
$VENVPATH/bin/activate fi + export PIP_QUIET=1 python -m pip install --upgrade pip - python -m pip install --upgrade setuptools wheel } # Usage: @@ -41,6 +76,7 @@ testinstall () { PYTHON=$1 RELEASE=$2 NO_VIRTUALENV=$3 + PYTHON_IMPL=$(python -c "import platform; print(platform.python_implementation())") if [ -z "$NO_VIRTUALENV" ]; then createvirtualenv $PYTHON venvtestinstall @@ -49,7 +85,11 @@ testinstall () { $PYTHON -m pip install --upgrade $RELEASE cd tools - $PYTHON fail_if_no_c.py + + if [ "$PYTHON_IMPL" = "CPython" ]; then + $PYTHON fail_if_no_c.py + fi + $PYTHON -m pip uninstall -y pymongo cd .. @@ -59,17 +99,50 @@ testinstall () { fi } -# Function that returns success if the provided Python binary is version 3.7 or later +# Function that returns success if the provided Python binary is version 3.10 or later # Usage: -# is_python_37 /path/to/python +# is_python_310 /path/to/python # * param1: Python binary -is_python_37() { +is_python_310() { if [ -z "$1" ]; then return 1 - elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 7))"; then - # runs when sys.version_info[:2] >= (3, 7) + elif $1 -c "import sys; exit(sys.version_info[:2] < (3, 10))"; then + # runs when sys.version_info[:2] >= (3, 10) return 0 else return 1 fi } + + +# Function that gets a python binary given a python version string. +# Versions can be of the form 3.xx or pypy3.xx. +get_python_binary() { + version=$1 + if [ "$(uname -s)" = "Darwin" ]; then + if [[ "$version" == *"t"* ]]; then + binary_name="python3t" + framework_dir="PythonT" + else + binary_name="python3" + framework_dir="Python" + fi + version=$(echo "$version" | sed 's/t//g') + PYTHON="/Library/Frameworks/$framework_dir.Framework/Versions/$version/bin/$binary_name" + elif [ "Windows_NT" = "${OS:-}" ]; then + version=$(echo $version | cut -d. 
-f1,2 | sed 's/\.//g; s/t//g') + if [ -n "${IS_WIN32:-}" ]; then + PYTHON="C:/python/32/Python$version/python.exe" + else + PYTHON="C:/python/Python$version/python.exe" + fi + else + PYTHON="/opt/python/$version/bin/python3" + fi + if is_python_310 "$(command -v $PYTHON)"; then + echo "$PYTHON" + else + echo "Could not find suitable python binary for '$version'" >&2 + return 1 + fi +} diff --git a/.flake8 b/.flake8 deleted file mode 100644 index e5bc58921a..0000000000 --- a/.flake8 +++ /dev/null @@ -1,30 +0,0 @@ -[flake8] -max-line-length = 100 -enable-extensions = G -extend-ignore = - G200, G202, - # black adds spaces around ':' - E203, - # E501 line too long (let black handle line length) - E501 - # B305 `.next()` is not a thing on Python 3 - B305 -per-file-ignores = - # E402 module level import not at top of file - pymongo/__init__.py: E402 - - # G004 Logging statement uses f-string - pymongo/event_loggers.py: G004 - - # E402 module level import not at top of file - # B011 Do not call assert False since python -O removes these calls - # F405 'Foo' may be undefined, or defined from star imports - # E741 ambiguous variable name - # B007 Loop control variable 'foo' not used within the loop body - # F403 'from foo import *' used; unable to detect undefined names - # B001 Do not use bare `except:` - # E722 do not use bare 'except' - # E731 do not assign a lambda expression, use a def - # F811 redefinition of unused 'foo' from line XXX - # F841 local variable 'foo' is assigned to but never used - test/*: E402, B011, F405, E741, B007, F403, B001, E722, E731, F811, F841 diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 8f02673e41..67ad992c75 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,2 +1,4 @@ # Initial pre-commit reformat 5578999a90e439fbca06fc0ffc98f4d04e96f7b4 +# pyupgrade and ruff +0092b0af79378abf35b6db73a082ecb91af1d973 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 15a41b6ce6..e21b87ddd3 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1 @@ -# Global owner for repo -* @blink1073 @juliusgeo @ShaneHarvey +* @mongodb/dbx-python diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..5bf500ba12 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,16 @@ +version: 2 +updates: + # GitHub Actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + groups: + actions: + patterns: + - "*" + # Python + - package-ecosystem: "pip" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..8185a38836 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,38 @@ + + +[Issue Key](https://jira.mongodb.org/browse/%7BISSUE_KEY%7D) +## Summary + + +## Changes in this PR + + +## Testing Plan + + +### Screenshots (optional) + + +## Checklist + +### Checklist for Author +- [ ] Did you update the changelog (if necessary)? +- [ ] Is the intention of the code captured in relevant tests? +- [ ] If there are new TODOs, has a related JIRA ticket been created? + +### Checklist for Reviewer {@primary_reviewer} +- [ ] Does the title of the PR reference a JIRA Ticket? +- [ ] Do you fully understand the implementation? (Would you be comfortable explaining how this code works to someone else?) +- [ ] Have you checked for spelling & grammar errors? +- [ ] Is all relevant documentation (README or docstring) updated? 
+ +## Focus Areas for Reviewer (optional) + diff --git a/.github/reviewers.txt b/.github/reviewers.txt new file mode 100644 index 0000000000..9e38ee71b5 --- /dev/null +++ b/.github/reviewers.txt @@ -0,0 +1,5 @@ +# List of reviewers for auto-assignment of reviews. +caseyclements +blink1073 +Jibola +NoahStapp diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000000..b138324bf4 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,68 @@ +name: "CodeQL" + +on: + push: + branches: [ "master", "v*"] + tags: ['*'] + pull_request: + workflow_call: + inputs: + ref: + required: true + type: string + schedule: + - cron: '17 10 * * 2' + +concurrency: + group: codeql-${{ github.ref }} + cancel-in-progress: true + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + runs-on: "ubuntu-latest" + timeout-minutes: 360 + permissions: + # required for all workflows + security-events: write + + strategy: + fail-fast: false + matrix: + include: + - language: c-cpp + build-mode: manual + - language: python + build-mode: none + - language: actions + build-mode: none + steps: + - name: Checkout repository + uses: actions/checkout@v5 + with: + ref: ${{ inputs.ref }} + persist-credentials: false + - uses: actions/setup-python@v6 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + queries: security-extended + config: | + paths-ignore: + - 'doc/**' + - 'tools/**' + - 'test/**' + + - if: matrix.build-mode == 'manual' + run: | + pip install -e . 
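+ # Note: the editable install above compiles the C extensions, which gives the manual c-cpp CodeQL analysis a build to trace.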
+ + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@64d10c13136e1c5bce3e5fbde8d4906eeaafc885 # v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/create-release-branch.yml b/.github/workflows/create-release-branch.yml new file mode 100644 index 0000000000..95a5e65c88 --- /dev/null +++ b/.github/workflows/create-release-branch.yml @@ -0,0 +1,57 @@ +name: Create Release Branch + +on: + workflow_dispatch: + inputs: + branch_name: + description: The name of the new branch + required: true + version: + description: The version to set on the branch + required: true + base_ref: + description: The base reference for the branch + push_changes: + description: Whether to push the changes + default: "true" + +concurrency: + group: create-branch-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +jobs: + create-branch: + environment: release + runs-on: ubuntu-latest + permissions: + id-token: write + contents: write + outputs: + version: ${{ steps.pre-publish.outputs.version }} + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: mongodb-labs/drivers-github-tools/setup@v3 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + artifactory_username: ${{ vars.ARTIFACTORY_USERNAME }} + - name: Get hatch + run: pip install hatch + - uses: mongodb-labs/drivers-github-tools/create-branch@v3 + id: create-branch + with: + branch_name: ${{ inputs.branch_name }} + version: ${{ inputs.version }} + base_ref: ${{ inputs.base_ref }} + push_changes: ${{ inputs.push_changes }} + version_bump_script: hatch version + evergreen_project: mongo-python-driver-release + release_workflow_path: ./.github/workflows/release-python.yml diff --git a/.github/workflows/dist.yml b/.github/workflows/dist.yml new file mode 100644 index 0000000000..84bf1ba893 --- /dev/null +++ b/.github/workflows/dist.yml @@ -0,0 +1,147 @@ +name: Python Dist + +on: + push: + tags: + - "[0-9]+.[0-9]+.[0-9]+" + - "[0-9]+.[0-9]+.[0-9]+.post[0-9]+" + - "[0-9]+.[0-9]+.[0-9]+[a-b][0-9]+" + - "[0-9]+.[0-9]+.[0-9]+rc[0-9]+" + workflow_dispatch: + pull_request: + workflow_call: + inputs: + ref: + required: true + type: string + +concurrency: + group: dist-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +jobs: + build_wheels: + name: Build wheels for ${{ matrix.buildplat[1] }} + runs-on: ${{ matrix.buildplat[0] }} + strategy: + # Ensure that a wheel builder finishes even if another fails + fail-fast: false + matrix: + # GitHub Actions doesn't support pairing matrix values together, let's improvise + # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 + buildplat: + - [ubuntu-latest, "manylinux_x86_64", "cp3*-manylinux_x86_64"] + - [ubuntu-latest, "manylinux_aarch64", "cp3*-manylinux_aarch64"] + - [ubuntu-latest, "manylinux_ppc64le", "cp3*-manylinux_ppc64le"] + - [ubuntu-latest, "manylinux_s390x", "cp3*-manylinux_s390x"] + - [ubuntu-latest, "manylinux_i686", "cp3*-manylinux_i686"] + - [windows-2022, "win_amd64", "cp3*-win_amd64"] + - [windows-2022, "win32", "cp3*-win32"] + - [windows-11-arm, "win_arm64", "cp3*-win_arm64"] + - [macos-14, "macos", "cp*-macosx_*"] + + steps: + - name: Checkout pymongo + uses: actions/checkout@v5 + with: + fetch-depth: 0 + persist-credentials: false + ref: ${{
inputs.ref }} + + - uses: actions/setup-python@v6 + with: + cache: 'pip' + python-version: 3.11 + cache-dependency-path: 'pyproject.toml' + allow-prereleases: true + + - name: Set up QEMU + if: runner.os == 'Linux' + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3 + with: + # setup-qemu-action by default uses `tonistiigi/binfmt:latest` image, + # which is out of date. This causes seg faults during build. + # Here we manually fix the version. + image: tonistiigi/binfmt:qemu-v8.1.5 + platforms: all + + - name: Install cibuildwheel + # Note: the default manylinux is manylinux_2_28 + run: | + python -m pip install -U pip + python -m pip install "cibuildwheel>=3.2.0,<4" + + - name: Build wheels + env: + CIBW_BUILD: ${{ matrix.buildplat[2] }} + run: python -m cibuildwheel --output-dir wheelhouse + + - name: Assert all versions in wheelhouse + if: ${{ ! startsWith(matrix.buildplat[1], 'macos') }} + run: | + ls wheelhouse/*cp310*.whl + ls wheelhouse/*cp311*.whl + ls wheelhouse/*cp312*.whl + ls wheelhouse/*cp313*.whl + ls wheelhouse/*cp314*.whl + # Free-threading builds: + ls wheelhouse/*cp314t*.whl + + - uses: actions/upload-artifact@v4 + with: + name: wheel-${{ matrix.buildplat[1] }} + path: ./wheelhouse/*.whl + if-no-files-found: error + + make_sdist: + name: Make SDist + runs-on: macos-latest + steps: + - uses: actions/checkout@v5 + with: + fetch-depth: 0 + persist-credentials: false + ref: ${{ inputs.ref }} + + - uses: actions/setup-python@v6 + with: + # Build sdist on lowest supported Python + python-version: "3.10" + + - name: Build SDist + run: | + set -ex + python -m pip install -U pip build + python -m build --sdist . + + - name: Test SDist + run: | + python -m pip install dist/*.gz + cd .. + python -c "from pymongo import has_c; assert has_c()" + + - uses: actions/upload-artifact@v4 + with: + name: "sdist" + path: ./dist/*.tar.gz + + collect_dist: + runs-on: ubuntu-latest + needs: [build_wheels, make_sdist] + name: Download Wheels + steps: + - name: Download all workflow run artifacts + uses: actions/download-artifact@v5 + - name: Flatten directory + working-directory: . + run: | + find . -mindepth 2 -type f -exec mv {} . \; + find . -type d -empty -delete + - uses: actions/upload-artifact@v4 + with: + name: all-dist-${{ github.run_id }} + path: "./*" diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml new file mode 100644 index 0000000000..6abca9e528 --- /dev/null +++ b/.github/workflows/release-python.yml @@ -0,0 +1,117 @@ +name: Release + +on: + workflow_dispatch: + inputs: + following_version: + description: "The post (dev) version to set" + dry_run: + description: "Dry Run?" + default: false + type: boolean + schedule: + - cron: '30 5 * * *' + +env: + # Changes per repo + PRODUCT_NAME: PyMongo + # Changes per branch + EVERGREEN_PROJECT: mongo-python-driver + # Constant + # inputs will be empty on a scheduled run. so, we only set dry_run + # to 'false' when the input is set to 'false'. + DRY_RUN: ${{ ! 
contains(inputs.dry_run, 'false') }} + FOLLOWING_VERSION: ${{ inputs.following_version || '' }} + +defaults: + run: + shell: bash -eux {0} + +jobs: + pre-publish: + environment: release + runs-on: ubuntu-latest + if: github.repository_owner == 'mongodb' || github.event_name == 'workflow_dispatch' + permissions: + id-token: write + contents: write + outputs: + version: ${{ steps.pre-publish.outputs.version }} + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: mongodb-labs/drivers-github-tools/setup@v3 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + - uses: mongodb-labs/drivers-github-tools/python/pre-publish@v3 + id: pre-publish + with: + dry_run: ${{ env.DRY_RUN }} + + build-dist: + needs: [pre-publish] + uses: ./.github/workflows/dist.yml + with: + ref: ${{ needs.pre-publish.outputs.version }} + + static-scan: + needs: [pre-publish] + uses: ./.github/workflows/codeql.yml + permissions: + security-events: write + with: + ref: ${{ needs.pre-publish.outputs.version }} + + publish: + needs: [build-dist, static-scan] + name: Upload release to PyPI + runs-on: ubuntu-latest + environment: release + permissions: + id-token: write + steps: + - name: Download all the dists + uses: actions/download-artifact@v5 + with: + name: all-dist-${{ github.run_id }} + path: dist/ + - name: Publish package distributions to TestPyPI + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + skip-existing: true + attestations: ${{ env.DRY_RUN }} + - name: Publish package distributions to PyPI + if: startsWith(env.DRY_RUN, 'false') + uses: pypa/gh-action-pypi-publish@ed0c53931b1dc9bd32cbe73a98c7f6766f8a527e # release/v1 + + post-publish: + needs: [publish] + runs-on: ubuntu-latest + environment: release + permissions: + id-token: write + contents: write + attestations: write + security-events: write + steps: + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v3 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + - uses: mongodb-labs/drivers-github-tools/setup@v3 + with: + aws_role_arn: ${{ secrets.AWS_ROLE_ARN }} + aws_region_name: ${{ vars.AWS_REGION_NAME }} + aws_secret_id: ${{ secrets.AWS_SECRET_ID }} + - uses: mongodb-labs/drivers-github-tools/python/post-publish@v3 + with: + following_version: ${{ env.FOLLOWING_VERSION }} + product_name: ${{ env.PRODUCT_NAME }} + evergreen_project: ${{ env.EVERGREEN_PROJECT }} + token: ${{ github.token }} + dry_run: ${{ env.DRY_RUN }} diff --git a/.github/workflows/test-python.yml b/.github/workflows/test-python.yml index 89d9830e82..a057570f3f 100644 --- a/.github/workflows/test-python.yml +++ b/.github/workflows/test-python.yml @@ -2,86 +2,279 @@ name: Python Tests on: push: + branches: ["master", "v**"] pull_request: + workflow_dispatch: + +concurrency: + group: tests-${{ github.ref }} + cancel-in-progress: true + +defaults: + run: + shell: bash -eux {0} + +permissions: + contents: read jobs: - pre-commit: - name: pre-commit + static: runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 - - uses: pre-commit/action@v2.0.0 + - uses: actions/checkout@v5 with: - extra_args: --all-files --hook-stage=manual + persist-credentials: false + - name: Install uv + uses: 
astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - name: Install Python dependencies + run: | + just install + - name: Run linters + run: | + just lint-manual + - name: Run compilation + run: | + export PYMONGO_C_EXT_MUST_BUILD=1 + pip install -v -e . + python tools/fail_if_no_c.py + - name: Run typecheck + run: | + just typing + - run: | + sudo apt-get install -y cppcheck + - run: | + cppcheck --force bson + cppcheck pymongo build: - # supercharge/mongodb-github-action requires containers so we don't test other platforms runs-on: ${{ matrix.os }} strategy: + fail-fast: false matrix: - os: [ubuntu-20.04] - python-version: ["3.7", "3.10", "pypy-3.8"] + # Tests currently only pass on ubuntu on GitHub Actions. + os: [ubuntu-latest] + python-version: ["3.10", "pypy-3.10", "3.13t"] + mongodb-version: ["8.0"] + name: CPython ${{ matrix.python-version }}-${{ matrix.os }} steps: - - uses: actions/checkout@v2 - - name: Setup Python - uses: actions/setup-python@v2 + - uses: actions/checkout@v5 with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true python-version: ${{ matrix.python-version }} - cache: 'pip' - cache-dependency-path: 'setup.py' - - name: Start MongoDB - uses: supercharge/mongodb-github-action@1.7.0 + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master with: - mongodb-version: 4.4 + version: "${{ matrix.mongodb-version }}" - name: Run tests - run: | - pip install mypy==0.942 - python setup.py test + run: uv run --extra test pytest -v - mypytest: - name: Run mypy + doctest: + runs-on: ubuntu-latest + name: DocTest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + with: + version: "8.0" + - name: Install dependencies + run: just install + - name: Run tests + run: | + just setup-tests doctest + just run-tests + + docs: + name: Docs Checks + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - name: Install dependencies + run: just install + - name: Build docs + run: just docs + + linkcheck: + name: Link Check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just + - name: Install dependencies + run: just install + - name: Build docs + run: just docs-linkcheck + + typing: + name: Typing Tests runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.7', '3.10'] - fail-fast: false + python: ["3.10", "3.11"] steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + - uses: actions/checkout@v5 with: - python-version: ${{ matrix.python-version }} - cache: 'pip' - 
cache-dependency-path: 'setup.py' + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "${{matrix.python}}" + - name: Install just + run: uv tool install rust-just - name: Install dependencies run: | - python -m pip install -U pip mypy==0.942 - pip install -e ".[zstd, srv]" - - name: Run mypy + just install + - name: Run typecheck run: | - mypy --install-types --non-interactive bson gridfs tools pymongo - # Test overshadowed codec_options.py file - mypy --install-types --non-interactive bson/codec_options.py - mypy --install-types --non-interactive --disable-error-code var-annotated --disable-error-code attr-defined --disable-error-code union-attr --disable-error-code assignment --disable-error-code no-redef --disable-error-code index --allow-redefinition --allow-untyped-globals --exclude "test/mypy_fails/*.*" test + just typing - linkcheck: - name: Check Links + integration_tests: runs-on: ubuntu-latest + name: Integration Tests steps: - - uses: actions/checkout@v2 - - uses: actions/setup-python@v2 + - uses: actions/checkout@v5 with: - cache: 'pip' - cache-dependency-path: 'setup.py' + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + enable-cache: true + python-version: "3.10" + - name: Install just + run: uv tool install rust-just - name: Install dependencies run: | - python -m pip install -U pip - python -m pip install sphinx - - name: Check links + just install + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + - name: Run tests + run: | + just integration-tests + - id: setup-mongodb-ssl + uses: mongodb-labs/drivers-evergreen-tools@master + with: + ssl: true + - name: Run tests run: | - cd doc - make linkcheck + just integration-tests + + make_sdist: + runs-on: ubuntu-latest + name: "Make an sdist" + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - uses: actions/setup-python@v6 + with: + cache: 'pip' + cache-dependency-path: 'pyproject.toml' + # Build sdist on lowest supported Python + python-version: "3.10" + - name: Build SDist + shell: bash + run: | + pip install build + python -m build --sdist + - uses: actions/upload-artifact@v4 + with: + name: "sdist" + path: dist/*.tar.gz + + test_sdist: + runs-on: ubuntu-latest + needs: [make_sdist] + name: Install from SDist and Test + timeout-minutes: 20 + steps: + - name: Download sdist + uses: actions/download-artifact@v5 + with: + path: sdist/ + - name: Unpack SDist + shell: bash + run: | + cd sdist + ls + mkdir test + tar --strip-components=1 -zxf *.tar.gz -C ./test + ls test + - uses: actions/setup-python@v6 + with: + cache: 'pip' + cache-dependency-path: 'sdist/test/pyproject.toml' + # Test sdist on lowest supported Python + python-version: "3.10" + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + - name: Run connect test from sdist + shell: bash + run: | + cd sdist/test + ls + which python + pip install -e ".[test]" + PYMONGO_MUST_CONNECT=1 pytest -v -k client_context + + test_minimum: + permissions: + contents: read + runs-on: ubuntu-latest + name: Test minimum dependencies and Python + steps: + - uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + with: + python-version: "3.10" + - id: setup-mongodb + uses: mongodb-labs/drivers-evergreen-tools@master + 
with: + version: "8.0" + - name: Run tests + shell: bash + run: | + uv venv + source .venv/bin/activate + uv pip install -e ".[test]" --resolution=lowest-direct --force-reinstall + pytest -v test/test_srv_polling.py test/test_dns.py test/asynchronous/test_srv_polling.py test/asynchronous/test_dns.py diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml new file mode 100644 index 0000000000..c991de2e6d --- /dev/null +++ b/.github/workflows/zizmor.yml @@ -0,0 +1,21 @@ +name: GitHub Actions Security Analysis with zizmor 🌈 + +on: + push: + branches: ["master"] + pull_request: + branches: ["**"] + +jobs: + zizmor: + name: zizmor latest via Cargo + runs-on: ubuntu-latest + permissions: + security-events: write + steps: + - name: Checkout repository + uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Run zizmor 🌈 + uses: zizmorcore/zizmor-action@da5ac40c5419dcf7f21630fb2f95e725ae8fb9d5 diff --git a/.github/zizmor.yml b/.github/zizmor.yml new file mode 100644 index 0000000000..10fd4cdfcf --- /dev/null +++ b/.github/zizmor.yml @@ -0,0 +1,7 @@ +rules: + unpinned-uses: + config: + policies: + actions/*: ref-pin + mongodb-labs/drivers-github-tools/*: ref-pin + mongodb-labs/drivers-evergreen-tools: ref-pin diff --git a/.gitignore b/.gitignore index f7ad6563ff..74ed0bbb70 100644 --- a/.gitignore +++ b/.gitignore @@ -9,10 +9,36 @@ build/ doc/_build/ dist/ tools/settings.py +drivers-evergreen-tools pymongo.egg-info/ *.so -*.egg +*.egg* .tox mongocryptd.pid .idea/ +.vscode/ .nova/ +.temp/ +venv/ +secrets-export.sh +libmongocrypt.tar.gz +libmongocrypt/ +.venv +expansion.yml +*expansions.yml +.evergreen/scripts/env.sh +.evergreen/scripts/test-env.sh +specifications/ +results.json +.evergreen/atlas_x509_dev_client_certificate.pem + +# Lambda temp files +test/lambda/.aws-sam +test/lambda/mongodb/pymongo/* +test/lambda/mongodb/gridfs/* +test/lambda/mongodb/bson/* +test/lambda/*.json + +# test results and logs +xunit-results/ +server.log diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d72d51971c..d2b9d9a17a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,12 +1,14 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.1.0 + rev: v4.5.0 hooks: - id: check-added-large-files - id: check-case-conflict - id: check-toml + - id: check-json - id: check-yaml + exclude: template.yaml - id: debug-statements - id: end-of-file-fixer exclude: WHEEL @@ -16,36 +18,51 @@ repos: exclude: .patch exclude_types: [json] -- repo: https://github.com/psf/black - rev: 22.3.0 +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. 
+ rev: v0.1.3 hooks: - - id: black - files: \.py$ - args: [--line-length=100] + - id: ruff + args: ["--fix", "--show-fixes"] + - id: ruff-format -- repo: https://github.com/PyCQA/isort - rev: 5.10.1 +- repo: local hooks: - - id: isort - files: \.py$ - args: [--profile=black] + - id: synchro + name: synchro + entry: bash ./tools/synchro.sh + language: python + require_serial: true + fail_fast: true + additional_dependencies: + - ruff==0.1.3 + - unasync -- repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.2 +- repo: https://github.com/adamchainz/blacken-docs + rev: "1.16.0" hooks: - - id: flake8 - files: \.py$ - additional_dependencies: [ - 'flake8-bugbear==20.1.4', - 'flake8-logging-format==0.6.0', - 'flake8-implicit-str-concat==0.2.0', - ] - stages: [manual] + - id: blacken-docs + additional_dependencies: + - black==22.3.0 + +- repo: https://github.com/pre-commit/pygrep-hooks + rev: "v1.10.0" + hooks: + - id: rst-backticks + - id: rst-directive-colons + - id: rst-inline-touching-normal + +- repo: https://github.com/rstcheck/rstcheck + rev: v6.2.0 + hooks: + - id: rstcheck + additional_dependencies: [sphinx] + args: ["--ignore-directives=doctest,testsetup,todo,automodule","--ignore-substitutions=release", "--report-level=error"] # We use the Python version instead of the original version which seems to require Docker # https://github.com/koalaman/shellcheck-precommit - repo: https://github.com/shellcheck-py/shellcheck-py - rev: v0.8.0.4 + rev: v0.9.0.6 hooks: - id: shellcheck name: shellcheck @@ -53,26 +70,66 @@ repos: stages: [manual] - repo: https://github.com/PyCQA/doc8 - rev: 0.11.1 + rev: v1.1.1 hooks: - id: doc8 args: ["--ignore=D001"] # ignore line length stages: [manual] - repo: https://github.com/sirosen/check-jsonschema - rev: 0.14.1 + rev: 0.29.0 hooks: - - id: check-jsonschema - name: "Check GitHub Workflows" - files: ^\.github/workflows/ - types: [yaml] - args: ["--schemafile", "https://json.schemastore.org/github-workflow"] - stages: [manual] + - id: check-github-workflows + - id: check-github-actions + - id: check-dependabot - repo: https://github.com/ariebovenberg/slotscheck - rev: v0.14.0 + rev: v0.19.0 hooks: - id: slotscheck files: \.py$ exclude: "^(test|tools)/" stages: [manual] + args: ["--no-strict-imports"] + +- repo: https://github.com/codespell-project/codespell + rev: "v2.2.6" + hooks: + - id: codespell + # Examples of errors or updates to justify the exceptions: + # - test/test_on_demand_csfle.py:44: FLE ==> FILE + # - test/test_bson.py:1043: fo ==> of, for, to, do, go + # - test/bson_corpus/decimal128-4.json:98: Infinit ==> Infinite + # - test/test_bson.py:267: isnt ==> isn't + # - test/versioned-api/crud-api-version-1-strict.json:514: nin ==> inn, min, bin, nine + # - test/test_client.py:188: te ==> the, be, we, to + args: ["-L", "fle,fo,infinit,isnt,nin,te,aks"] + +- repo: local + hooks: + - id: executable-shell + name: executable-shell + entry: chmod +x + language: system + types: [shell] + exclude: | + (?x)( + .evergreen/retry-with-backoff.sh + ) + - id: generate-config + name: generate-config + entry: .evergreen/scripts/generate-config.sh + language: python + require_serial: true + additional_dependencies: ["shrub.py>=3.10.0", "pyyaml>=6.0.2"] + + - id: uv-lock + name: uv-lock + entry: uv lock + language: python + require_serial: true + files: ^(uv\.lock|pyproject\.toml|requirements.txt|requirements/.*\.txt)$ + 
pass_filenames: false + fail_fast: true + additional_dependencies: + - "uv>=0.8.4" diff --git a/.readthedocs.yaml b/.readthedocs.yaml index e2956c122b..a3693074f6 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -12,9 +12,13 @@ sphinx: # Set the version of Python and requirements required to build the docs. python: - version: 3.8 install: # Install pymongo itself. - method: pip path: . - - requirements: doc/docs-requirements.txt + - requirements: requirements/docs.txt + +build: + os: ubuntu-22.04 + tools: + python: "3.11" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000000..a8881db9cb --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,530 @@ +# Contributing to PyMongo + +PyMongo has a large +[community](https://pymongo.readthedocs.io/en/stable/contributors.html) +and contributions are always encouraged. Contributions can be as simple +as minor tweaks to the documentation. Please read these guidelines +before sending a pull request. + +## Bugfixes and New Features + +Before starting to write code, look for existing +[tickets](https://jira.mongodb.org/browse/PYTHON) or [create +one](https://jira.mongodb.org/browse/PYTHON) for your specific issue or +feature request. That way you avoid working on something that might not +be of interest or that has already been addressed. + +## Supported Interpreters + +PyMongo supports CPython 3.10+ and PyPy3.10+. Language features not +supported by all interpreters cannot be used. + +## Style Guide + +PyMongo follows [PEP8](http://www.python.org/dev/peps/pep-0008/) +including 4 space indents and 79 character line limits. + +## General Guidelines + +- Avoid backward breaking changes if at all possible. +- Write inline documentation for new classes and methods. +- We use [uv](https://docs.astral.sh/uv/) for Python environment management and packaging. +- We use [just](https://just.systems/man/en/) as our task runner. +- Write tests and make sure they pass (make sure you have a mongod + running on the default port, then execute `just test` from the command + line to run the test suite). +- Add yourself to doc/contributors.rst `:)` + +## Authoring a Pull Request + +**Our Pull Request Policy is based on this** [Code Review Developer +Guide](https://google.github.io/eng-practices/review) + +The expectation for any code author is to provide all the context needed +in the space of a pull request for any engineer to feel equipped to +review the code. Depending on the type of change, do your best to +highlight important new functions or objects you've introduced in the +code; think complex functions or new abstractions. While it may seem +like more work to adjust your pull request up front, doing so greatly +improves your chances of getting a timely review. + +**Self Review Guidelines to follow** + +- If the PR is too large, split it if possible. + + - Use 250 LoC (excluding test data and config changes) as a + rule-of-thumb. + + - Moving and changing code should be in separate PRs or commits. + + - Moving: Taking large code blobs and transplanting + them to another file. There's generally no (or very + little) actual code changed other than a cut and + paste. It can even be extended to large deletions. + - Changing: Adding code changes (be that refactors or + functionality additions/subtractions). + - These two, when mixed, can muddy understanding and + sometimes make it harder for reviewers to keep track + of things. + +- Prefer explaining with code comments instead of PR comments.
+ +**Provide background** + +- The PR description and linked tickets should answer the "what" and + "why" of the change. The code change explains the "how". + +**Follow the Template** + +- Please do not deviate from the template we provide; it is there for a + lot of reasons. If it is a one line fix, we still need to have + context on what and why it is needed. + +- If making a versioning change, please let that be known. See examples below: + + - `versionadded:: 3.11` + - `versionchanged:: 3.5` + +**Pull Request Template Breakdown** + +- **GitHub PR Title** + + - The PR Title format should always be + `[JIRA-ID] : Jira Title or Blurb Summary`. + +- **JIRA LINK** + + - Convenient link to the associated JIRA ticket. + +- **Summary** + + - Small blurb on why this is needed. The JIRA task should have + the more in-depth description, but this should still, at a + high level, give anyone looking an understanding of why the + PR has been checked in. + +- **Changes in this PR** + + - The explicit code changes that this PR is introducing. This + should be more specific than just the task name (unless the + task name is very clear). + +- **Test Plan** + + - Everything needs a test description. Describe what you did + to validate your changes actually worked; if you did + nothing, then document you did not test it. Aim to make + these steps reproducible by other engineers, specifically + with your primary reviewer in mind. + +- **Screenshots** + + - Any images that provide more context to the PR. Usually, + these just coincide with the test plan. + +- **Callouts or follow-up items** + + - This is a good place for identifying "to-dos" that you've + placed in the code (Must have an accompanying JIRA Ticket). + - Potential bugs that you are unsure how to test in the code. + - Opinions you want to receive about your code. + +## Running Linters + +PyMongo uses [pre-commit](https://pypi.org/project/pre-commit/) for +managing linting of the codebase. `pre-commit` performs various checks +on all files in PyMongo and uses tools that help follow a consistent +code style within the codebase. + +To set up `pre-commit` locally, run: + +```bash +brew install pre-commit +pre-commit install +``` + +To run `pre-commit` manually, run: + +```bash +pre-commit run --all-files +``` + +To run a manual-stage hook like `ruff`, run: + +```bash +pre-commit run --all-files --hook-stage manual ruff +``` + +Typically we use `just` to run the linters, e.g. + +```bash +just install # this will install a venv with pre-commit installed, and install the pre-commit hook. +just typing-mypy +just lint-manual +``` + +## Documentation + +To contribute to the [API documentation](https://pymongo.readthedocs.io/en/stable/) just make your +changes to the inline documentation of the appropriate [source code](https://github.com/mongodb/mongo-python-driver) or +[rst file](https://github.com/mongodb/mongo-python-driver/tree/master/doc) in +a branch and submit a [pull request](https://help.github.com/articles/using-pull-requests). You +might also use the GitHub +[Edit](https://github.com/blog/844-forking-with-the-edit-button) button. + +We use [reStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html) for all +documentation including narrative docs, and the [Sphinx docstring format](https://sphinx-rtd-tutorial.readthedocs.io/en/latest/docstrings.html).
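+ +For example, a Sphinx-style docstring for a new method might look like the following (a minimal sketch; the method and its parameter are hypothetical): + +```python +def rename_widget(self, new_name: str) -> None: + """Rename this widget. + + :param new_name: The new name for the widget. Must be a non-empty string. + + .. versionadded:: 4.x + """ +```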
+ +You can build the documentation locally by running: + +```bash +just docs +``` + +When updating docs, it can be helpful to run the live docs server as: + +```bash +just docs-serve +``` + +Browse to the link provided, and then as you make changes to docstrings or narrative docs, +the pages will re-render and the browser will automatically refresh. + +## Running Tests Locally + +- Run `just install` to set up a local virtual environment, or you can manually + create a virtual environment and run `pytest` directly. If you want to use a specific + version of Python, remove the `.venv` folder and set `PYTHON_BINARY` before running `just install`. +- Ensure you have started the appropriate Mongo Server(s). You can run `just run-server` with optional args + to set up the server. All given options will be passed to + [`run-orchestration.sh`](https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/run-orchestration.sh). Run `$DRIVERS_TOOLS/.evergreen/run-orchestration.sh -h` + for a full list of options. +- Run `just test` or `pytest` to run all of the tests. +- Append `test/<mod_name>.py::<class_name>::<test_name>` to run + specific tests. You can omit the `<test_name>` to test a full class + and the `<class_name>` to test a full module. For example: + `just test test/test_change_stream.py::TestUnifiedChangeStreamsErrors::test_change_stream_errors_on_ElectionInProgress`. +- Use the `-k` argument to select tests by pattern. + + +## Running tests that require secrets, services, or other configuration + +### Prerequisites + +- Clone `drivers-evergreen-tools`: + `git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git`. +- Run `export DRIVERS_TOOLS=$PWD/drivers-evergreen-tools`. This can be put into a `.bashrc` file + for convenience. +- Some tests require access to [Drivers test secrets](https://github.com/mongodb-labs/drivers-evergreen-tools/tree/master/.evergreen/secrets_handling#secrets-handling). + +### Usage + +- Run `just run-server` with optional args to set up the server. +- Run `just setup-tests` with optional args to set up the test environment, secrets, etc. + See `just setup-tests -h` for a full list of available options. +- Run `just run-tests` to run the tests in an appropriate Python environment. +- When done, run `just teardown-tests` to clean up and `just stop-server` to stop the server. + +### SSL tests + +- Run `just run-server --ssl` to start the server with TLS enabled. +- Run `just setup-tests --ssl`. +- Run `just run-tests`. + +Note: for general testing purposes with a TLS-enabled server, you can use the following (this should ONLY be used +for local testing): + +```python +from pymongo import MongoClient + +client = MongoClient( + "mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true" +) +``` + +If you want to use the actual certificate files, then set `tlsCertificateKeyFile` to the local path +to `/test/certificates/client.pem` and `tlsCAFile` to the local path to `/test/certificates/ca.pem`. + +### Encryption tests + +- Run `just run-server` to start the server. +- Run `just setup-tests encryption`. +- Run the tests with `just run-tests`. + +To test with `encryption` and `PyOpenSSL`, use `just setup-tests encryption pyopenssl`. + +### PyOpenSSL tests + +- Run `just run-server` to start the server. +- Run `just setup-tests default_sync pyopenssl`. +- Run the tests with `just run-tests`.
+ +Note: `PyOpenSSL` is not used in async tests, but you can use `just setup-tests default_async pyopenssl` +to verify that PyMongo falls back to the standard library `ssl` module. + +### Load balancer tests + +- Install `haproxy` (available as `brew install haproxy` on macOS). +- Start the server with `just run-server load_balancer`. +- Set up the test with `just setup-tests load_balancer`. +- Run the tests with `just run-tests`. + +### AWS auth tests + +- Run `just run-server auth_aws` to start the server. +- Run `just setup-tests auth_aws <test_type>` to set up the AWS test. +- Run the tests with `just run-tests`. + +### OIDC auth tests + +- Run `just setup-tests auth_oidc <test_type>` to set up the OIDC test. +- Run the tests with `just run-tests`. + +The supported types are [`default`, `azure`, `gcp`, `eks`, `aks`, and `gke`]. +For the `eks` test, you will need to set up access to the `drivers-test-secrets-role`, see the [Wiki](https://wiki.corp.mongodb.com/spaces/DRIVERS/pages/239737385/Using+AWS+Secrets+Manager+to+Store+Testing+Secrets). + +### KMS tests + +For KMS tests that are run locally and are expected to fail, in this case using `azure`: + +- Run `just run-server`. +- Run `just setup-tests kms azure-fail`. +- Run `just run-tests`. + +For KMS tests that run remotely and are expected to pass, in this case using `gcp`: + +- Run `just setup-tests kms gcp`. +- Run `just run-tests`. + +### Enterprise Auth tests + +Note: these tests can only be run from an Evergreen host. + +- Run `just run-server enterprise_auth`. +- Run `just setup-tests enterprise_auth`. +- Run `just run-tests`. + +### Atlas Connect tests + +- Run `just setup-tests atlas_connect`. +- Run `just run-tests`. + +### Search Index tests + +- Run `just run-server search_index`. +- Run `just setup-tests search_index`. +- Run `just run-tests`. + +### MockupDB tests + +- Run `just setup-tests mockupdb`. +- Run `just run-tests`. + +### Doc tests + +The doc tests require a running server. + +- Run `just run-server`. +- Run `just setup-tests doctest`. +- Run `just run-tests`. + +### Free-threaded Python Tests + +In the Evergreen builds, the tests are configured to use the free-threaded Python from the toolchain. +Locally you can run: + +- Run `just run-server`. +- Run `just setup-tests`. +- Run `UV_PYTHON=3.13t just run-tests`. + +### AWS Lambda tests + +You will need to set up access to the `drivers-test-secrets-role`, see the [Wiki](https://wiki.corp.mongodb.com/spaces/DRIVERS/pages/239737385/Using+AWS+Secrets+Manager+to+Store+Testing+Secrets). + +- Run `just setup-tests aws_lambda`. +- Run `just run-tests`. + +### mod_wsgi tests + +Note: these tests can only be run from an Evergreen Linux host that has the Python toolchain. + +- Run `just run-server`. +- Run `just setup-tests mod_wsgi <mode>`. +- Run `just run-tests`. + +The `mode` can be `standalone` or `embedded`. For the `replica_set` version of the tests, use +`TOPOLOGY=replica_set just run-server`. + +### OCSP tests + +- Export the orchestration file, e.g. `export ORCHESTRATION_FILE=rsa-basic-tls-ocsp-disableStapling.json`. +This corresponds to a config file in `$DRIVERS_TOOLS/.evergreen/orchestration/configs/servers`. +MongoDB servers on macOS and Windows do not staple OCSP responses and only support RSA. +NOTE: because the mock ocsp responder MUST be started prior to the server starting, the ocsp tests start the server +as part of `setup-tests`. + +- Run `just setup-tests ocsp <sub_test>` (options are "valid", "revoked", "valid-delegate", "revoked-delegate").
+- Run `just run-tests`. + +If you are running one of the `no-responder` tests, omit the `run-server` step. + +### Perf Tests + +- Start the appropriate server, e.g. `just run-server --version=v8.0-perf --ssl`. +- Set up the tests with `sync` or `async`: `just setup-tests perf sync`. +- Run the tests: `just run-tests`. + +## Enable Debug Logs + +- Use `-o log_cli_level="DEBUG" -o log_cli=1` with `just test` or `pytest` to output all debug logs to the terminal. **Warning**: This will output a huge amount of logs. +- Add `log_cli=1` and `log_cli_level="DEBUG"` to the `tool.pytest.ini_options` section in `pyproject.toml` to enable debug logs in this manner by default on your machine. +- Set `DEBUG_LOG=1` and run `just setup-tests`, `just test`, or `pytest` to enable debug logs only for failed tests. +- Finally, you can use `just setup-tests --debug-log`. +- For evergreen patch builds, you can use `evergreen patch --param DEBUG_LOG=1` to enable debug logs for failed tests in the patch. + +## Testing minimum dependencies + +To run any of the test suites with minimum supported dependencies, pass `--test-min-deps` to +`just setup-tests`. + +## Adding a new test suite + +- If adding new test files that should only be run for that test suite, add a pytest marker to the file and add + to the list of pytest markers in `pyproject.toml`. Then add the test suite to the `TEST_SUITE_MAP` in `.evergreen/scripts/utils.py`. If for some reason it is not a pytest-runnable test, add it to the list of `EXTRA_TESTS` instead. +- If the test uses Atlas or otherwise doesn't use `run-orchestration.sh`, add it to the `NO_RUN_ORCHESTRATION` list in + `.evergreen/scripts/utils.py`. +- If there is something special required to run the local server or there is an extra flag that should always be set + like `AUTH`, add that logic to `.evergreen/scripts/run_server.py`. +- The bulk of the logic will typically be in `.evergreen/scripts/setup_tests.py`. This is where you should fetch secrets and make them available using `write_env`, start services, and write other env vars needed using `write_env`. +- If there are any special test considerations, including not running `pytest` at all, handle it in `.evergreen/scripts/run_tests.py`. +- If there are any services or atlas clusters to tear down, handle them in `.evergreen/scripts/teardown_tests.py`. +- Add functions to generate the test variant(s) and task(s) to `.evergreen/scripts/generate_config.py`. +- Regenerate the test variants and tasks using `pre-commit run --all-files generate-config`. +- Make sure to add instructions for running the test suite to `CONTRIBUTING.md`. + +## Handling flaky tests + +We have a custom `flaky` decorator in [test/asynchronous/utils.py](test/asynchronous/utils.py) that can be used for +tests that are `flaky`. By default the decorator only applies when not running on CPython on Linux, since other +runtimes tend to have more variation. When using the `flaky` decorator, open a corresponding ticket and +use the ticket number as the "reason" parameter to the decorator, e.g. `@flaky(reason="PYTHON-1234")`. +When running tests locally (not in CI), the `flaky` decorator will be disabled unless `ENABLE_FLAKY` is set. +To disable the `flaky` decorator in CI, you can use `evergreen patch --param DISABLE_FLAKY=1`. + +## Integration Tests + +The `integration_tests` directory has a set of scripts that verify the usage of PyMongo with downstream packages or frameworks. See the [README](./integration_tests/README.md) for more information.
+ +To run the tests, use `just integration-tests`. + +The tests should be able to run with and without SSL enabled. + +## Specification Tests + +The MongoDB [specifications repository](https://github.com/mongodb/specifications) +holds in progress and completed specifications for features of MongoDB, drivers, +and associated products. PyMongo supports the [Unified Test Format](https://jira.mongodb.org/browse/DRIVERS-709) +for running specification tests to confirm PyMongo behaves as expected. + +### Resynchronizing the Specification Tests + +If you would like to re-sync the copy of the specification tests in the +PyMongo repository with that which is inside the [specifications +repo](https://github.com/mongodb/specifications), please use the script +provided in `.evergreen/resync-specs.sh`: + +```bash +git clone git@github.com:mongodb/specifications.git +export MDB_SPECS=~/specifications +cd ~/mongo-python-driver/.evergreen +./resync-specs.sh -b "" spec1 spec2 ... +./resync-specs.sh -b "connection-string*" crud bson-corpus # Updates crud and bson-corpus specs while ignoring all files with the regex "connection-string*" +cd .. +``` + +The `-b` flag adds a regex pattern to block files you do not wish to +update in PyMongo. This is primarily helpful if you are implementing a +new feature in PyMongo that has spec tests already implemented, or if +you are attempting to validate new spec tests in PyMongo. + +### Automated Specification Test Resyncing +The `/.evergreen/scripts/resync-all-specs.sh` script +automatically runs once a week to resync all the specs with the [specifications repo](https://github.com/mongodb/specifications). +A PR will be generated by mongodb-drivers-pr-bot containing any changes picked up by this resync. +The PR description will display the name(s) of the updated specs along +with any errors that occurred. + +Spec test changes associated with a behavioral change or bugfix that has yet to be implemented in PyMongo +must be added to a patch file in `/.evergreen/spec-patch`. Each patch +file must be named after the associated PYTHON ticket and contain the +test differences between PyMongo's current tests and the specification. +All changes listed in these patch files will be *undone* by the script and won't +be applied to PyMongo's tests. + +When a new test file or folder is added to the spec repo before the associated code changes are implemented, that test's path must be added to `.evergreen/remove-unimplemented-tests.sh` along with a comment indicating the associated PYTHON ticket for those changes. + +Any PR that implements a PYTHON ticket documented in a patch file or within `.evergreen/remove-unimplemented-tests.sh` must also remove the associated patch file or entry in `remove-unimplemented-tests.sh`. + +#### Adding to a patch file +To add to or create a patch file, run `git diff` to show the desired changes to undo and copy the +results into the patch file. + +For example: the imaginary, unimplemented PYTHON-1234 ticket has associated spec test changes. To add those changes to `PYTHON-1234.patch`, do the following: +```bash +git diff HEAD~1 path/to/file >> .evergreen/spec-patch/PYTHON-1234.patch +``` + +#### Running Locally +Both `resync-all-specs.sh` and `resync-all-specs.py` can be run locally (and won't generate a PR).
+```bash +./.evergreen/scripts/resync-all-specs.sh +python3 ./.evergreen/scripts/resync-all-specs.py +``` + +## Making a Release + +Follow the [Python Driver Release Process Wiki](https://wiki.corp.mongodb.com/display/DRIVERS/Python+Driver+Release+Process). + +## Asyncio considerations + +PyMongo adds asyncio capability by modifying the source files in `*/asynchronous` to `*/synchronous` using +[unasync](https://github.com/python-trio/unasync/) and some custom transforms. + +Where possible, edit the code in `*/asynchronous/*.py` and not the synchronous files. +You can run `pre-commit run --all-files synchro` before running tests if you are testing synchronous code. + +To prevent the `synchro` hook from accidentally overwriting code, it first checks whether a sync version +of a file is changing without its async counterpart, and fails if so. +In the unlikely scenario that you want to override this behavior, first export `OVERRIDE_SYNCHRO_CHECK=1`. + +Sometimes, the `synchro` hook will fail and introduce changes to many previously unmodified files. This is due to static +Python errors, such as missing imports, incorrect syntax, or other fatal typos. To resolve these issues, +run `pre-commit run --all-files --hook-stage manual ruff` and fix all reported errors before running the `synchro` +hook again. + +## Converting a test to async + +The `tools/convert_test_to_async.py` script takes in an existing synchronous test file and outputs a +partially-converted asynchronous version of the same name to the `test/asynchronous` directory. +Use this generated file as a starting point for the completed conversion. + +The script is used like so: `python tools/convert_test_to_async.py [test_file.py]` + +## Generating a flame graph using py-spy +To profile a test script and generate a flame graph, follow these steps: +1. Install `py-spy` if you haven't already: + ```bash + pip install py-spy + ``` +2. Inside your test script, perform any required setup and then loop over the code you want to profile for improved sampling. +3. Run `py-spy record -o <output.svg> -r <sample_rate> -- python <test_script.py>` to generate a `.svg` file containing the flame graph. + (Note: on macOS you will need to run this command using `sudo` to allow `py-spy` to attach to the Python process.) +4. If you need to include native code (for example the C extensions), profiling should be done on a Linux system, as macOS and Windows do not support the `--native` option of `py-spy`. + Creating an Ubuntu Evergreen spawn host and using `scp` to copy the flamegraph `.svg` file back to your local machine is the best way to do this. + +## Dependabot updates + +Dependabot will raise PRs at most once per week, grouped by GitHub Actions updates and Python requirement +file updates. We have a pre-commit hook that will update the `uv.lock` file when requirements change. +To update the lock file on a failing PR, you can use a method like `gh pr checkout <pr_number>`, then run +`just lint uv-lock` to update the lock file, and then push the changes. If a typing dependency has changed, +also run `just typing` and handle any new findings. diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst deleted file mode 100644 index a457b3e4c3..0000000000 --- a/CONTRIBUTING.rst +++ /dev/null @@ -1,108 +0,0 @@ -Contributing to PyMongo -======================= - -PyMongo has a large `community -`_ and -contributions are always encouraged. Contributions can be as simple as -minor tweaks to the documentation. Please read these guidelines before -sending a pull request.
- -Bugfixes and New Features -------------------------- - -Before starting to write code, look for existing `tickets -`_ or `create one -`_ for your specific -issue or feature request. That way you avoid working on something -that might not be of interest or that has already been addressed. - -Supported Interpreters ----------------------- - -PyMongo supports CPython 3.7+ and PyPy3.7+. Language -features not supported by all interpreters can not be used. - -Style Guide ------------ - -PyMongo follows `PEP8 `_ -including 4 space indents and 79 character line limits. - -General Guidelines ------------------- - -- Avoid backward breaking changes if at all possible. -- Write inline documentation for new classes and methods. -- Write tests and make sure they pass (make sure you have a mongod - running on the default port, then execute ``python setup.py test`` - from the cmd line to run the test suite). -- Add yourself to doc/contributors.rst :) - -Running Linters ---------------- - -PyMongo uses `pre-commit `_ -for managing linting of the codebase. -``pre-commit`` performs various checks on all files in PyMongo and uses tools -that help follow a consistent code style within the codebase. - -To set up ``pre-commit`` locally, run:: - - pip install pre-commit - pre-commit install - -To run ``pre-commit`` manually, run:: - - pre-commit run --all-files - -To run a manual hook like `flake8` manually, run:: - - pre-commit run --all-files --hook-stage manual flake8 - -Documentation -------------- - -To contribute to the `API documentation `_ -just make your changes to the inline documentation of the appropriate -`source code `_ or `rst file -`_ in a -branch and submit a `pull request `_. -You might also use the GitHub `Edit `_ -button. - -Running Tests Locally ---------------------- -- Ensure you have started the appropriate Mongo Server(s). -- Run ``python setup.py test`` to run all of the tests. -- Run ``python setup.py test -s test...`` to - run specific tests. You can omit the ```` to test a full class - and the ```` to test a full module. For example: - ``python setup.py test -s test.test_change_stream.TestUnifiedChangeStreamsErrors.test_change_stream_errors_on_ElectionInProgress``. - -Running Load Balancer Tests Locally ------------------------------------ -- Install ``haproxy`` (available as ``brew install haproxy`` on macOS). -- Clone ``drivers-evergreen-tools``: ``git clone git@github.com:mongodb-labs/drivers-evergreen-tools.git``. -- Start the servers using ``LOAD_BALANCER=true TOPOLOGY=sharded_cluster AUTH=noauth SSL=nossl MONGODB_VERSION=6.0 DRIVERS_TOOLS=$PWD/drivers-evergreen-tools MONGO_ORCHESTRATION_HOME=$PWD/drivers-evergreen-tools/.evergreen/orchestration $PWD/drivers-evergreen-tools/.evergreen/run-orchestration.sh``. -- Start the load balancer using: ``MONGODB_URI='mongodb://localhost:27017,localhost:27018/' $PWD/drivers-evergreen-tools/.evergreen/run-load-balancer.sh start``. -- Run the tests from the ``pymongo`` checkout directory using: ``LOADBALANCER=1 TEST_LOADBALANCER=1 SINGLE_MONGOS_LB_URI='mongodb://127.0.0.1:8000/?loadBalanced=true' MULTI_MONGOS_LB_URI='mongodb://127.0.0.1:8001/?loadBalanced=true' MONGODB_URI='mongodb://localhost:27017,localhost:27018/' python setup.py test -s test.test_load_balancer``. 
-
-Re-sync Spec Tests
-------------------
-
-If you would like to re-sync the copy of the specification tests in the
-PyMongo repository with that which is inside the `specifications repo
-`_, please
-use the script provided in ``.evergreen/resync-specs.sh``.::
-
-    git clone git@github.com:mongodb/specifications.git
-    export MDB_SPECS=~/specifications
-    cd ~/mongo-python-driver/.evergreen
-    ./resync-specs.sh -b "connection-string*" crud bson-corpus
-    cd ..
-
-The ``-b`` flag adds as a regex pattern to block files you do not wish to
-update in PyMongo.
-This is primarily helpful if you are implementing a new feature in PyMongo
-that has spec tests already implemented, or if you are attempting to
-validate new spec tests in PyMongo.
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index d017d16ab0..0000000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,14 +0,0 @@
-include README.rst
-include LICENSE
-include THIRD-PARTY-NOTICES
-recursive-include doc *.rst
-recursive-include doc *.py
-recursive-include doc *.conf
-recursive-include doc *.css
-recursive-include doc *.js
-recursive-include doc *.png
-recursive-include tools *.py
-include tools/README.rst
-recursive-include test *.pem
-recursive-include test *.py
-recursive-include bson *.h
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..ba1688cb70
--- /dev/null
+++ b/README.md
@@ -0,0 +1,217 @@
+# PyMongo
+
+[![PyPI Version](https://img.shields.io/pypi/v/pymongo)](https://pypi.org/project/pymongo)
+[![Python Versions](https://img.shields.io/pypi/pyversions/pymongo)](https://pypi.org/project/pymongo)
+[![Monthly Downloads](https://static.pepy.tech/badge/pymongo/month)](https://pepy.tech/project/pymongo)
+[![API Documentation Status](https://readthedocs.org/projects/pymongo/badge/?version=stable)](http://pymongo.readthedocs.io/en/stable/api?badge=stable)
+
+## About
+
+The PyMongo distribution contains tools for interacting with the MongoDB
+database from Python. The `bson` package is an implementation of the
+[BSON format](http://bsonspec.org) for Python. The `pymongo` package is
+a native Python driver for MongoDB, offering both synchronous and asynchronous APIs. The `gridfs` package is a
+[gridfs](https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.md/)
+implementation on top of `pymongo`.
+
+PyMongo supports MongoDB 4.0, 4.2, 4.4, 5.0, 6.0, 7.0, and 8.0. PyMongo follows [semantic versioning](https://semver.org/spec/v2.0.0.html) for its releases.
+
+## Documentation
+
+Documentation is available at
+[mongodb.com](https://www.mongodb.com/docs/languages/python/pymongo-driver/current/).
+
+[API documentation](https://pymongo.readthedocs.io/en/stable/api/) and the [full changelog](https://pymongo.readthedocs.io/en/stable/changelog.html) for each release are available at [readthedocs.io](https://pymongo.readthedocs.io/en/stable/index.html).
+
+## Support / Feedback
+
+For issues with, questions about, or feedback for PyMongo, please look
+into our [support channels](https://support.mongodb.com/welcome). Please
+do not email any of the PyMongo developers directly with issues or
+questions - you're more likely to get an answer on
+[StackOverflow](https://stackoverflow.com/questions/tagged/mongodb)
+(using a "mongodb" tag).
+
+## Bugs / Feature Requests
+
+Think you've found a bug? Want to see a new feature in PyMongo? Please
+open a case in our issue management tool, JIRA:
+
+- [Create an account and login](https://jira.mongodb.org).
+- Navigate to [the PYTHON + project](https://jira.mongodb.org/browse/PYTHON). +- Click **Create Issue** - Please provide as much information as + possible about the issue type and how to reproduce it. + +Bug reports in JIRA for all driver projects (i.e. PYTHON, CSHARP, JAVA) +and the Core Server (i.e. SERVER) project are **public**. + +### How To Ask For Help + +Please include all of the following information when opening an issue: + +- Detailed steps to reproduce the problem, including full traceback, + if possible. + +- The exact python version used, with patch level: + +```bash +python -c "import sys; print(sys.version)" +``` + +- The exact version of PyMongo used, with patch level: + +```bash +python -c "import pymongo; print(pymongo.version); print(pymongo.has_c())" +``` + +- The operating system and version (e.g. Windows 7, OSX 10.8, ...) + +- Web framework or asynchronous network library used, if any, with + version (e.g. Django 1.7, mod_wsgi 4.3.0, gevent 1.0.1, Tornado + 4.0.2, ...) + +### Security Vulnerabilities + +If you've identified a security vulnerability in a driver or any other +MongoDB project, please report it according to the [instructions +here](https://www.mongodb.com/docs/manual/tutorial/create-a-vulnerability-report/). + +## Installation + +PyMongo can be installed with [pip](http://pypi.python.org/pypi/pip): + +```bash +python -m pip install pymongo +``` + +You can also download the project source and do: + +```bash +pip install . +``` + +Do **not** install the "bson" package from pypi. PyMongo comes with +its own bson package; running "pip install bson" installs a third-party +package that is incompatible with PyMongo. + +## Dependencies + +PyMongo supports CPython 3.10+ and PyPy3.10+. + +Required dependencies: + +Support for `mongodb+srv://` URIs requires [dnspython](https://pypi.python.org/pypi/dnspython) + +Optional dependencies: + +GSSAPI authentication requires +[pykerberos](https://pypi.python.org/pypi/pykerberos) on Unix or +[WinKerberos](https://pypi.python.org/pypi/winkerberos) on Windows. 
The +correct dependency can be installed automatically along with PyMongo: + +```bash +python -m pip install "pymongo[gssapi]" +``` + +MONGODB-AWS authentication requires +[pymongo-auth-aws](https://pypi.org/project/pymongo-auth-aws/): + +```bash +python -m pip install "pymongo[aws]" +``` + +OCSP (Online Certificate Status Protocol) requires +[PyOpenSSL](https://pypi.org/project/pyOpenSSL/), +[requests](https://pypi.org/project/requests/), +[service_identity](https://pypi.org/project/service_identity/) and may +require [certifi](https://pypi.python.org/pypi/certifi): + +```bash +python -m pip install "pymongo[ocsp]" +``` + +Wire protocol compression with snappy requires +[python-snappy](https://pypi.org/project/python-snappy): + +```bash +python -m pip install "pymongo[snappy]" +``` + +Wire protocol compression with zstandard requires +[zstandard](https://pypi.org/project/zstandard): + +```bash +python -m pip install "pymongo[zstd]" +``` + +Client-Side Field Level Encryption requires +[pymongocrypt](https://pypi.org/project/pymongocrypt/) and +[pymongo-auth-aws](https://pypi.org/project/pymongo-auth-aws/): + +```bash +python -m pip install "pymongo[encryption]" +``` +You can install all dependencies automatically with the following +command: + +```bash +python -m pip install "pymongo[gssapi,aws,ocsp,snappy,zstd,encryption]" +``` + +## Examples + +Here's a basic example (for more see the *examples* section of the +docs): + +```pycon +>>> import pymongo +>>> client = pymongo.MongoClient("localhost", 27017) +>>> db = client.test +>>> db.name +'test' +>>> db.my_collection +Collection(Database(MongoClient('localhost', 27017), 'test'), 'my_collection') +>>> db.my_collection.insert_one({"x": 10}).inserted_id +ObjectId('4aba15ebe23f6b53b0000000') +>>> db.my_collection.insert_one({"x": 8}).inserted_id +ObjectId('4aba160ee23f6b543e000000') +>>> db.my_collection.insert_one({"x": 11}).inserted_id +ObjectId('4aba160ee23f6b543e000002') +>>> db.my_collection.find_one() +{'x': 10, '_id': ObjectId('4aba15ebe23f6b53b0000000')} +>>> for item in db.my_collection.find(): +... print(item["x"]) +... +10 +8 +11 +>>> db.my_collection.create_index("x") +'x_1' +>>> for item in db.my_collection.find().sort("x", pymongo.ASCENDING): +... print(item["x"]) +... +8 +10 +11 +>>> [item["x"] for item in db.my_collection.find().limit(2).skip(1)] +[8, 11] +``` + +## Learning Resources + +- MongoDB Learn - [Python +courses](https://learn.mongodb.com/catalog?labels=%5B%22Language%22%5D&values=%5B%22Python%22%5D). +- [Python Articles on Developer +Center](https://www.mongodb.com/developer/languages/python/). + +## Testing + +The easiest way to run the tests is to run the following from the repository root. + +```bash +pip install -e ".[test]" +pytest +``` + +For more advanced testing scenarios, see the [contributing guide](./CONTRIBUTING.md#running-tests-locally). diff --git a/README.rst b/README.rst deleted file mode 100644 index c301932643..0000000000 --- a/README.rst +++ /dev/null @@ -1,207 +0,0 @@ -======= -PyMongo -======= -:Info: See `the mongo site `_ for more information. See `GitHub `_ for the latest source. -:Documentation: Available at `pymongo.readthedocs.io `_ -:Author: The MongoDB Python Team - -About -===== - -The PyMongo distribution contains tools for interacting with MongoDB -database from Python. The ``bson`` package is an implementation of -the `BSON format `_ for Python. The ``pymongo`` -package is a native Python driver for MongoDB. 
The ``gridfs`` package -is a `gridfs -`_ -implementation on top of ``pymongo``. - -PyMongo supports MongoDB 3.6, 4.0, 4.2, 4.4, and 5.0. - -Support / Feedback -================== - -For issues with, questions about, or feedback for PyMongo, please look into -our `support channels `_. Please -do not email any of the PyMongo developers directly with issues or -questions - you're more likely to get an answer on the `MongoDB Community -Forums `_. - -Bugs / Feature Requests -======================= - -Think you’ve found a bug? Want to see a new feature in PyMongo? Please open a -case in our issue management tool, JIRA: - -- `Create an account and login `_. -- Navigate to `the PYTHON project `_. -- Click **Create Issue** - Please provide as much information as possible about the issue type and how to reproduce it. - -Bug reports in JIRA for all driver projects (i.e. PYTHON, CSHARP, JAVA) and the -Core Server (i.e. SERVER) project are **public**. - -How To Ask For Help -------------------- - -Please include all of the following information when opening an issue: - -- Detailed steps to reproduce the problem, including full traceback, if possible. -- The exact python version used, with patch level:: - - $ python -c "import sys; print(sys.version)" - -- The exact version of PyMongo used, with patch level:: - - $ python -c "import pymongo; print(pymongo.version); print(pymongo.has_c())" - -- The operating system and version (e.g. Windows 7, OSX 10.8, ...) -- Web framework or asynchronous network library used, if any, with version (e.g. - Django 1.7, mod_wsgi 4.3.0, gevent 1.0.1, Tornado 4.0.2, ...) - -Security Vulnerabilities ------------------------- - -If you’ve identified a security vulnerability in a driver or any other -MongoDB project, please report it according to the `instructions here -`_. - -Installation -============ - -PyMongo can be installed with `pip `_:: - - $ python -m pip install pymongo - -Or ``easy_install`` from -`setuptools `_:: - - $ python -m easy_install pymongo - -You can also download the project source and do:: - - $ python setup.py install - -Do **not** install the "bson" package from pypi. PyMongo comes with its own -bson package; doing "easy_install bson" installs a third-party package that -is incompatible with PyMongo. - -Dependencies -============ - -PyMongo supports CPython 3.7+ and PyPy3.7+. - -Optional dependencies: - -GSSAPI authentication requires `pykerberos -`_ on Unix or `WinKerberos -`_ on Windows. 
The correct -dependency can be installed automatically along with PyMongo:: - - $ python -m pip install "pymongo[gssapi]" - -MONGODB-AWS authentication requires `pymongo-auth-aws -`_:: - - $ python -m pip install "pymongo[aws]" - -Support for mongodb+srv:// URIs requires `dnspython -`_:: - - $ python -m pip install "pymongo[srv]" - -OCSP (Online Certificate Status Protocol) requires `PyOpenSSL -`_, `requests -`_, `service_identity -`_ and may -require `certifi -`_:: - - $ python -m pip install "pymongo[ocsp]" - -Wire protocol compression with snappy requires `python-snappy -`_:: - - $ python -m pip install "pymongo[snappy]" - -Wire protocol compression with zstandard requires `zstandard -`_:: - - $ python -m pip install "pymongo[zstd]" - -Client-Side Field Level Encryption requires `pymongocrypt -`_:: - - $ python -m pip install "pymongo[encryption]" - -You can install all dependencies automatically with the following -command:: - - $ python -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,tls,zstd,encryption]" - -Additional dependencies are: - -- (to generate documentation) sphinx_ - -Examples -======== -Here's a basic example (for more see the *examples* section of the docs): - -.. code-block:: python - - >>> import pymongo - >>> client = pymongo.MongoClient("localhost", 27017) - >>> db = client.test - >>> db.name - 'test' - >>> db.my_collection - Collection(Database(MongoClient('localhost', 27017), 'test'), 'my_collection') - >>> db.my_collection.insert_one({"x": 10}).inserted_id - ObjectId('4aba15ebe23f6b53b0000000') - >>> db.my_collection.insert_one({"x": 8}).inserted_id - ObjectId('4aba160ee23f6b543e000000') - >>> db.my_collection.insert_one({"x": 11}).inserted_id - ObjectId('4aba160ee23f6b543e000002') - >>> db.my_collection.find_one() - {'x': 10, '_id': ObjectId('4aba15ebe23f6b53b0000000')} - >>> for item in db.my_collection.find(): - ... print(item["x"]) - ... - 10 - 8 - 11 - >>> db.my_collection.create_index("x") - 'x_1' - >>> for item in db.my_collection.find().sort("x", pymongo.ASCENDING): - ... print(item["x"]) - ... - 8 - 10 - 11 - >>> [item["x"] for item in db.my_collection.find().limit(2).skip(1)] - [8, 11] - -Documentation -============= - -Documentation is available at `pymongo.readthedocs.io `_. - -To build the documentation, you will need to install sphinx_. -Documentation can be generated by running **python -setup.py doc**. Generated documentation can be found in the -*doc/build/html/* directory. - -Testing -======= - -The easiest way to run the tests is to run **python setup.py test** in -the root of the distribution. - -To verify that PyMongo works with Gevent's monkey-patching:: - - $ python green_framework_test.py gevent - -Or with Eventlet's:: - - $ python green_framework_test.py eventlet - -.. _sphinx: https://www.sphinx-doc.org/en/master/ diff --git a/RELEASE.rst b/RELEASE.rst deleted file mode 100644 index ad18446a0f..0000000000 --- a/RELEASE.rst +++ /dev/null @@ -1,97 +0,0 @@ -Some notes on PyMongo releases -============================== - -Versioning ----------- - -We shoot for a release every few months - that will generally just -increment the middle / minor version number (e.g. 3.5.0 -> 3.6.0). - -Patch releases are reserved for bug fixes (in general no new features -or deprecations) - they only happen in cases where there is a critical -bug in a recently released version, or when a release has no new -features or API changes. - -In between releases we add .devN to the version number to denote the version -under development. 
So if we just released 3.6.0, then the current dev -version might be 3.6.1.dev0 or 3.7.0.dev0. When we make the next release we -replace all instances of 3.x.x.devN in the docs with the new version number. - -https://semver.org/ -https://www.python.org/dev/peps/pep-0440/ - -Deprecation ------------ - -Changes should be backwards compatible unless absolutely necessary. When making -API changes the approach is generally to add a deprecation warning but keeping -the existing API functional. Deprecated features can be removed in a release -that changes the major version number. - -Doing a Release ---------------- - -1. PyMongo is tested on Evergreen. Ensure the latest commit are passing CI - as expected: https://evergreen.mongodb.com/waterfall/mongo-python-driver. - To test locally, ``python3 setup.py test`` will build the C extensions and - test. ``python3 tools/clean.py`` will remove the extensions, - and then ``python3 setup.py --no_ext test`` will run the tests without - them. You can also run the doctests: ``python3 setup.py doc -t``. - -2. Check Jira to ensure all the tickets in this version have been completed. - -3. Add release notes to doc/changelog.rst. Generally just summarize/clarify - the git log, but you might add some more long form notes for big changes. - -4. Search and replace the "devN" version number w/ the new version number (see - note above in `Versioning`_). - -5. Make sure version number is updated in setup.py and pymongo/__init__.py - -6. Commit with a BUMP version_number message, eg ``git commit -m 'BUMP 3.11.0'``. - -7. Tag w/ version_number, eg, ``git tag -a '3.11.0' -m '3.11.0' ``. - -8. Push commit / tag, eg ``git push && git push --tags``. - -9. Pushing a tag will trigger a release process in Evergreen which builds - wheels for manylinux, macOS, and Windows. Wait for the "release-combine" - task to complete and then download the "Release files all" archive. See: - https://evergreen.mongodb.com/waterfall/mongo-python-driver?bv_filter=release - - The contents should look like this:: - - $ ls path/to/archive - pymongo--cp310-cp310-macosx_10_9_universal2.whl - ... - pymongo--cp38-cp38-manylinux2014_x86_64.whl - ... - pymongo--cp38-cp38-win_amd64.whl - ... - pymongo-.tar.gz - -10. Upload all the release packages to PyPI with twine:: - - $ python3 -m twine upload path/to/archive/* - -11. Make sure the new version appears on https://pymongo.readthedocs.io/. If the - new version does not show up automatically, trigger a rebuild of "latest": - https://readthedocs.org/projects/pymongo/builds/ - -12. Bump the version number to .dev0 in setup.py/__init__.py, - commit, push. - -13. Publish the release version in Jira. - -14. Announce the release on: - https://developer.mongodb.com/community/forums/c/community/release-notes/ - -15. File a ticket for DOCSP highlighting changes in server version and Python - version compatibility or the lack thereof, for example: - https://jira.mongodb.org/browse/DOCSP-13536 - -16. Create a GitHub Release for the tag using - https://github.com/mongodb/mongo-python-driver/releases/new. - The title should be "PyMongo X.Y.Z", and the description should contain - a link to the release notes on the the community forum, e.g. - "Release notes: mongodb.com/community/forums/t/pymongo-4-0-2-released/150457." 
diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES index 0b9fc738ed..ad00831a2a 100644 --- a/THIRD-PARTY-NOTICES +++ b/THIRD-PARTY-NOTICES @@ -39,35 +39,60 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -2) License Notice for bson-stdint-win32.h +2) License Notice for _asyncio_lock.py ----------------------------------------- -ISO C9x compliant stdint.h for Microsoft Visual Studio -Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 - - Copyright (c) 2006-2013 Alexander Chemeris - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the product nor the names of its contributors may - be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001-2024 Python Software Foundation; All Rights Reserved" +are retained in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. 
PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/_setup.py b/_setup.py new file mode 100644 index 0000000000..f99e9e7dc8 --- /dev/null +++ b/_setup.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +import os +import sys +import warnings + +# Hack to silence atexit traceback in some Python versions +try: + import multiprocessing # noqa: F401 +except ImportError: + pass + +from setuptools import setup +from setuptools.command.build_ext import build_ext +from setuptools.extension import Extension + + +class custom_build_ext(build_ext): + """Allow C extension building to fail. + + The C extension speeds up BSON encoding, but is not essential. + """ + + warning_message = """ +******************************************************************** +WARNING: %s could not +be compiled. No C extensions are essential for PyMongo to run, +although they do result in significant speed improvements. +%s + +Please see the installation docs for solutions to build issues: + +https://pymongo.readthedocs.io/en/stable/installation.html + +Here are some hints for popular operating systems: + +If you are seeing this message on Linux you probably need to +install GCC and/or the Python development package for your +version of Python. + +Debian and Ubuntu users should issue the following command: + + $ sudo apt-get install build-essential python-dev + +Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, +Oracle Linux, Fedora, etc.) should issue the following command: + + $ sudo yum install gcc python-devel + +If you are seeing this message on Microsoft Windows please install +PyMongo using pip. Modern versions of pip will install PyMongo +from binary wheels available on pypi. If you must install from +source read the documentation here: + +https://pymongo.readthedocs.io/en/stable/installation.html#installing-from-source-on-windows + +If you are seeing this message on macOS / OSX please install PyMongo +using pip. Modern versions of pip will install PyMongo from binary +wheels available on pypi. 
If wheels are not available for your version +of macOS / OSX, or you must install from source read the documentation +here: + +https://pymongo.readthedocs.io/en/stable/installation.html#osx +******************************************************************** +""" + + def run(self): + try: + build_ext.run(self) + except Exception: + if os.environ.get("PYMONGO_C_EXT_MUST_BUILD"): + raise + e = sys.exc_info()[1] + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "Extension modules", + "There was an issue with your platform configuration - see above.", + ), + stacklevel=2, + ) + + def build_extension(self, ext): + # "ProgramFiles(x86)" is not a valid environment variable in Cygwin but is needed for + # the MSVCCompiler in distutils. + if os.name == "nt": + if "ProgramFiles" in os.environ and "ProgramFiles(x86)" not in os.environ: + os.environ["ProgramFiles(x86)"] = os.environ["ProgramFiles"] + " (x86)" + name = ext.name + try: + build_ext.build_extension(self, ext) + except Exception: + if os.environ.get("PYMONGO_C_EXT_MUST_BUILD"): + raise + e = sys.exc_info()[1] + sys.stdout.write("%s\n" % str(e)) + warnings.warn( + self.warning_message + % ( + "The %s extension module" % (name,), # noqa: UP031 + "The output above this warning shows how the compilation failed.", + ), + stacklevel=2, + ) + + +ext_modules = [ + Extension( + "bson._cbson", + include_dirs=["bson"], + sources=["bson/_cbsonmodule.c", "bson/time64.c", "bson/buffer.c"], + ), + Extension( + "pymongo._cmessage", + include_dirs=["bson"], + sources=[ + "pymongo/_cmessagemodule.c", + "bson/_cbsonmodule.c", + "bson/time64.c", + "bson/buffer.c", + ], + ), +] + + +if "--no_ext" in sys.argv or os.environ.get("NO_EXT"): + try: + sys.argv.remove("--no_ext") + except ValueError: + pass + ext_modules = [] +elif ( + sys.platform.startswith("java") + or sys.platform == "cli" + or sys.implementation.name in ("pypy", "graalpy") +): + sys.stdout.write( + """ +*****************************************************\n +The optional C extensions are currently not supported\n +by this python implementation.\n +*****************************************************\n +""" + ) + ext_modules = [] + +setup( + cmdclass={"build_ext": custom_build_ext}, + ext_modules=ext_modules, + packages=["bson", "pymongo", "gridfs"], +) # type:ignore diff --git a/bson/__init__.py b/bson/__init__.py index cc0850709e..d260fb876f 100644 --- a/bson/__init__.py +++ b/bson/__init__.py @@ -22,30 +22,46 @@ None null both bool boolean both int [#int]_ int32 / int64 py -> bson -`bson.int64.Int64` int64 both +:class:`bson.int64.Int64` int64 both float number (real) both str string both list array both -dict / `SON` object both -datetime.datetime [#dt]_ [#dt2]_ date both -`bson.regex.Regex` regex both +dict object both +:class:`~bson.son.SON` object both +:py:class:`~collections.abc.Mapping` object py -> bson +:class:`~bson.raw_bson.RawBSONDocument` object both [#raw]_ +datetime.datetime [#dt]_ [#dt2]_ UTC datetime both +:class:`~bson.datetime_ms.DatetimeMS` UTC datetime both [#dt3]_ +:class:`~bson.regex.Regex` regex both compiled re [#re]_ regex py -> bson -`bson.binary.Binary` binary both -`bson.objectid.ObjectId` oid both -`bson.dbref.DBRef` dbref both +:class:`~bson.binary.Binary` binary both +:py:class:`uuid.UUID` [#uuid]_ binary both +:class:`~bson.objectid.ObjectId` oid both +:class:`~bson.dbref.DBRef` dbref both +:class:`~bson.dbref.DBRef` dbpointer bson -> py None undefined bson -> py -`bson.code.Code` code both +:class:`~bson.code.Code` code 
both str symbol bson -> py bytes [#bytes]_ binary both +:class:`~bson.timestamp.Timestamp` timestamp both +:class:`~bson.decimal128.Decimal128` decimal128 both +:class:`~bson.min_key.MinKey` min key both +:class:`~bson.max_key.MaxKey` max key both ======================================= ============= =================== .. [#int] A Python int will be saved as a BSON int32 or BSON int64 depending on its size. A BSON int32 will always decode to a Python int. A BSON int64 will always decode to a :class:`~bson.int64.Int64`. -.. [#dt] datetime.datetime instances will be rounded to the nearest - millisecond when saved -.. [#dt2] all datetime.datetime instances are treated as *naive*. clients - should always use UTC. +.. [#raw] Decoding a bson object to :class:`~bson.raw_bson.RawBSONDocument` can be + optionally configured via :attr:`~bson.codec_options.CodecOptions.document_class`. +.. [#dt] datetime.datetime instances are encoded with millisecond precision so + the microsecond field is truncated. +.. [#dt2] all datetime.datetime instances are encoded as UTC. By default, they + are decoded as *naive* but timezone aware datetimes are also supported. + See `Dates and Times `_ for examples. +.. [#dt3] To enable decoding a bson UTC datetime to a :class:`~bson.datetime_ms.DatetimeMS` + instance see `handling out of range datetimes `_. +.. [#uuid] For :py:class:`uuid.UUID` encoding and decoding behavior see ``_. .. [#re] :class:`~bson.regex.Regex` instances and regular expression objects from ``re.compile()`` are both saved as BSON regular expressions. BSON regular expressions are decoded as :class:`~bson.regex.Regex` @@ -53,16 +69,17 @@ .. [#bytes] The bytes type is encoded as BSON binary with subtype 0. It will be decoded back to bytes. """ +from __future__ import annotations -import calendar import datetime import itertools +import os import re import struct import sys import uuid -from codecs import utf_8_decode as _utf_8_decode # type: ignore[attr-defined] -from codecs import utf_8_encode as _utf_8_encode # type: ignore[attr-defined] +from codecs import utf_8_decode as _utf_8_decode +from codecs import utf_8_encode as _utf_8_encode from collections import abc as _abc from typing import ( IO, @@ -70,10 +87,8 @@ Any, BinaryIO, Callable, - Dict, Generator, Iterator, - List, Mapping, MutableMapping, NoReturn, @@ -84,6 +99,7 @@ TypeVar, Union, cast, + overload, ) from bson.binary import ( @@ -100,9 +116,16 @@ from bson.codec_options import ( DEFAULT_CODEC_OPTIONS, CodecOptions, - _DocumentType, + DatetimeConversion, _raw_document_class, ) +from bson.datetime_ms import ( + EPOCH_AWARE, + EPOCH_NAIVE, + DatetimeMS, + _datetime_to_millis, + _millis_to_datetime, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.errors import InvalidBSON, InvalidDocument, InvalidStringData @@ -117,9 +140,8 @@ # Import some modules for type-checking only. 
if TYPE_CHECKING: - from array import array - from mmap import mmap - + from bson.raw_bson import RawBSONDocument + from bson.typings import _DocumentType, _ReadableBuffer try: from bson import _cbson # type: ignore[attr-defined] @@ -187,12 +209,10 @@ "is_valid", "BSON", "has_c", + "DatetimeConversion", + "DatetimeMS", ] -EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) -EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) - - BSONNUM = b"\x01" # Floating point BSONSTR = b"\x02" # UTF-8 string BSONOBJ = b"\x03" # Embedded document @@ -234,33 +254,33 @@ def get_data_and_view(data: Any) -> Tuple[Any, memoryview]: def _raise_unknown_type(element_type: int, element_name: str) -> NoReturn: """Unknown type helper.""" raise InvalidBSON( - "Detected unknown BSON type %r for fieldname '%s'. Are " - "you using the latest driver version?" % (chr(element_type).encode(), element_name) + "Detected unknown BSON type {!r} for fieldname '{}'. Are " + "you using the latest driver version?".format(chr(element_type).encode(), element_name) ) def _get_int( - data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any ) -> Tuple[int, int]: """Decode a BSON int32 to python int.""" return _UNPACK_INT_FROM(data, position)[0], position + 4 -def _get_c_string(data: Any, view: Any, position: int, opts: CodecOptions) -> Tuple[str, int]: +def _get_c_string(data: Any, view: Any, position: int, opts: CodecOptions[Any]) -> Tuple[str, int]: """Decode a BSON 'C' string to python str.""" end = data.index(b"\x00", position) return _utf_8_decode(view[position:end], opts.unicode_decode_error_handler, True)[0], end + 1 def _get_float( - data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any ) -> Tuple[float, int]: """Decode a BSON double to python float.""" return _UNPACK_FLOAT_FROM(data, position)[0], position + 8 def _get_string( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy: Any + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], dummy: Any ) -> Tuple[str, int]: """Decode a BSON string to python str.""" length = _UNPACK_INT_FROM(data, position)[0] @@ -278,12 +298,12 @@ def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: try: obj_size = _UNPACK_INT_FROM(data, position)[0] except struct.error as exc: - raise InvalidBSON(str(exc)) + raise InvalidBSON(str(exc)) from None end = position + obj_size - 1 - if data[end] != 0: - raise InvalidBSON("bad eoo") if end >= obj_end: raise InvalidBSON("invalid object length") + if data[end] != 0: + raise InvalidBSON("bad eoo") # If this is the top-level document, validate the total size too. 
if position == 0 and obj_size != obj_end: raise InvalidBSON("invalid object length") @@ -291,7 +311,7 @@ def _get_object_size(data: Any, position: int, obj_end: int) -> Tuple[int, int]: def _get_object( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy: Any + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], dummy: Any ) -> Tuple[Any, int]: """Decode a BSON subdocument to opts.document_class or bson.dbref.DBRef.""" obj_size, end = _get_object_size(data, position, obj_end) @@ -312,7 +332,7 @@ def _get_object( def _get_array( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], element_name: str ) -> Tuple[Any, int]: """Decode a BSON array to python list.""" size = _UNPACK_INT_FROM(data, position)[0] @@ -322,7 +342,7 @@ def _get_array( position += 4 end -= 1 - result: List[Any] = [] + result: list[Any] = [] # Avoid doing global and attribute lookups in the loop. append = result.append @@ -354,7 +374,7 @@ def _get_array( def _get_binary( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, dummy1: Any + data: Any, _view: Any, position: int, obj_end: int, opts: CodecOptions[Any], dummy1: Any ) -> Tuple[Union[Binary, uuid.UUID], int]: """Decode a BSON binary to bson.binary.Binary or python UUID.""" length, subtype = _UNPACK_LENGTH_SUBTYPE_FROM(data, position) @@ -391,7 +411,7 @@ def _get_binary( def _get_oid( - data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any ) -> Tuple[ObjectId, int]: """Decode a BSON ObjectId to bson.objectid.ObjectId.""" end = position + 12 @@ -399,7 +419,7 @@ def _get_oid( def _get_boolean( - data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any ) -> Tuple[bool, int]: """Decode a BSON true/false to python True/False.""" end = position + 1 @@ -412,14 +432,14 @@ def _get_boolean( def _get_date( - data: Any, view: Any, position: int, dummy0: int, opts: CodecOptions, dummy1: Any -) -> Tuple[datetime.datetime, int]: + data: Any, _view: Any, position: int, dummy0: int, opts: CodecOptions[Any], dummy1: Any +) -> Tuple[Union[datetime.datetime, DatetimeMS], int]: """Decode a BSON datetime to python datetime.datetime.""" return _millis_to_datetime(_UNPACK_LONG_FROM(data, position)[0], opts), position + 8 def _get_code( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], element_name: str ) -> Tuple[Code, int]: """Decode a BSON code to bson.code.Code.""" code, position = _get_string(data, view, position, obj_end, opts, element_name) @@ -427,7 +447,7 @@ def _get_code( def _get_code_w_scope( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str + data: Any, view: Any, position: int, _obj_end: int, opts: CodecOptions[Any], element_name: str ) -> Tuple[Code, int]: """Decode a BSON code_w_scope to bson.code.Code.""" code_end = position + _UNPACK_INT_FROM(data, position)[0] @@ -439,8 +459,8 @@ def _get_code_w_scope( def _get_regex( - data: Any, view: Any, position: int, dummy0: Any, opts: CodecOptions, dummy1: Any -) -> Tuple[Regex, int]: + data: Any, view: Any, position: int, dummy0: Any, opts: CodecOptions[Any], dummy1: Any +) -> Tuple[Regex[Any], 
int]: """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, view, position, opts) bson_flags, position = _get_c_string(data, view, position, opts) @@ -449,7 +469,7 @@ def _get_regex( def _get_ref( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, element_name: str + data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions[Any], element_name: str ) -> Tuple[DBRef, int]: """Decode (deprecated) BSON DBPointer to bson.dbref.DBRef.""" collection, position = _get_string(data, view, position, obj_end, opts, element_name) @@ -458,7 +478,7 @@ def _get_ref( def _get_timestamp( - data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any ) -> Tuple[Timestamp, int]: """Decode a BSON timestamp to bson.timestamp.Timestamp.""" inc, timestamp = _UNPACK_TIMESTAMP_FROM(data, position) @@ -466,14 +486,14 @@ def _get_timestamp( def _get_int64( - data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any ) -> Tuple[Int64, int]: """Decode a BSON int64 to bson.int64.Int64.""" return Int64(_UNPACK_LONG_FROM(data, position)[0]), position + 8 def _get_decimal128( - data: Any, view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any + data: Any, _view: Any, position: int, dummy0: Any, dummy1: Any, dummy2: Any ) -> Tuple[Decimal128, int]: """Decode a BSON decimal128 to bson.decimal128.Decimal128.""" end = position + 16 @@ -486,17 +506,17 @@ def _get_decimal128( # - position: int, beginning of object in 'data' to decode # - obj_end: int, end of object to decode in 'data' if variable-length type # - opts: a CodecOptions -_ELEMENT_GETTER: Dict[int, Callable[..., Tuple[Any, int]]] = { +_ELEMENT_GETTER: dict[int, Callable[..., Tuple[Any, int]]] = { ord(BSONNUM): _get_float, ord(BSONSTR): _get_string, ord(BSONOBJ): _get_object, ord(BSONARR): _get_array, ord(BSONBIN): _get_binary, - ord(BSONUND): lambda u, v, w, x, y, z: (None, w), # Deprecated undefined + ord(BSONUND): lambda u, v, w, x, y, z: (None, w), # noqa: ARG005 # Deprecated undefined ord(BSONOID): _get_oid, ord(BSONBOO): _get_boolean, ord(BSONDAT): _get_date, - ord(BSONNUL): lambda u, v, w, x, y, z: (None, w), + ord(BSONNUL): lambda u, v, w, x, y, z: (None, w), # noqa: ARG005 ord(BSONRGX): _get_regex, ord(BSONREF): _get_ref, # Deprecated DBPointer ord(BSONCOD): _get_code, @@ -506,27 +526,43 @@ def _get_decimal128( ord(BSONTIM): _get_timestamp, ord(BSONLON): _get_int64, ord(BSONDEC): _get_decimal128, - ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), - ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w), + ord(BSONMIN): lambda u, v, w, x, y, z: (MinKey(), w), # noqa: ARG005 + ord(BSONMAX): lambda u, v, w, x, y, z: (MaxKey(), w), # noqa: ARG005 } if _USE_C: def _element_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions - ) -> Any: - return _cbson._element_to_dict(data, position, obj_end, opts) + data: Any, + view: Any, # noqa: ARG001 + position: int, + obj_end: int, + opts: CodecOptions[Any], + raw_array: bool = False, + ) -> Tuple[str, Any, int]: + return cast( + "Tuple[str, Any, int]", + _cbson._element_to_dict(data, position, obj_end, opts, raw_array), + ) else: def _element_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions - ) -> Any: + data: Any, + view: Any, + position: int, + obj_end: int, + opts: 
CodecOptions[Any], + raw_array: bool = False, + ) -> Tuple[str, Any, int]: """Decode a single key, value pair.""" element_type = data[position] position += 1 element_name, position = _get_c_string(data, view, position, opts) + if raw_array and element_type == ord(BSONARR): + _, end = _get_object_size(data, position, len(data)) + return element_name, view[position : end + 1], end + 1 try: value, position = _ELEMENT_GETTER[element_type]( data, view, position, obj_end, opts, element_name @@ -542,47 +578,64 @@ def _element_to_dict( return element_name, value, position -_T = TypeVar("_T", bound=MutableMapping[Any, Any]) +_T = TypeVar("_T", bound=MutableMapping[str, Any]) -def _raw_to_dict(data: Any, position: int, obj_end: int, opts: CodecOptions, result: _T) -> _T: +def _raw_to_dict( + data: Any, + position: int, + obj_end: int, + opts: CodecOptions[RawBSONDocument], + result: _T, + raw_array: bool = False, +) -> _T: data, view = get_data_and_view(data) - return _elements_to_dict(data, view, position, obj_end, opts, result) + return cast( + _T, _elements_to_dict(data, view, position, obj_end, opts, result, raw_array=raw_array) + ) def _elements_to_dict( - data: Any, view: Any, position: int, obj_end: int, opts: CodecOptions, result: Any = None + data: Any, + view: Any, + position: int, + obj_end: int, + opts: CodecOptions[Any], + result: Any = None, + raw_array: bool = False, ) -> Any: """Decode a BSON document into result.""" if result is None: result = opts.document_class() end = obj_end - 1 while position < end: - key, value, position = _element_to_dict(data, view, position, obj_end, opts) + key, value, position = _element_to_dict( + data, view, position, obj_end, opts, raw_array=raw_array + ) result[key] = value if position != obj_end: raise InvalidBSON("bad object or element length") return result -def _bson_to_dict(data: Any, opts: CodecOptions) -> Any: +def _bson_to_dict(data: Any, opts: CodecOptions[_DocumentType]) -> _DocumentType: """Decode a BSON string to document_class.""" data, view = get_data_and_view(data) try: if _raw_document_class(opts.document_class): - return opts.document_class(data, opts) + return opts.document_class(data, opts) # type:ignore[call-arg] _, end = _get_object_size(data, 0, len(data)) - return _elements_to_dict(data, view, 4, end, opts) + return cast("_DocumentType", _elements_to_dict(data, view, 4, end, opts)) except InvalidBSON: raise except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() - raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) + raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) from None if _USE_C: - _bson_to_dict = _cbson._bson_to_dict # noqa: F811 + _bson_to_dict = _cbson._bson_to_dict _PACK_FLOAT = struct.Struct(" Generator[bytes, None, None]: The first 1000 keys are returned from a pre-built cache. All subsequent keys are generated on the fly. 
""" - for name in _LIST_NAMES: - yield name + yield from _LIST_NAMES counter = itertools.count(1000) while True: @@ -617,11 +669,13 @@ def _make_c_string_check(string: Union[str, bytes]) -> bytes: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid UTF-8: %r" % string) + raise InvalidStringData( + "strings in documents must be valid UTF-8: %r" % string + ) from None else: if "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + return _utf_8_encode(string)[0] + b"\x00" def _make_c_string(string: Union[str, bytes]) -> bytes: @@ -631,17 +685,18 @@ def _make_c_string(string: Union[str, bytes]) -> bytes: _utf_8_decode(string, None, True) return string + b"\x00" except UnicodeError: - raise InvalidStringData("strings in documents must be valid UTF-8: %r" % string) + raise InvalidStringData( + "strings in documents must be valid UTF-8: %r" % string + ) from None else: - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + return _utf_8_encode(string)[0] + b"\x00" def _make_name(string: str) -> bytes: """Make a 'C' string suitable for a BSON key.""" - # Keys can only be text in python 3. if "\x00" in string: - raise InvalidDocument("BSON keys / regex patterns must not contain a NUL character") - return cast(bytes, _utf_8_encode(string)[0]) + b"\x00" + raise InvalidDocument("BSON keys must not contain a NUL character") + return _utf_8_encode(string)[0] + b"\x00" def _encode_float(name: bytes, value: float, dummy0: Any, dummy1: Any) -> bytes: @@ -655,15 +710,15 @@ def _encode_bytes(name: bytes, value: bytes, dummy0: Any, dummy1: Any) -> bytes: return b"\x05" + name + _PACK_INT(len(value)) + b"\x00" + value -def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: CodecOptions) -> bytes: +def _encode_mapping(name: bytes, value: Any, check_keys: bool, opts: CodecOptions[Any]) -> bytes: """Encode a mapping type.""" if _raw_document_class(value): - return b"\x03" + name + value.raw + return b"\x03" + name + cast(bytes, value.raw) data = b"".join([_element_to_bson(key, val, check_keys, opts) for key, val in value.items()]) return b"\x03" + name + _PACK_INT(len(data) + 5) + data + b"\x00" -def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: CodecOptions) -> bytes: +def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: CodecOptions[Any]) -> bytes: """Encode bson.dbref.DBRef.""" buf = bytearray(b"\x03" + name + b"\x00\x00\x00\x00") begin = len(buf) - 4 @@ -680,7 +735,9 @@ def _encode_dbref(name: bytes, value: DBRef, check_keys: bool, opts: CodecOption return bytes(buf) -def _encode_list(name: bytes, value: Sequence[Any], check_keys: bool, opts: CodecOptions) -> bytes: +def _encode_list( + name: bytes, value: Sequence[Any], check_keys: bool, opts: CodecOptions[Any] +) -> bytes: """Encode a list/tuple.""" lname = gen_list_name() data = b"".join([_name_value_to_bson(next(lname), item, check_keys, opts) for item in value]) @@ -701,7 +758,7 @@ def _encode_binary(name: bytes, value: Binary, dummy0: Any, dummy1: Any) -> byte return b"\x05" + name + _PACK_LENGTH_SUBTYPE(len(value), subtype) + value -def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: CodecOptions) -> bytes: +def _encode_uuid(name: bytes, value: uuid.UUID, dummy: Any, opts: CodecOptions[Any]) -> bytes: """Encode uuid.UUID.""" uuid_representation = opts.uuid_representation binval = 
Binary.from_uuid(value, uuid_representation=uuid_representation) @@ -724,12 +781,18 @@ def _encode_datetime(name: bytes, value: datetime.datetime, dummy0: Any, dummy1: return b"\x09" + name + _PACK_LONG(millis) +def _encode_datetime_ms(name: bytes, value: DatetimeMS, dummy0: Any, dummy1: Any) -> bytes: + """Encode datetime.datetime.""" + millis = int(value) + return b"\x09" + name + _PACK_LONG(millis) + + def _encode_none(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: """Encode python None.""" return b"\x0A" + name -def _encode_regex(name: bytes, value: Regex, dummy0: Any, dummy1: Any) -> bytes: +def _encode_regex(name: bytes, value: Regex[Any], dummy0: Any, dummy1: Any) -> bytes: """Encode a python regex or bson.regex.Regex.""" flags = value.flags # Python 3 common case @@ -755,7 +818,7 @@ def _encode_regex(name: bytes, value: Regex, dummy0: Any, dummy1: Any) -> bytes: return b"\x0B" + name + _make_c_string_check(value.pattern) + sflags -def _encode_code(name: bytes, value: Code, dummy: Any, opts: CodecOptions) -> bytes: +def _encode_code(name: bytes, value: Code, dummy: Any, opts: CodecOptions[Any]) -> bytes: """Encode bson.code.Code.""" cstring = _make_c_string(value) cstrlen = len(cstring) @@ -774,7 +837,7 @@ def _encode_int(name: bytes, value: int, dummy0: Any, dummy1: Any) -> bytes: try: return b"\x12" + name + _PACK_LONG(value) except struct.error: - raise OverflowError("BSON can only handle up to 8-byte ints") + raise OverflowError("BSON can only handle up to 8-byte ints") from None def _encode_timestamp(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes: @@ -783,11 +846,11 @@ def _encode_timestamp(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> byte def _encode_long(name: bytes, value: Any, dummy0: Any, dummy1: Any) -> bytes: - """Encode a python long (python 2.x)""" + """Encode a bson.int64.Int64.""" try: return b"\x12" + name + _PACK_LONG(value) except struct.error: - raise OverflowError("BSON can only handle up to 8-byte ints") + raise OverflowError("BSON can only handle up to 8-byte ints") from None def _encode_decimal128(name: bytes, value: Decimal128, dummy0: Any, dummy1: Any) -> bytes: @@ -814,6 +877,7 @@ def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: bool: _encode_bool, bytes: _encode_bytes, datetime.datetime: _encode_datetime, + DatetimeMS: _encode_datetime_ms, dict: _encode_mapping, float: _encode_float, int: _encode_int, @@ -838,18 +902,11 @@ def _encode_maxkey(name: bytes, dummy0: Any, dummy1: Any, dummy2: Any) -> bytes: _abc.Mapping: _encode_mapping, } - -_MARKERS = { - 5: _encode_binary, - 7: _encode_objectid, - 11: _encode_regex, - 13: _encode_code, - 17: _encode_timestamp, - 18: _encode_long, - 100: _encode_dbref, - 127: _encode_maxkey, - 255: _encode_minkey, -} +# Map each _type_marker to its encoder for faster lookup. +_MARKERS = {} +for _typ in _ENCODERS: + if hasattr(_typ, "_type_marker"): + _MARKERS[_typ._type_marker] = _ENCODERS[_typ] _BUILT_IN_TYPES = tuple(t for t in _ENCODERS) @@ -859,17 +916,26 @@ def _name_value_to_bson( name: bytes, value: Any, check_keys: bool, - opts: CodecOptions, + opts: CodecOptions[Any], in_custom_call: bool = False, in_fallback_call: bool = False, ) -> bytes: """Encode a single name, value pair.""" + + was_integer_overflow = False + # First see if the type is already cached. KeyError will only ever # happen once per subtype. 
try: return _ENCODERS[type(value)](name, value, check_keys, opts) # type: ignore except KeyError: pass + except OverflowError: + if not isinstance(value, int): + raise + + # Give the fallback_encoder a chance + was_integer_overflow = True # Second, fall back to trying _type_marker. This has to be done # before the loop below since users could subclass one of our @@ -895,7 +961,7 @@ def _name_value_to_bson( # is done after trying the custom type encoder because checking for each # subtype is expensive. for base in _BUILT_IN_TYPES: - if isinstance(value, base): + if not was_integer_overflow and isinstance(value, base): func = _ENCODERS[base] # Cache this type for faster subsequent lookup. _ENCODERS[type(value)] = func @@ -909,24 +975,28 @@ def _name_value_to_bson( name, fallback_encoder(value), check_keys, opts, in_fallback_call=True ) - raise InvalidDocument("cannot encode object: %r, of type: %r" % (value, type(value))) + if was_integer_overflow: + raise OverflowError("BSON can only handle up to 8-byte ints") + raise InvalidDocument(f"cannot encode object: {value!r}, of type: {type(value)!r}") -def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: CodecOptions) -> bytes: +def _element_to_bson(key: Any, value: Any, check_keys: bool, opts: CodecOptions[Any]) -> bytes: """Encode a single key, value pair.""" if not isinstance(key, str): - raise InvalidDocument("documents must have only string keys, key was %r" % (key,)) + raise InvalidDocument(f"documents must have only string keys, key was {key!r}") if check_keys: if key.startswith("$"): - raise InvalidDocument("key %r must not start with '$'" % (key,)) + raise InvalidDocument(f"key {key!r} must not start with '$'") if "." in key: - raise InvalidDocument("key %r must not contain '.'" % (key,)) + raise InvalidDocument(f"key {key!r} must not contain '.'") name = _make_name(key) return _name_value_to_bson(name, value, check_keys, opts) -def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: bool = True) -> bytes: +def _dict_to_bson( + doc: Any, check_keys: bool, opts: CodecOptions[Any], top_level: bool = True +) -> bytes: """Encode a document to BSON.""" if _raw_document_class(doc): return cast(bytes, doc.raw) @@ -936,67 +1006,43 @@ def _dict_to_bson(doc: Any, check_keys: bool, opts: CodecOptions, top_level: boo elements.append(_name_value_to_bson(b"_id\x00", doc["_id"], check_keys, opts)) for key, value in doc.items(): if not top_level or key != "_id": - elements.append(_element_to_bson(key, value, check_keys, opts)) + try: + elements.append(_element_to_bson(key, value, check_keys, opts)) + except InvalidDocument as err: + raise InvalidDocument(f"Invalid document: {err}", doc) from err except AttributeError: - raise TypeError("encoder expected a mapping type but got: %r" % (doc,)) + raise TypeError(f"encoder expected a mapping type but got: {doc!r}") from None encoded = b"".join(elements) return _PACK_INT(len(encoded) + 5) + encoded + b"\x00" if _USE_C: - _dict_to_bson = _cbson._dict_to_bson # noqa: F811 - - -def _millis_to_datetime(millis: int, opts: CodecOptions) -> datetime.datetime: - """Convert milliseconds since epoch UTC to datetime.""" - diff = ((millis % 1000) + 1000) % 1000 - seconds = (millis - diff) // 1000 - micros = diff * 1000 - if opts.tz_aware: - dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) - if opts.tzinfo: - dt = dt.astimezone(opts.tzinfo) - return dt - else: - return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) - - -def 
_datetime_to_millis(dtm: datetime.datetime) -> int: - """Convert datetime to milliseconds since epoch UTC.""" - if dtm.utcoffset() is not None: - dtm = dtm - dtm.utcoffset() # type: ignore - return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) + _dict_to_bson = _cbson._dict_to_bson _CODEC_OPTIONS_TYPE_ERROR = TypeError("codec_options must be an instance of CodecOptions") -_DocumentIn = Mapping[str, Any] -_ReadableBuffer = Union[bytes, memoryview, "mmap", "array"] - - def encode( - document: _DocumentIn, + document: Mapping[str, Any], check_keys: bool = False, - codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, + codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS, ) -> bytes: """Encode a document to BSON. A document can be any mapping type (like :class:`dict`). Raises :class:`TypeError` if `document` is not a mapping type, - or contains keys that are not instances of - :class:`basestring` (:class:`str` in python 3). Raises + or contains keys that are not instances of :class:`str`. Raises :class:`~bson.errors.InvalidDocument` if `document` cannot be converted to :class:`BSON`. - :Parameters: - - `document`: mapping type representing a document - - `check_keys` (optional): check if keys start with '$' or + :param document: mapping type representing a document + :param check_keys: check if keys start with '$' or contain '.', raising :class:`~bson.errors.InvalidDocument` in either case - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionadded:: 3.9 @@ -1007,9 +1053,19 @@ def encode( return _dict_to_bson(document, check_keys, codec_options) +@overload +def decode(data: _ReadableBuffer, codec_options: None = None) -> dict[str, Any]: + ... + + +@overload +def decode(data: _ReadableBuffer, codec_options: CodecOptions[_DocumentType]) -> _DocumentType: + ... + + def decode( - data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> _DocumentType: + data: _ReadableBuffer, codec_options: Optional[CodecOptions[_DocumentType]] = None +) -> Union[dict[str, Any], _DocumentType]: """Decode BSON to a document. By default, returns a BSON document represented as a Python @@ -1027,26 +1083,25 @@ def decode( >>> type(decoded_doc) - :Parameters: - - `data`: the BSON to decode. Any bytes-like object that implements + :param data: the BSON to decode. Any bytes-like object that implements the buffer protocol. - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. 
versionadded:: 3.9 """ - opts: CodecOptions = codec_options or DEFAULT_CODEC_OPTIONS + opts: CodecOptions[Any] = codec_options or DEFAULT_CODEC_OPTIONS if not isinstance(opts, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - return _bson_to_dict(data, opts) + return cast("Union[dict[str, Any], _DocumentType]", _bson_to_dict(data, opts)) -def _decode_all(data: _ReadableBuffer, opts: "CodecOptions[_DocumentType]") -> List[_DocumentType]: +def _decode_all(data: _ReadableBuffer, opts: CodecOptions[_DocumentType]) -> list[_DocumentType]: """Decode a BSON data to multiple documents.""" data, view = get_data_and_view(data) data_len = len(data) - docs: List[_DocumentType] = [] + docs: list[_DocumentType] = [] position = 0 end = data_len - 1 use_raw = _raw_document_class(opts.document_class) @@ -1069,24 +1124,35 @@ def _decode_all(data: _ReadableBuffer, opts: "CodecOptions[_DocumentType]") -> L except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() - raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) + raise InvalidBSON(str(exc_value)).with_traceback(exc_tb) from None if _USE_C: - _decode_all = _cbson._decode_all # noqa: F811 + _decode_all = _cbson._decode_all + + +@overload +def decode_all(data: _ReadableBuffer, codec_options: None = None) -> list[dict[str, Any]]: + ... +@overload def decode_all( - data: _ReadableBuffer, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> List[_DocumentType]: + data: _ReadableBuffer, codec_options: CodecOptions[_DocumentType] +) -> list[_DocumentType]: + ... + + +def decode_all( + data: _ReadableBuffer, codec_options: Optional[CodecOptions[_DocumentType]] = None +) -> Union[list[dict[str, Any]], list[_DocumentType]]: """Decode BSON data to multiple documents. `data` must be a bytes-like object implementing the buffer protocol that provides concatenated, valid, BSON-encoded documents. - :Parameters: - - `data`: BSON data - - `codec_options` (optional): An instance of + :param data: BSON data + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.9 @@ -1101,62 +1167,99 @@ def decode_all( Replaced `as_class`, `tz_aware`, and `uuid_subtype` options with `codec_options`. """ - opts = codec_options or DEFAULT_CODEC_OPTIONS - if not isinstance(opts, CodecOptions): + if codec_options is None: + return _decode_all(data, DEFAULT_CODEC_OPTIONS) + + if not isinstance(codec_options, CodecOptions): raise _CODEC_OPTIONS_TYPE_ERROR - return _decode_all(data, opts) # type: ignore[arg-type] + return _decode_all(data, codec_options) -def _decode_selective(rawdoc: Any, fields: Any, codec_options: Any) -> Mapping[Any, Any]: +def _decode_selective( + rawdoc: Any, fields: Any, codec_options: CodecOptions[_DocumentType] +) -> _DocumentType: if _raw_document_class(codec_options.document_class): # If document_class is RawBSONDocument, use vanilla dictionary for # decoding command response. - doc = {} + doc: _DocumentType = {} # type:ignore[assignment] else: # Else, use the specified document_class. 
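The @overload pair on decode above exists purely for type checkers: calling it with no codec options yields dict[str, Any], while passing a CodecOptions[_DocumentType] propagates its document class into the return type. A usage sketch, assuming only public bson APIs:

import bson
from bson.codec_options import CodecOptions
from bson.raw_bson import RawBSONDocument

data = bson.encode({"x": 1})
plain = bson.decode(data)  # checkers infer dict[str, Any]

opts: CodecOptions[RawBSONDocument] = CodecOptions(document_class=RawBSONDocument)
raw_doc = bson.decode(data, opts)  # checkers infer RawBSONDocument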
doc = codec_options.document_class() for key, value in rawdoc.items(): if key in fields: if fields[key] == 1: - doc[key] = _bson_to_dict(rawdoc.raw, codec_options)[key] + doc[key] = _bson_to_dict(rawdoc.raw, codec_options)[key] # type:ignore[index] else: - doc[key] = _decode_selective(value, fields[key], codec_options) + doc[key] = _decode_selective( # type:ignore[index] + value, fields[key], codec_options + ) else: - doc[key] = value + doc[key] = value # type:ignore[index] return doc +def _array_of_documents_to_buffer(data: Union[memoryview, bytes]) -> bytes: + # Extract the raw bytes of each document. + position = 0 + view = memoryview(data) + _, end = _get_object_size(view, position, len(view)) + position += 4 + buffers: list[memoryview] = [] + append = buffers.append + while position < end - 1: + # Just skip the keys. + while view[position] != 0: + position += 1 + position += 1 + obj_size, _ = _get_object_size(view, position, end) + append(view[position : position + obj_size]) + position += obj_size + if position != end: + raise InvalidBSON("bad object or element length") + return b"".join(buffers) + + +if _USE_C: + _array_of_documents_to_buffer = _cbson._array_of_documents_to_buffer + + def _convert_raw_document_lists_to_streams(document: Any) -> None: + """Convert raw array of documents to a stream of BSON documents.""" cursor = document.get("cursor") - if cursor: - for key in ("firstBatch", "nextBatch"): - batch = cursor.get(key) - if batch: - stream = b"".join(doc.raw for doc in batch) - cursor[key] = [stream] + if not cursor: + return + for key in ("firstBatch", "nextBatch"): + batch = cursor.get(key) + if not batch: + continue + data = _array_of_documents_to_buffer(batch) + if data: + cursor[key] = [data] + else: + cursor[key] = [] -def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) -> List[Any]: +def _decode_all_selective( + data: Any, codec_options: CodecOptions[_DocumentType], fields: Any +) -> list[_DocumentType]: """Decode BSON data to a single document while using user-provided custom decoding logic. `data` must be a string representing a valid, BSON-encoded document. - :Parameters: - - `data`: BSON data - - `codec_options`: An instance of + :param data: BSON data + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions` with user-specified type decoders. If no decoders are found, this method is the same as ``decode_all``. - - `fields`: Map of document namespaces where data that needs + :param fields: Map of document namespaces where data that needs to be custom decoded lives or None. For example, to custom decode a list of objects in 'field1.subfield1', the specified value should be ``{'field1': {'subfield1': 1}}``. If ``fields`` is an empty map or None, this method is the same as ``decode_all``. - :Returns: - - `document_list`: Single-member list containing the decoded document. + :return: Single-member list containing the decoded document. .. versionadded:: 3.8 """ @@ -1169,7 +1272,7 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - # Decode documents for internal use. 
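Backing up to _array_of_documents_to_buffer above: it leans on the BSON array layout, where an array is just a document whose keys are "0", "1", ..., so the loop skips each key's C string and copies each embedded document verbatim. The concatenation it returns is exactly the shape decode_all already accepts, which is why _convert_raw_document_lists_to_streams can store it straight back into the cursor batches. A sketch of that equivalence with public APIs:

import bson

batch = [{"x": 1}, {"y": 2}]
# What _array_of_documents_to_buffer extracts from a raw firstBatch/nextBatch
# array: the bare documents, concatenated.
stream = b"".join(bson.encode(doc) for doc in batch)
assert bson.decode_all(stream) == batch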
from bson.raw_bson import RawBSONDocument - internal_codec_options = codec_options.with_options( + internal_codec_options: CodecOptions[RawBSONDocument] = codec_options.with_options( document_class=RawBSONDocument, type_registry=None ) _doc = _bson_to_dict(data, internal_codec_options) @@ -1182,9 +1285,19 @@ def _decode_all_selective(data: Any, codec_options: CodecOptions, fields: Any) - ] +@overload +def decode_iter(data: bytes, codec_options: None = None) -> Iterator[dict[str, Any]]: + ... + + +@overload +def decode_iter(data: bytes, codec_options: CodecOptions[_DocumentType]) -> Iterator[_DocumentType]: + ... + + def decode_iter( - data: bytes, codec_options: "Optional[CodecOptions[_DocumentType]]" = None -) -> Iterator[_DocumentType]: + data: bytes, codec_options: Optional[CodecOptions[_DocumentType]] = None +) -> Union[Iterator[dict[str, Any]], Iterator[_DocumentType]]: """Decode BSON data to multiple documents as a generator. Works similarly to the decode_all function, but yields one document at a @@ -1193,9 +1306,8 @@ def decode_iter( `data` must be a string of concatenated, valid, BSON-encoded documents. - :Parameters: - - `data`: BSON data - - `codec_options` (optional): An instance of + :param data: BSON data + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 @@ -1218,17 +1330,31 @@ def decode_iter( yield _bson_to_dict(elements, opts) +@overload +def decode_file_iter( + file_obj: Union[BinaryIO, IO[bytes]], codec_options: None = None +) -> Iterator[dict[str, Any]]: + ... + + +@overload def decode_file_iter( - file_obj: Union[BinaryIO, IO], codec_options: "Optional[CodecOptions[_DocumentType]]" = None + file_obj: Union[BinaryIO, IO[bytes]], codec_options: CodecOptions[_DocumentType] ) -> Iterator[_DocumentType]: + ... + + +def decode_file_iter( + file_obj: Union[BinaryIO, IO[bytes]], + codec_options: Optional[CodecOptions[_DocumentType]] = None, +) -> Union[Iterator[dict[str, Any]], Iterator[_DocumentType]]: """Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object in chunks and parses bson in chunks, yielding one document at a time. - :Parameters: - - `file_obj`: A file object containing BSON data. - - `codec_options` (optional): An instance of + :param file_obj: A file object containing BSON data. + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 @@ -1240,28 +1366,27 @@ def decode_file_iter( opts = codec_options or DEFAULT_CODEC_OPTIONS while True: # Read size of next object. - size_data = file_obj.read(4) + size_data: Any = file_obj.read(4) if not size_data: - break # Finished with file normaly. + break # Finished with file normally. elif len(size_data) != 4: raise InvalidBSON("cut off in middle of objsize") obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 elements = size_data + file_obj.read(max(0, obj_size)) - yield _bson_to_dict(elements, opts) + yield _bson_to_dict(elements, opts) # type:ignore[misc] def is_valid(bson: bytes) -> bool: """Check that the given string represents valid :class:`BSON` data. Raises :class:`TypeError` if `bson` is not an instance of - :class:`str` (:class:`bytes` in python 3). Returns ``True`` + :class:`bytes`. Returns ``True`` if `bson` is valid :class:`BSON`, ``False`` otherwise. 
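On decode_file_iter above: it reads the four-byte length prefix, then exactly one document's worth of bytes per iteration, so arbitrarily large dump files stream in constant memory. A usage sketch:

import io
import bson

dump = io.BytesIO(b"".join(bson.encode({"n": i}) for i in range(3)))
for doc in bson.decode_file_iter(dump):
    print(doc["n"])  # 0, 1, 2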
- :Parameters: - - `bson`: the data to be validated + :param bson: the data to be validated """ if not isinstance(bson, bytes): - raise TypeError("BSON data must be an instance of a subclass of bytes") + raise TypeError(f"BSON data must be an instance of a subclass of bytes, not {type(bson)}") try: _bson_to_dict(bson, DEFAULT_CODEC_OPTIONS) @@ -1280,27 +1405,25 @@ class BSON(bytes): @classmethod def encode( - cls: Type["BSON"], - document: _DocumentIn, + cls: Type[BSON], + document: Mapping[str, Any], check_keys: bool = False, - codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS, - ) -> "BSON": + codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS, + ) -> BSON: """Encode a document to a new :class:`BSON` instance. A document can be any mapping type (like :class:`dict`). Raises :class:`TypeError` if `document` is not a mapping type, or contains keys that are not instances of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~bson.errors.InvalidDocument` if `document` cannot be - converted to :class:`BSON`. + :class:`str`. Raises :class:`~bson.errors.InvalidDocument` + if `document` cannot be converted to :class:`BSON`. - :Parameters: - - `document`: mapping type representing a document - - `check_keys` (optional): check if keys start with '$' or + :param document: mapping type representing a document + :param check_keys: check if keys start with '$' or contain '.', raising :class:`~bson.errors.InvalidDocument` in either case - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 @@ -1308,7 +1431,9 @@ def encode( """ return cls(encode(document, check_keys, codec_options)) - def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _DocumentType: # type: ignore[override] + def decode( # type:ignore[override] + self, codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS + ) -> dict[str, Any]: """Decode this BSON data. By default, returns a BSON document represented as a Python @@ -1326,8 +1451,7 @@ def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _Docume >>> type(decoded_doc) - :Parameters: - - `codec_options` (optional): An instance of + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions`. .. versionchanged:: 3.0 @@ -1345,3 +1469,16 @@ def decode(self, codec_options: CodecOptions = DEFAULT_CODEC_OPTIONS) -> _Docume def has_c() -> bool: """Is the C extension installed?""" return _USE_C + + +def _after_fork() -> None: + """Release the ObjectId._inc_lock in the child process after a fork.""" + if ObjectId._inc_lock.locked(): + ObjectId._inc_lock.release() + + +if hasattr(os, "register_at_fork"): + # This will run in the same thread that called fork. + # If we forked inside a critical region on this same thread, this would break. + # This is fine since we would never call fork directly from a critical region.
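The registration on the next line follows the standard os.register_at_fork recipe: a fork can happen while some other thread holds ObjectId._inc_lock, and the child inherits a locked lock with no owning thread left to unlock it, so the after_in_child hook force-releases it. A generic sketch of the same pattern, with hypothetical names:

import os
import threading

_counter_lock = threading.Lock()  # stands in for ObjectId._inc_lock

def _release_in_child() -> None:
    # In the child there is no owner left to release an inherited held lock.
    if _counter_lock.locked():
        _counter_lock.release()

if hasattr(os, "register_at_fork"):  # only available on platforms with fork()
    os.register_at_fork(after_in_child=_release_in_child)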
+ os.register_at_fork(after_in_child=_after_fork) diff --git a/bson/_cbsonmodule.c b/bson/_cbsonmodule.c index da6a5cbda7..7d184641c5 100644 --- a/bson/_cbsonmodule.c +++ b/bson/_cbsonmodule.c @@ -52,6 +52,38 @@ struct module_state { PyObject* BSONInt64; PyObject* Decimal128; PyObject* Mapping; + PyObject* DatetimeMS; + PyObject* min_datetime; + PyObject* max_datetime; + PyObject* replace_args; + PyObject* replace_kwargs; + PyObject* _type_marker_str; + PyObject* _flags_str; + PyObject* _pattern_str; + PyObject* _encoder_map_str; + PyObject* _decoder_map_str; + PyObject* _fallback_encoder_str; + PyObject* _raw_str; + PyObject* _subtype_str; + PyObject* _binary_str; + PyObject* _scope_str; + PyObject* _inc_str; + PyObject* _time_str; + PyObject* _bid_str; + PyObject* _replace_str; + PyObject* _astimezone_str; + PyObject* _id_str; + PyObject* _dollar_ref_str; + PyObject* _dollar_id_str; + PyObject* _dollar_db_str; + PyObject* _tzinfo_str; + PyObject* _as_doc_str; + PyObject* _utcoffset_str; + PyObject* _from_uuid_str; + PyObject* _as_uuid_str; + PyObject* _from_bid_str; + int64_t min_millis; + int64_t max_millis; }; #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) @@ -72,11 +104,110 @@ struct module_state { /* The smallest possible BSON document, i.e. "{}" */ #define BSON_MIN_SIZE 5 +/* Datetime codec options */ +#define DATETIME 1 +#define DATETIME_CLAMP 2 +#define DATETIME_MS 3 +#define DATETIME_AUTO 4 + +/* Converts integer to its string representation in decimal notation. */ +extern int cbson_long_long_to_str(long long num, char* str, size_t size) { + // Buffer should fit 64-bit signed integer + if (size < 21) { + PyErr_Format( + PyExc_RuntimeError, + "Buffer too small to hold long long: %d < 21", size); + return -1; + } + int index = 0; + int sign = 1; + // Convert to unsigned to handle -LLONG_MIN overflow + unsigned long long absNum; + // Handle the case of 0 + if (num == 0) { + str[index++] = '0'; + str[index] = '\0'; + return 0; + } + // Handle negative numbers + if (num < 0) { + sign = -1; + absNum = 0ULL - (unsigned long long)num; + } else { + absNum = (unsigned long long)num; + } + // Convert the number to string + unsigned long long digit; + while (absNum > 0) { + digit = absNum % 10ULL; + str[index++] = (char)digit + '0'; // Convert digit to character + absNum /= 10; + } + // Add minus sign if negative + if (sign == -1) { + str[index++] = '-'; + } + str[index] = '\0'; // Null terminator + // Reverse the string + int start = 0; + int end = index - 1; + while (start < end) { + char temp = str[start]; + str[start++] = str[end]; + str[end--] = temp; + } + return 0; +} + +static PyObject* _test_long_long_to_str(PyObject* self, PyObject* args) { + // Test extreme values + Py_ssize_t maxNum = PY_SSIZE_T_MAX; + Py_ssize_t minNum = PY_SSIZE_T_MIN; + Py_ssize_t num; + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + int res = LL2STR(str_1, (long long)minNum); + if (res == -1) { + return NULL; + } + INT2STRING(str_2, (long long)minNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + LL2STR(str_1, (long long)maxNum); + INT2STRING(str_2, (long long)maxNum); + if (strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + + // Test common values + for (num = 0; num < 10000; num++) { + char str_1[BUF_SIZE]; + char str_2[BUF_SIZE]; + LL2STR(str_1, (long long)num); + INT2STRING(str_2, (long long)num); + if 
(strcmp(str_1, str_2) != 0) { + PyErr_Format( + PyExc_RuntimeError, + "LL2STR != INT2STRING: %s != %s", str_1, str_2); + return NULL; + } + } + + return args; +} + /* Get an error class from the bson.errors module. * * Returns a new ref */ static PyObject* _error(char* name) { - PyObject* error; + PyObject* error = NULL; PyObject* errors = PyImport_ImportModule("bson.errors"); if (!errors) { return NULL; } @@ -116,7 +247,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, /* Write a RawBSONDocument to the buffer. * Returns the number of bytes written or 0 on failure. */ -static int write_raw_doc(buffer_t buffer, PyObject* raw); +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw); /* Date stuff */ static PyObject* datetime_from_millis(long long millis) { @@ -126,7 +257,7 @@ static PyObject* datetime_from_millis(long long millis) { * 2. Multiply that by 1000: 253402300799000 * 3. Add in microseconds divided by 1000 253402300799999 * - * (Note: BSON doesn't support microsecond accuracy, hence the rounding.) + * (Note: BSON doesn't support microsecond accuracy, hence the truncation.) * * To decode we could do: * 1. Get seconds: timestamp / 1000: 253402300799 @@ -148,19 +279,51 @@ static PyObject* datetime_from_millis(long long millis) { * micros = diff * 1000 111000 * Resulting in datetime(1, 1, 1, 1, 1, 1, 111000) -- the expected result */ + PyObject* datetime = NULL; int diff = (int)(((millis % 1000) + 1000) % 1000); int microseconds = diff * 1000; Time64_T seconds = (millis - diff) / 1000; struct TM timeinfo; cbson_gmtime64_r(&seconds, &timeinfo); - return PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, - timeinfo.tm_mon + 1, - timeinfo.tm_mday, - timeinfo.tm_hour, - timeinfo.tm_min, - timeinfo.tm_sec, - microseconds); + datetime = PyDateTime_FromDateAndTime(timeinfo.tm_year + 1900, + timeinfo.tm_mon + 1, + timeinfo.tm_mday, + timeinfo.tm_hour, + timeinfo.tm_min, + timeinfo.tm_sec, + microseconds); + if(!datetime) { + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + + /* + * Calling _error clears the error state, so fetch it first. + */ + PyErr_Fetch(&etype, &evalue, &etrace); + + /* Only add an additional error message on ValueError exceptions. */ + if (PyErr_GivenExceptionMatches(etype, PyExc_ValueError)) { + if (evalue) { + PyObject* err_msg = PyObject_Str(evalue); + if (err_msg) { + PyObject* appendage = PyUnicode_FromString(" (Consider using CodecOptions(datetime_conversion=DATETIME_AUTO) or MongoClient(datetime_conversion='DATETIME_AUTO')). See: https://www.mongodb.com/docs/languages/python/pymongo-driver/current/data-formats/dates-and-times/#handling-out-of-range-datetimes"); + if (appendage) { + PyObject* msg = PyUnicode_Concat(err_msg, appendage); + if (msg) { + Py_DECREF(evalue); + evalue = msg; + } + } + Py_XDECREF(appendage); + } + Py_XDECREF(err_msg); + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } + /* Steals references to args. */ + PyErr_Restore(etype, evalue, etrace); + } + return datetime; } static long long millis_from_datetime(PyObject* datetime) { @@ -179,6 +342,155 @@ static long long millis_from_datetime(PyObject* datetime) { return millis; } +/* Extended-range datetime, returns a DatetimeMS object with millis */ +static PyObject* datetime_ms_from_millis(PyObject* self, long long millis){ + // Allocate a new DatetimeMS object.
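Stepping up a level: datetime_ms_from_millis is the C-side constructor for bson.datetime_ms.DatetimeMS, the wrapper that decoding returns when the codec options ask for raw millisecond values (the DATETIME_* defines above mirror bson.codec_options.DatetimeConversion). A Python-level sketch of the behavior this function backs:

import bson
from bson.codec_options import CodecOptions, DatetimeConversion
from bson.datetime_ms import DatetimeMS

# 2**60 ms is far outside datetime.datetime's representable range, but it
# still fits in the 8-byte BSON datetime field.
raw = bson.encode({"ts": DatetimeMS(2**60)})
opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
assert isinstance(bson.decode(raw, opts)["ts"], DatetimeMS)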
+ struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } + + PyObject* dt = NULL; + PyObject* ll_millis = NULL; + + if (!(ll_millis = PyLong_FromLongLong(millis))){ + return NULL; + } + dt = PyObject_CallFunctionObjArgs(state->DatetimeMS, ll_millis, NULL); + Py_DECREF(ll_millis); + return dt; +} + +/* Extended-range datetime, takes a DatetimeMS object and extracts the long long value. */ +static int millis_from_datetime_ms(PyObject* dt, long long* out){ + PyObject* ll_millis; + long long millis; + + if (!(ll_millis = PyNumber_Long(dt))){ + return 0; + } + millis = PyLong_AsLongLong(ll_millis); + Py_DECREF(ll_millis); + if (millis == -1 && PyErr_Occurred()) { /* Overflow */ + PyErr_SetString(PyExc_OverflowError, + "MongoDB datetimes can only handle up to 8-byte ints"); + return 0; + } + *out = millis; + return 1; +} + +static PyObject* decode_datetime(PyObject* self, long long millis, const codec_options_t* options){ + PyObject* naive = NULL; + PyObject* replace = NULL; + PyObject* value = NULL; + struct module_state *state = GETSTATE(self); + if (!state) { + goto invalid; + } + if (options->datetime_conversion == DATETIME_MS){ + return datetime_ms_from_millis(self, millis); + } + + int dt_clamp = options->datetime_conversion == DATETIME_CLAMP; + int dt_auto = options->datetime_conversion == DATETIME_AUTO; + + if (dt_clamp || dt_auto){ + int64_t min_millis = state->min_millis; + int64_t max_millis = state->max_millis; + int64_t min_millis_offset = 0; + int64_t max_millis_offset = 0; + if (options->tz_aware && options->tzinfo && options->tzinfo != Py_None) { + PyObject* utcoffset = PyObject_CallMethodObjArgs(options->tzinfo, state->_utcoffset_str, state->min_datetime, NULL); + if (utcoffset == NULL) { + return 0; + } + if (utcoffset != Py_None) { + if (!PyDelta_Check(utcoffset)) { + PyObject* BSONError = _error("BSONError"); + if (BSONError) { + PyErr_SetString(BSONError, "tzinfo.utcoffset() did not return a datetime.timedelta"); + Py_DECREF(BSONError); + } + Py_DECREF(utcoffset); + return 0; + } + min_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * (int64_t)86400 + + PyDateTime_DELTA_GET_SECONDS(utcoffset)) * (int64_t)1000 + + (PyDateTime_DELTA_GET_MICROSECONDS(utcoffset) / 1000); + } + Py_DECREF(utcoffset); + utcoffset = PyObject_CallMethodObjArgs(options->tzinfo, state->_utcoffset_str, state->max_datetime, NULL); + if (utcoffset == NULL) { + return 0; + } + if (utcoffset != Py_None) { + if (!PyDelta_Check(utcoffset)) { + PyObject* BSONError = _error("BSONError"); + if (BSONError) { + PyErr_SetString(BSONError, "tzinfo.utcoffset() did not return a datetime.timedelta"); + Py_DECREF(BSONError); + } + Py_DECREF(utcoffset); + return 0; + } + max_millis_offset = (PyDateTime_DELTA_GET_DAYS(utcoffset) * (int64_t)86400 + + PyDateTime_DELTA_GET_SECONDS(utcoffset)) * (int64_t)1000 + + (PyDateTime_DELTA_GET_MICROSECONDS(utcoffset) / 1000); + } + Py_DECREF(utcoffset); + } + if (min_millis_offset < 0) { + min_millis -= min_millis_offset; + } + + if (max_millis_offset > 0) { + max_millis -= max_millis_offset; + } + + if (dt_clamp) { + if (millis < min_millis) { + millis = min_millis; + } else if (millis > max_millis) { + millis = max_millis; + } + // Continues from here to return a datetime. + } else { // dt_auto + if (millis < min_millis || millis > max_millis){ + return datetime_ms_from_millis(self, millis); + } + } + } + + naive = datetime_from_millis(millis); + if (!naive) { + goto invalid; + } + + if (!options->tz_aware) { /* In the naive case, we're done here. 
*/ + return naive; + } + replace = PyObject_GetAttr(naive, state->_replace_str); + if (!replace) { + goto invalid; + } + value = PyObject_Call(replace, state->replace_args, state->replace_kwargs); + if (!value) { + goto invalid; + } + + /* convert to local time */ + if (options->tzinfo != Py_None) { + PyObject* temp = PyObject_CallMethodObjArgs(value, state->_astimezone_str, options->tzinfo, NULL); + Py_DECREF(value); + value = temp; + } +invalid: + Py_XDECREF(naive); + Py_XDECREF(replace); + return value; +} + /* Just make this compatible w/ the old API. */ int buffer_write_bytes(buffer_t buffer, const char* data, int size) { if (pymongo_buffer_write(buffer, data, size)) { @@ -261,49 +573,6 @@ static int write_string(buffer_t buffer, PyObject* py_string) { return 1; } -/* - * Are we in the main interpreter or a sub-interpreter? - * Useful for deciding if we can use cached pure python - * types in mod_wsgi. - */ -static int -_in_main_interpreter(void) { - static PyInterpreterState* main_interpreter = NULL; - PyInterpreterState* interpreter; - - if (main_interpreter == NULL) { - interpreter = PyInterpreterState_Head(); - - while (PyInterpreterState_Next(interpreter)) - interpreter = PyInterpreterState_Next(interpreter); - - main_interpreter = interpreter; - } - - return (main_interpreter == PyThreadState_Get()->interp); -} - -/* - * Get a reference to a pure python type. If we are in the - * main interpreter return the cached object, otherwise import - * the object we need and return it instead. - */ -static PyObject* -_get_object(PyObject* object, char* module_name, char* object_name) { - if (_in_main_interpreter()) { - Py_XINCREF(object); - return object; - } else { - PyObject* imported = NULL; - PyObject* module = PyImport_ImportModule(module_name); - if (!module) - return NULL; - imported = PyObject_GetAttrString(module, object_name); - Py_DECREF(module); - return imported; - } -} - /* Load a Python object to cache. * * Returns non-zero on failure. */ @@ -328,7 +597,41 @@ static int _load_python_objects(PyObject* module) { PyObject* empty_string = NULL; PyObject* re_compile = NULL; PyObject* compiled = NULL; + PyObject* min_datetime_ms = NULL; + PyObject* max_datetime_ms = NULL; struct module_state *state = GETSTATE(module); + if (!state) { + return 1; + } + + /* Cache commonly used attribute names to improve performance. 
*/ + if (!((state->_type_marker_str = PyUnicode_FromString("_type_marker")) && + (state->_flags_str = PyUnicode_FromString("flags")) && + (state->_pattern_str = PyUnicode_FromString("pattern")) && + (state->_encoder_map_str = PyUnicode_FromString("_encoder_map")) && + (state->_decoder_map_str = PyUnicode_FromString("_decoder_map")) && + (state->_fallback_encoder_str = PyUnicode_FromString("_fallback_encoder")) && + (state->_raw_str = PyUnicode_FromString("raw")) && + (state->_subtype_str = PyUnicode_FromString("subtype")) && + (state->_binary_str = PyUnicode_FromString("binary")) && + (state->_scope_str = PyUnicode_FromString("scope")) && + (state->_inc_str = PyUnicode_FromString("inc")) && + (state->_time_str = PyUnicode_FromString("time")) && + (state->_bid_str = PyUnicode_FromString("bid")) && + (state->_replace_str = PyUnicode_FromString("replace")) && + (state->_astimezone_str = PyUnicode_FromString("astimezone")) && + (state->_id_str = PyUnicode_FromString("_id")) && + (state->_dollar_ref_str = PyUnicode_FromString("$ref")) && + (state->_dollar_id_str = PyUnicode_FromString("$id")) && + (state->_dollar_db_str = PyUnicode_FromString("$db")) && + (state->_tzinfo_str = PyUnicode_FromString("tzinfo")) && + (state->_as_doc_str = PyUnicode_FromString("as_doc")) && + (state->_utcoffset_str = PyUnicode_FromString("utcoffset")) && + (state->_from_uuid_str = PyUnicode_FromString("from_uuid")) && + (state->_as_uuid_str = PyUnicode_FromString("as_uuid")) && + (state->_from_bid_str = PyUnicode_FromString("from_bid")))) { + return 1; + } if (_load_object(&state->Binary, "bson.binary", "Binary") || _load_object(&state->Code, "bson.code", "Code") || @@ -342,9 +645,36 @@ static int _load_python_objects(PyObject* module) { _load_object(&state->BSONInt64, "bson.int64", "Int64") || _load_object(&state->Decimal128, "bson.decimal128", "Decimal128") || _load_object(&state->UUID, "uuid", "UUID") || - _load_object(&state->Mapping, "collections.abc", "Mapping")) { + _load_object(&state->Mapping, "collections.abc", "Mapping") || + _load_object(&state->DatetimeMS, "bson.datetime_ms", "DatetimeMS") || + _load_object(&min_datetime_ms, "bson.datetime_ms", "_MIN_UTC_MS") || + _load_object(&max_datetime_ms, "bson.datetime_ms", "_MAX_UTC_MS") || + _load_object(&state->min_datetime, "bson.datetime_ms", "_MIN_UTC") || + _load_object(&state->max_datetime, "bson.datetime_ms", "_MAX_UTC")) { + return 1; + } + + state->min_millis = PyLong_AsLongLong(min_datetime_ms); + state->max_millis = PyLong_AsLongLong(max_datetime_ms); + Py_DECREF(min_datetime_ms); + Py_DECREF(max_datetime_ms); + if ((state->min_millis == -1 || state->max_millis == -1) && PyErr_Occurred()) { + return 1; + } + + /* Speed up datetime.replace(tzinfo=utc) call */ + state->replace_args = PyTuple_New(0); + if (!state->replace_args) { return 1; } + state->replace_kwargs = PyDict_New(); + if (!state->replace_kwargs) { + return 1; + } + if (PyDict_SetItem(state->replace_kwargs, state->_tzinfo_str, state->UTC) == -1) { + return 1; + } + /* Reload our REType hack too. */ empty_string = PyBytes_FromString(""); if (empty_string == NULL) { @@ -377,12 +707,12 @@ static int _load_python_objects(PyObject* module) { * * Return the type marker, 0 if there is no marker, or -1 on failure. 
*/ -static long _type_marker(PyObject* object) { +static long _type_marker(PyObject* object, PyObject* _type_marker_str) { PyObject* type_marker = NULL; long type = 0; - if (PyObject_HasAttrString(object, "_type_marker")) { - type_marker = PyObject_GetAttrString(object, "_type_marker"); + if (PyObject_HasAttr(object, _type_marker_str)) { + type_marker = PyObject_GetAttr(object, _type_marker_str); if (type_marker == NULL) { return -1; } @@ -399,13 +729,6 @@ static long _type_marker(PyObject* object) { if (type_marker && PyLong_CheckExact(type_marker)) { type = PyLong_AsLong(type_marker); Py_DECREF(type_marker); - /* - * Py(Long|Int)_AsLong returns -1 for error but -1 is a valid value - * so we call PyErr_Occurred to differentiate. - */ - if (type == -1 && PyErr_Occurred()) { - return -1; - } } else { Py_XDECREF(type_marker); } @@ -418,25 +741,25 @@ static long _type_marker(PyObject* object) { * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. */ -int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry) { +int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registry, PyObject* _encoder_map_str, PyObject* _decoder_map_str, PyObject* _fallback_encoder_str) { registry->encoder_map = NULL; registry->decoder_map = NULL; registry->fallback_encoder = NULL; registry->registry_obj = NULL; - registry->encoder_map = PyObject_GetAttrString(registry_obj, "_encoder_map"); + registry->encoder_map = PyObject_GetAttr(registry_obj, _encoder_map_str); if (registry->encoder_map == NULL) { goto fail; } registry->is_encoder_empty = (PyDict_Size(registry->encoder_map) == 0); - registry->decoder_map = PyObject_GetAttrString(registry_obj, "_decoder_map"); + registry->decoder_map = PyObject_GetAttr(registry_obj, _decoder_map_str); if (registry->decoder_map == NULL) { goto fail; } registry->is_decoder_empty = (PyDict_Size(registry->decoder_map) == 0); - registry->fallback_encoder = PyObject_GetAttrString(registry_obj, "_fallback_encoder"); + registry->fallback_encoder = PyObject_GetAttr(registry_obj, _fallback_encoder_str); if (registry->fallback_encoder == NULL) { goto fail; } @@ -453,35 +776,40 @@ int cbson_convert_type_registry(PyObject* registry_obj, type_registry_t* registr return 0; } -/* Fill out a codec_options_t* from a CodecOptions object. Use with the "O&" - * format spec in PyArg_ParseTuple. +/* Fill out a codec_options_t* from a CodecOptions object. * * Return 1 on success. options->document_class is a new reference. * Return 0 on failure. 
*/ -int convert_codec_options(PyObject* options_obj, void* p) { - codec_options_t* options = (codec_options_t*)p; +int convert_codec_options(PyObject* self, PyObject* options_obj, codec_options_t* options) { PyObject* type_registry_obj = NULL; + struct module_state *state = GETSTATE(self); long type_marker; + if (!state) { + return 0; + } options->unicode_decode_error_handler = NULL; - if (!PyArg_ParseTuple(options_obj, "ObbzOO", + if (!PyArg_ParseTuple(options_obj, "ObbzOOb", &options->document_class, &options->tz_aware, &options->uuid_rep, &options->unicode_decode_error_handler, &options->tzinfo, - &type_registry_obj)) + &type_registry_obj, + &options->datetime_conversion)) { return 0; + } - type_marker = _type_marker(options->document_class); + type_marker = _type_marker(options->document_class, + state->_type_marker_str); if (type_marker < 0) { return 0; } if (!cbson_convert_type_registry(type_registry_obj, - &options->type_registry)) { + &options->type_registry, state->_encoder_map_str, state->_decoder_map_str, state->_fallback_encoder_str)) { return 0; } @@ -524,6 +852,13 @@ static int write_element_to_buffer(PyObject* self, buffer_t buffer, static void _set_cannot_encode(PyObject* value) { + if (PyLong_Check(value)) { + if ((PyLong_AsLongLong(value) == -1) && PyErr_Occurred()) { + return PyErr_SetString(PyExc_OverflowError, + "MongoDB can only handle up to 8-byte ints"); + } + } + PyObject* type = NULL; PyObject* InvalidDocument = _error("InvalidDocument"); if (InvalidDocument == NULL) { @@ -547,7 +882,7 @@ _set_cannot_encode(PyObject* value) { * Sets exception and returns 0 on failure. */ static int _write_regex_to_buffer( - buffer_t buffer, int type_byte, PyObject* value) { + buffer_t buffer, int type_byte, PyObject* value, PyObject* _flags_str, PyObject* _pattern_str) { PyObject* py_flags; PyObject* py_pattern; @@ -563,7 +898,7 @@ static int _write_regex_to_buffer( * Both the builtin re type and our Regex class have attributes * "flags" and "pattern". */ - py_flags = PyObject_GetAttrString(value, "flags"); + py_flags = PyObject_GetAttr(value, _flags_str); if (!py_flags) { return 0; } @@ -572,7 +907,7 @@ static int _write_regex_to_buffer( if (int_flags == -1 && PyErr_Occurred()) { return 0; } - py_pattern = PyObject_GetAttrString(value, "pattern"); + py_pattern = PyObject_GetAttr(value, _pattern_str); if (!py_pattern) { return 0; } @@ -668,17 +1003,18 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, const codec_options_t* options, unsigned char in_custom_call, unsigned char in_fallback_call) { - struct module_state *state = GETSTATE(self); - PyObject* mapping_type; PyObject* new_value = NULL; int retval; - PyObject* uuid_type; + int is_list; + long type; + struct module_state *state = GETSTATE(self); + if (!state) { + return 0; + } /* - * Don't use PyObject_IsInstance for our custom types. It causes - * problems with python sub interpreters. Our custom types should - * have a _type_marker attribute, which we can switch on instead. + * Use _type_marker attribute instead of PyObject_IsInstance for better perf. 
*/ - long type = _type_marker(value); + type = _type_marker(value, state->_type_marker_str); if (type < 0) { return 0; } @@ -693,7 +1029,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, int size; *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x05; - subtype_object = PyObject_GetAttrString(value, "subtype"); + subtype_object = PyObject_GetAttr(value, state->_subtype_str); if (!subtype_object) { return 0; } @@ -741,7 +1077,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, { /* ObjectId */ const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "binary"); + PyObject* pystring = PyObject_GetAttr(value, state->_binary_str); if (!pystring) { return 0; } @@ -758,10 +1094,20 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x07; return 1; } + case 9: + { + /* DatetimeMS */ + long long millis; + if (!millis_from_datetime_ms(value, &millis)) { + return 0; + } + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; + return buffer_write_int64(buffer, (int64_t)millis); + } case 11: { /* Regex */ - return _write_regex_to_buffer(buffer, type_byte, value); + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); } case 13: { @@ -770,7 +1116,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, length_location, length; - PyObject* scope = PyObject_GetAttrString(value, "scope"); + PyObject* scope = PyObject_GetAttr(value, state->_scope_str); if (!scope) { return 0; } @@ -813,7 +1159,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, PyObject* obj; unsigned long i; - obj = PyObject_GetAttrString(value, "inc"); + obj = PyObject_GetAttr(value, state->_inc_str); if (!obj) { return 0; } @@ -826,7 +1172,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return 0; } - obj = PyObject_GetAttrString(value, "time"); + obj = PyObject_GetAttr(value, state->_time_str); if (!obj) { return 0; } @@ -861,7 +1207,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, { /* Decimal128 */ const char* data; - PyObject* pystring = PyObject_GetAttrString(value, "bid"); + PyObject* pystring = PyObject_GetAttr(value, state->_bid_str); if (!pystring) { return 0; } @@ -881,7 +1227,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, case 100: { /* DBRef */ - PyObject* as_doc = PyObject_CallMethod(value, "as_doc", NULL); + PyObject* as_doc = PyObject_CallMethodObjArgs(value, state->_as_doc_str, NULL); if (!as_doc) { return 0; } @@ -896,7 +1242,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, case 101: { /* RawBSONDocument */ - if (!write_raw_doc(buffer, value)) { + if (!write_raw_doc(buffer, value, state->_raw_str)) { return 0; } *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; @@ -916,7 +1262,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } } - /* No _type_marker attibute or not one of our types. */ + /* No _type_marker attribute or not one of our types. */ if (PyBool_Check(value)) { const char c = (value == Py_True) ? 
0x01 : 0x00; @@ -924,23 +1270,17 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return buffer_write_bytes(buffer, &c, 1); } else if (PyLong_Check(value)) { - const long long_value = PyLong_AsLong(value); - - const int int_value = (int)long_value; - if (PyErr_Occurred() || long_value != int_value) { /* Overflow */ - long long long_long_value; + const long long long_long_value = PyLong_AsLongLong(value); + if (long_long_value == -1 && PyErr_Occurred()) { + /* Ignore error and give the fallback_encoder a chance. */ PyErr_Clear(); - long_long_value = PyLong_AsLongLong(value); - if (PyErr_Occurred()) { /* Overflow AGAIN */ - PyErr_SetString(PyExc_OverflowError, - "MongoDB can only handle up to 8-byte ints"); - return 0; - } + } else if (-2147483648LL <= long_long_value && long_long_value <= 2147483647LL) { + *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; + return buffer_write_int32(buffer, (int32_t)long_long_value); + } else { *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x12; return buffer_write_int64(buffer, (int64_t)long_long_value); } - *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x10; - return buffer_write_int32(buffer, (int32_t)int_value); } else if (PyFloat_Check(value)) { const double d = PyFloat_AsDouble(value); *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x01; @@ -951,7 +1291,7 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } else if (PyDict_Check(value)) { *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); - } else if (PyList_Check(value) || PyTuple_Check(value)) { + } else if ((is_list = PyList_Check(value)) || PyTuple_Check(value)) { Py_ssize_t items, i; int start_position, length_location, @@ -966,8 +1306,12 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, if (length_location == -1) { return 0; } - - if ((items = PySequence_Size(value)) > BSON_MAX_SIZE) { + if (is_list) { + items = PyList_Size(value); + } else { + items = PyTuple_Size(value); + } + if (items > BSON_MAX_SIZE) { PyObject* BSONError = _error("BSONError"); if (BSONError) { PyErr_SetString(BSONError, @@ -978,26 +1322,32 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } for(i = 0; i < items; i++) { int list_type_byte = pymongo_buffer_save_space(buffer, 1); - char name[16]; + char name[BUF_SIZE]; PyObject* item_value; if (list_type_byte == -1) { return 0; } - INT2STRING(name, (int)i); + int res = LL2STR(name, (long long)i); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, name, (int)strlen(name) + 1)) { return 0; } - - if (!(item_value = PySequence_GetItem(value, i))) + if (is_list) { + item_value = PyList_GET_ITEM(value, i); + } else { + item_value = PyTuple_GET_ITEM(value, i); + } + if (!item_value) { return 0; + } if (!write_element_to_buffer(self, buffer, list_type_byte, item_value, check_keys, options, 0, 0)) { - Py_DECREF(item_value); return 0; } - Py_DECREF(item_value); } /* write null byte and fill in length */ @@ -1033,13 +1383,13 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, return write_unicode(buffer, value); } else if (PyDateTime_Check(value)) { long long millis; - PyObject* utcoffset = PyObject_CallMethod(value, "utcoffset", NULL); + PyObject* utcoffset = PyObject_CallMethodObjArgs(value, state->_utcoffset_str , NULL); if (utcoffset == NULL) return 0; if (utcoffset != Py_None) { PyObject* result = PyNumber_Subtract(value, utcoffset); - Py_DECREF(utcoffset); if (!result) 
{ + Py_DECREF(utcoffset); return 0; } millis = millis_from_datetime(result); @@ -1047,47 +1397,35 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, } else { millis = millis_from_datetime(value); } + Py_DECREF(utcoffset); *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x09; return buffer_write_int64(buffer, (int64_t)millis); } else if (PyObject_TypeCheck(value, state->REType)) { - return _write_regex_to_buffer(buffer, type_byte, value); - } - - /* - * Try Mapping and UUID last since we have to import - * them if we're in a sub-interpreter. - */ - mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); - if (mapping_type && PyObject_IsInstance(value, mapping_type)) { - Py_DECREF(mapping_type); + return _write_regex_to_buffer(buffer, type_byte, value, state->_flags_str, state->_pattern_str); + } else if (PyObject_IsInstance(value, state->Mapping)) { /* PyObject_IsInstance returns -1 on error */ if (PyErr_Occurred()) { return 0; } *(pymongo_buffer_get_buffer(buffer) + type_byte) = 0x03; return write_dict(self, buffer, value, check_keys, options, 0); - } - - uuid_type = _get_object(state->UUID, "uuid", "UUID"); - if (uuid_type && PyObject_IsInstance(value, uuid_type)) { - PyObject* binary_type = NULL; + } else if (PyObject_IsInstance(value, state->UUID)) { PyObject* binary_value = NULL; + PyObject *uuid_rep_obj = NULL; int result; - Py_DECREF(uuid_type); /* PyObject_IsInstance returns -1 on error */ if (PyErr_Occurred()) { return 0; } - binary_type = _get_object(state->Binary, "bson", "Binary"); - if (binary_type == NULL) { + if (!(uuid_rep_obj = PyLong_FromLong(options->uuid_rep))) { return 0; } + binary_value = PyObject_CallMethodObjArgs(state->Binary, state->_from_uuid_str, value, uuid_rep_obj, NULL); + Py_DECREF(uuid_rep_obj); - binary_value = PyObject_CallMethod(binary_type, "from_uuid", "(Oi)", value, options->uuid_rep); if (binary_value == NULL) { - Py_DECREF(binary_type); return 0; } @@ -1096,12 +1434,9 @@ static int _write_element_to_buffer(PyObject* self, buffer_t buffer, check_keys, options, in_custom_call, in_fallback_call); - Py_DECREF(binary_type); Py_DECREF(binary_value); return result; } - Py_XDECREF(mapping_type); - Py_XDECREF(uuid_type); /* Try a custom encoder if one is provided and we have not already * attempted to use a type encoder. */ @@ -1281,14 +1616,14 @@ int decode_and_write_pair(PyObject* self, buffer_t buffer, /* Write a RawBSONDocument to the buffer. * Returns the number of bytes written or 0 on failure. */ -static int write_raw_doc(buffer_t buffer, PyObject* raw) { +static int write_raw_doc(buffer_t buffer, PyObject* raw, PyObject* _raw_str) { char* bytes; Py_ssize_t len; int len_int; int bytes_written = 0; PyObject* bytes_obj = NULL; - bytes_obj = PyObject_GetAttrString(raw, "raw"); + bytes_obj = PyObject_GetAttr(raw, _raw_str); if (!bytes_obj) { goto fail; } @@ -1309,6 +1644,54 @@ static int write_raw_doc(buffer_t buffer, PyObject* raw) { return bytes_written; } + +/* Update Invalid Document error to include doc as a property. 
+ */ +void handle_invalid_doc_error(PyObject* dict) { + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *msg = NULL, *new_msg = NULL, *new_evalue = NULL; + PyErr_Fetch(&etype, &evalue, &etrace); + PyObject *InvalidDocument = _error("InvalidDocument"); + if (InvalidDocument == NULL) { + goto cleanup; + } + + if (evalue && PyErr_GivenExceptionMatches(etype, InvalidDocument)) { + msg = PyObject_Str(evalue); + if (msg) { + const char * msg_utf8 = PyUnicode_AsUTF8(msg); + if (msg_utf8 == NULL) { + goto cleanup; + } + new_msg = PyUnicode_FromFormat("Invalid document: %s", msg_utf8); + if (new_msg == NULL) { + goto cleanup; + } + // Add doc to the error instance as a property. + new_evalue = PyObject_CallFunctionObjArgs(InvalidDocument, new_msg, dict, NULL); + Py_DECREF(evalue); + Py_DECREF(etype); + etype = InvalidDocument; + InvalidDocument = NULL; + if (new_evalue) { + evalue = new_evalue; + new_evalue = NULL; + } else { + evalue = msg; + msg = NULL; + } + } + PyErr_NormalizeException(&etype, &evalue, &etrace); + } +cleanup: + PyErr_Restore(etype, evalue, etrace); + Py_XDECREF(msg); + Py_XDECREF(InvalidDocument); + Py_XDECREF(new_evalue); + Py_XDECREF(new_msg); +} + + /* returns the number of bytes written or 0 on failure */ int write_dict(PyObject* self, buffer_t buffer, PyObject* dict, unsigned char check_keys, @@ -1319,25 +1702,25 @@ int write_dict(PyObject* self, buffer_t buffer, int length; int length_location; struct module_state *state = GETSTATE(self); - PyObject* mapping_type; long type_marker; - - /* check for RawBSONDocument */ - type_marker = _type_marker(dict); - if (type_marker < 0) { + int is_dict = PyDict_Check(dict); + if (!state) { return 0; } - if (101 == type_marker) { - return write_raw_doc(buffer, dict); - } + if (!is_dict) { + /* check for RawBSONDocument */ + type_marker = _type_marker(dict, state->_type_marker_str); + if (type_marker < 0) { + return 0; + } - mapping_type = _get_object(state->Mapping, "collections.abc", "Mapping"); + if (101 == type_marker) { + return write_raw_doc(buffer, dict, state->_raw_str); + } - if (mapping_type) { - if (!PyObject_IsInstance(dict, mapping_type)) { + if (!PyObject_IsInstance(dict, state->Mapping)) { PyObject* repr; - Py_DECREF(mapping_type); if ((repr = PyObject_Repr(dict))) { PyObject* errmsg = PyUnicode_FromString( "encoder expected a mapping type but got: "); @@ -1360,7 +1743,6 @@ int write_dict(PyObject* self, buffer_t buffer, return 0; } - Py_DECREF(mapping_type); /* PyObject_IsInstance returns -1 on error */ if (PyErr_Occurred()) { return 0; @@ -1376,20 +1758,20 @@ int write_dict(PyObject* self, buffer_t buffer, if (top_level) { /* * If "dict" is a defaultdict we don't want to call - * PyMapping_GetItemString on it. That would **create** + * PyObject_GetItem on it. That would **create** * an _id where one didn't previously exist (PYTHON-871). */ - if (PyDict_Check(dict)) { - /* PyDict_GetItemString returns a borrowed reference. */ - PyObject* _id = PyDict_GetItemString(dict, "_id"); + if (is_dict) { + /* PyDict_GetItem returns a borrowed reference. 
*/ + PyObject* _id = PyDict_GetItem(dict, state->_id_str); if (_id) { if (!write_pair(self, buffer, "_id", 3, _id, check_keys, options, 1)) { return 0; } } - } else if (PyMapping_HasKeyString(dict, "_id")) { - PyObject* _id = PyMapping_GetItemString(dict, "_id"); + } else if (PyMapping_HasKey(dict, state->_id_str)) { + PyObject* _id = PyObject_GetItem(dict, state->_id_str); if (!_id) { return 0; } @@ -1398,36 +1780,53 @@ int write_dict(PyObject* self, buffer_t buffer, Py_DECREF(_id); return 0; } - /* PyMapping_GetItemString returns a new reference. */ + /* PyObject_GetItem returns a new reference. */ Py_DECREF(_id); } } - iter = PyObject_GetIter(dict); - if (iter == NULL) { - return 0; - } - while ((key = PyIter_Next(iter)) != NULL) { - PyObject* value = PyObject_GetItem(dict, key); - if (!value) { - PyErr_SetObject(PyExc_KeyError, key); - Py_DECREF(key); - Py_DECREF(iter); + if (is_dict) { + PyObject* value; + Py_ssize_t pos = 0; + while (PyDict_Next(dict, &pos, &key, &value)) { + if (!decode_and_write_pair(self, buffer, key, value, + check_keys, options, top_level)) { + if (PyErr_Occurred() && top_level) { + handle_invalid_doc_error(dict); + } + return 0; + } + } + } else { + iter = PyObject_GetIter(dict); + if (iter == NULL) { return 0; } - if (!decode_and_write_pair(self, buffer, key, value, - check_keys, options, top_level)) { + while ((key = PyIter_Next(iter)) != NULL) { + PyObject* value = PyObject_GetItem(dict, key); + if (!value) { + PyErr_SetObject(PyExc_KeyError, key); + Py_DECREF(key); + Py_DECREF(iter); + return 0; + } + if (!decode_and_write_pair(self, buffer, key, value, + check_keys, options, top_level)) { + if (PyErr_Occurred() && top_level) { + handle_invalid_doc_error(dict); + } + Py_DECREF(key); + Py_DECREF(value); + Py_DECREF(iter); + return 0; + } Py_DECREF(key); Py_DECREF(value); - Py_DECREF(iter); + } + Py_DECREF(iter); + if (PyErr_Occurred()) { return 0; } - Py_DECREF(key); - Py_DECREF(value); - } - Py_DECREF(iter); - if (PyErr_Occurred()) { - return 0; } /* write null byte and fill in length */ @@ -1445,24 +1844,30 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { PyObject* result; unsigned char check_keys; unsigned char top_level = 1; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer; PyObject* raw_bson_document_bytes_obj; long type_marker; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } - if (!PyArg_ParseTuple(args, "ObO&|b", &dict, &check_keys, - convert_codec_options, &options, &top_level)) { + if (!(PyArg_ParseTuple(args, "ObO|b", &dict, &check_keys, + &options_obj, &top_level) && + convert_codec_options(self, options_obj, &options))) { return NULL; } /* check for RawBSONDocument */ - type_marker = _type_marker(dict); + type_marker = _type_marker(dict, state->_type_marker_str); if (type_marker < 0) { destroy_codec_options(&options); return NULL; } else if (101 == type_marker) { destroy_codec_options(&options); - raw_bson_document_bytes_obj = PyObject_GetAttrString(dict, "raw"); + raw_bson_document_bytes_obj = PyObject_GetAttr(dict, state->_raw_str); if (NULL == raw_bson_document_bytes_obj) { return NULL; } @@ -1494,29 +1899,30 @@ static PyObject* _cbson_dict_to_bson(PyObject* self, PyObject* args) { */ static PyObject *_dbref_hook(PyObject* self, PyObject* value) { struct module_state *state = GETSTATE(self); - PyObject* dbref = NULL; - PyObject* dbref_type = NULL; PyObject* ref = NULL; PyObject* id = NULL; PyObject* database = NULL; PyObject* ret = NULL; int db_present = 0; + if 
(!state) { + return NULL; + } /* Decoding for DBRefs */ - if (PyMapping_HasKeyString(value, "$ref") && PyMapping_HasKeyString(value, "$id")) { /* DBRef */ - ref = PyMapping_GetItemString(value, "$ref"); - /* PyMapping_GetItemString returns NULL to indicate error. */ + if (PyMapping_HasKey(value, state->_dollar_ref_str) && PyMapping_HasKey(value, state->_dollar_id_str)) { /* DBRef */ + ref = PyObject_GetItem(value, state->_dollar_ref_str); + /* PyObject_GetItem returns NULL to indicate error. */ if (!ref) { goto invalid; } - id = PyMapping_GetItemString(value, "$id"); - /* PyMapping_GetItemString returns NULL to indicate error. */ + id = PyObject_GetItem(value, state->_dollar_id_str); + /* PyObject_GetItem returns NULL to indicate error. */ if (!id) { goto invalid; } - if (PyMapping_HasKeyString(value, "$db")) { - database = PyMapping_GetItemString(value, "$db"); + if (PyMapping_HasKey(value, state->_dollar_db_str)) { + database = PyObject_GetItem(value, state->_dollar_db_str); if (!database) { goto invalid; } @@ -1532,22 +1938,18 @@ static PyObject *_dbref_hook(PyObject* self, PyObject* value) { goto invalid; } - PyMapping_DelItemString(value, "$ref"); - PyMapping_DelItemString(value, "$id"); + PyMapping_DelItem(value, state->_dollar_ref_str); + PyMapping_DelItem(value, state->_dollar_id_str); if (db_present) { - PyMapping_DelItemString(value, "$db"); + PyMapping_DelItem(value, state->_dollar_db_str); } - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - dbref = PyObject_CallFunctionObjArgs(dbref_type, ref, id, database, value, NULL); - Py_DECREF(value); - ret = dbref; - } + ret = PyObject_CallFunctionObjArgs(state->DBRef, ref, id, database, value, NULL); + Py_DECREF(value); } else { ret = value; } invalid: - Py_XDECREF(dbref_type); Py_XDECREF(ref); Py_XDECREF(id); Py_XDECREF(database); @@ -1556,9 +1958,12 @@ static PyObject *_dbref_hook(PyObject* self, PyObject* value) { static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, unsigned* position, unsigned char type, - unsigned max, const codec_options_t* options) { + unsigned max, const codec_options_t* options, int raw_array) { struct module_state *state = GETSTATE(self); PyObject* value = NULL; + if (!state) { + return NULL; + } switch (type) { case 1: { @@ -1615,23 +2020,17 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } + value = elements_to_dict(self, buffer + *position, + size, options); + if (!value) { + goto invalid; + } + if (options->is_raw_bson) { - value = PyObject_CallFunction( - options->document_class, "y#O", - buffer + *position, (Py_ssize_t)size, options->options_obj); - if (!value) { - goto invalid; - } *position += size; break; } - value = elements_to_dict(self, buffer + *position + 4, - size - 5, options); - if (!value) { - goto invalid; - } - /* Hook for DBRefs */ value = _dbref_hook(self, value); if (!value) { @@ -1653,11 +2052,20 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (size < BSON_MIN_SIZE || max < size) { goto invalid; } + end = *position + size - 1; /* Check for bad eoo */ if (buffer[end]) { goto invalid; } + + if (raw_array != 0) { + // Treat it as a binary buffer. 
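The raw_array branch here is the consumer of the cursor fast path: instead of building a Python list, get_value hands back the embedded array's bytes unchanged so _array_of_documents_to_buffer can walk them later. A rough Python equivalent of the slice the next lines perform (hypothetical helper, not part of this change):

import struct

def slice_raw_array(buffer: bytes, position: int) -> tuple[bytes, int]:
    # Every BSON document or array starts with its total size as a
    # little-endian int32 that counts the prefix and the trailing NUL.
    size = struct.unpack_from("<i", buffer, position)[0]
    return buffer[position:position + size], position + size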
+ value = PyBytes_FromStringAndSize(buffer + *position, size); + *position += size; + break; + } + *position += 4; value = PyList_New(0); @@ -1681,7 +2089,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } to_append = get_value(self, name, buffer, position, bson_type, - max - (unsigned)key_size, options); + max - (unsigned)key_size, options, raw_array); Py_LeaveRecursiveCall(); if (!to_append) { Py_DECREF(value); @@ -1704,7 +2112,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, { PyObject* data; PyObject* st; - PyObject* type_to_create; uint32_t length, length2; unsigned char subtype; @@ -1745,7 +2152,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } /* Encode as UUID or Binary based on options->uuid_rep */ if (subtype == 3 || subtype == 4) { - PyObject* binary_type = NULL; PyObject* binary_value = NULL; char uuid_rep = options->uuid_rep; @@ -1754,12 +2160,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto uuiderror; } - binary_type = _get_object(state->Binary, "bson", "Binary"); - if (binary_type == NULL) { - goto uuiderror; - } - - binary_value = PyObject_CallFunction(binary_type, "(Oi)", data, subtype); + binary_value = PyObject_CallFunction(state->Binary, "(Oi)", data, subtype); if (binary_value == NULL) { goto uuiderror; } @@ -1770,11 +2171,15 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, value = binary_value; Py_INCREF(value); } else { - value = PyObject_CallMethod(binary_value, "as_uuid", "(i)", uuid_rep); + PyObject *uuid_rep_obj = PyLong_FromLong(uuid_rep); + if (!uuid_rep_obj) { + goto uuiderror; + } + value = PyObject_CallMethodObjArgs(binary_value, state->_as_uuid_str, uuid_rep_obj, NULL); + Py_DECREF(uuid_rep_obj); } uuiderror: - Py_XDECREF(binary_type); Py_XDECREF(binary_value); Py_DECREF(data); if (!value) { @@ -1789,10 +2194,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, Py_DECREF(data); goto invalid; } - if ((type_to_create = _get_object(state->Binary, "bson.binary", "Binary"))) { - value = PyObject_CallFunctionObjArgs(type_to_create, data, st, NULL); - Py_DECREF(type_to_create); - } + value = PyObject_CallFunctionObjArgs(state->Binary, data, st, NULL); Py_DECREF(st); Py_DECREF(data); if (!value) { @@ -1810,15 +2212,10 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } case 7: { - PyObject* objectid_type; if (max < 12) { goto invalid; } - if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - value = PyObject_CallFunction(objectid_type, "y#", - buffer + *position, (Py_ssize_t)12); - Py_DECREF(objectid_type); - } + value = PyObject_CallFunction(state->ObjectId, "y#", buffer + *position, (Py_ssize_t)12); *position += 12; break; } @@ -1842,83 +2239,19 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } case 9: { - PyObject* utc_type; - PyObject* naive; - PyObject* replace; - PyObject* args; - PyObject* kwargs; - PyObject* astimezone; int64_t millis; if (max < 8) { goto invalid; } memcpy(&millis, buffer + *position, 8); millis = (int64_t)BSON_UINT64_FROM_LE(millis); - naive = datetime_from_millis(millis); *position += 8; - if (!options->tz_aware) { /* In the naive case, we're done here. 
*/ - value = naive; - break; - } - if (!naive) { - goto invalid; - } - replace = PyObject_GetAttrString(naive, "replace"); - Py_DECREF(naive); - if (!replace) { - goto invalid; - } - args = PyTuple_New(0); - if (!args) { - Py_DECREF(replace); - goto invalid; - } - kwargs = PyDict_New(); - if (!kwargs) { - Py_DECREF(replace); - Py_DECREF(args); - goto invalid; - } - utc_type = _get_object(state->UTC, "bson.tz_util", "utc"); - if (!utc_type || PyDict_SetItemString(kwargs, "tzinfo", utc_type) == -1) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - Py_XDECREF(utc_type); - goto invalid; - } - Py_XDECREF(utc_type); - value = PyObject_Call(replace, args, kwargs); - if (!value) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - goto invalid; - } - - /* convert to local time */ - if (options->tzinfo != Py_None) { - astimezone = PyObject_GetAttrString(value, "astimezone"); - Py_DECREF(value); - if (!astimezone) { - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); - goto invalid; - } - value = PyObject_CallFunctionObjArgs(astimezone, options->tzinfo, NULL); - Py_DECREF(astimezone); - } - - Py_DECREF(replace); - Py_DECREF(args); - Py_DECREF(kwargs); + value = decode_datetime(self, millis, options); break; } case 11: { - PyObject* regex_class; PyObject* pattern; int flags; size_t flags_length, i; @@ -1961,12 +2294,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } *position += (unsigned)flags_length + 1; - regex_class = _get_object(state->Regex, "bson.regex", "Regex"); - if (regex_class) { - value = PyObject_CallFunction(regex_class, - "Oi", pattern, flags); - Py_DECREF(regex_class); - } + value = PyObject_CallFunction(state->Regex, "Oi", pattern, flags); Py_DECREF(pattern); break; } @@ -1975,8 +2303,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, uint32_t coll_length; PyObject* collection; PyObject* id = NULL; - PyObject* objectid_type; - PyObject* dbref_type; if (max < 4) { goto invalid; @@ -2001,20 +2327,13 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, } *position += coll_length; - if ((objectid_type = _get_object(state->ObjectId, "bson.objectid", "ObjectId"))) { - id = PyObject_CallFunction(objectid_type, "y#", - buffer + *position, (Py_ssize_t)12); - Py_DECREF(objectid_type); - } + id = PyObject_CallFunction(state->ObjectId, "y#", buffer + *position, (Py_ssize_t)12); if (!id) { Py_DECREF(collection); goto invalid; } *position += 12; - if ((dbref_type = _get_object(state->DBRef, "bson.dbref", "DBRef"))) { - value = PyObject_CallFunctionObjArgs(dbref_type, collection, id, NULL); - Py_DECREF(dbref_type); - } + value = PyObject_CallFunctionObjArgs(state->DBRef, collection, id, NULL); Py_DECREF(collection); Py_DECREF(id); break; @@ -2022,7 +2341,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, case 13: { PyObject* code; - PyObject* code_type; uint32_t value_length; if (max < 4) { goto invalid; @@ -2045,10 +2363,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, goto invalid; } *position += value_length; - if ((code_type = _get_object(state->Code, "bson.code", "Code"))) { - value = PyObject_CallFunctionObjArgs(code_type, code, NULL, NULL); - Py_DECREF(code_type); - } + value = PyObject_CallFunctionObjArgs(state->Code, code, NULL, NULL); Py_DECREF(code); break; } @@ -2057,9 +2372,9 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, uint32_t c_w_s_size; 
uint32_t code_size; uint32_t scope_size; + uint32_t len; PyObject* code; PyObject* scope; - PyObject* code_type; if (max < 8) { goto invalid; @@ -2076,7 +2391,8 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, memcpy(&code_size, buffer + *position, 4); code_size = BSON_UINT32_FROM_LE(code_size); /* code_w_scope length + code length + code + scope length */ - if (!code_size || max < code_size || max < 4 + 4 + code_size + 4) { + len = 4 + 4 + code_size + 4; + if (!code_size || max < code_size || max < len || len < code_size) { goto invalid; } *position += 4; @@ -2094,12 +2410,9 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, memcpy(&scope_size, buffer + *position, 4); scope_size = BSON_UINT32_FROM_LE(scope_size); - if (scope_size < BSON_MIN_SIZE) { - Py_DECREF(code); - goto invalid; - } /* code length + code + scope length + scope */ - if ((4 + code_size + 4 + scope_size) != c_w_s_size) { + len = 4 + 4 + code_size + scope_size; + if (scope_size < BSON_MIN_SIZE || len != c_w_s_size || len < scope_size) { Py_DECREF(code); goto invalid; } @@ -2108,18 +2421,15 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, if (buffer[*position + scope_size - 1]) { goto invalid; } - scope = elements_to_dict(self, buffer + *position + 4, - scope_size - 5, options); + scope = elements_to_dict(self, buffer + *position, + scope_size, options); if (!scope) { Py_DECREF(code); goto invalid; } *position += scope_size; - if ((code_type = _get_object(state->Code, "bson.code", "Code"))) { - value = PyObject_CallFunctionObjArgs(code_type, code, scope, NULL); - Py_DECREF(code_type); - } + value = PyObject_CallFunctionObjArgs(state->Code, code, scope, NULL); Py_DECREF(code); Py_DECREF(scope); break; @@ -2142,7 +2452,6 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, case 17: { uint32_t time, inc; - PyObject* timestamp_type; if (max < 8) { goto invalid; } @@ -2150,66 +2459,44 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, memcpy(&time, buffer + *position + 4, 4); inc = BSON_UINT32_FROM_LE(inc); time = BSON_UINT32_FROM_LE(time); - if ((timestamp_type = _get_object(state->Timestamp, "bson.timestamp", "Timestamp"))) { - value = PyObject_CallFunction(timestamp_type, "II", time, inc); - Py_DECREF(timestamp_type); - } + value = PyObject_CallFunction(state->Timestamp, "II", time, inc); *position += 8; break; } case 18: { int64_t ll; - PyObject* bson_int64_type = _get_object(state->BSONInt64, - "bson.int64", "Int64"); - if (!bson_int64_type) - goto invalid; if (max < 8) { - Py_DECREF(bson_int64_type); goto invalid; } memcpy(&ll, buffer + *position, 8); ll = (int64_t)BSON_UINT64_FROM_LE(ll); - value = PyObject_CallFunction(bson_int64_type, "L", ll); + value = PyObject_CallFunction(state->BSONInt64, "L", ll); *position += 8; - Py_DECREF(bson_int64_type); break; } case 19: { - PyObject* dec128; if (max < 16) { goto invalid; } - if ((dec128 = _get_object(state->Decimal128, - "bson.decimal128", - "Decimal128"))) { - value = PyObject_CallMethod(dec128, - "from_bid", - "y#", - buffer + *position, - (Py_ssize_t)16); - Py_DECREF(dec128); + PyObject *_bytes_obj = PyBytes_FromStringAndSize(buffer + *position, (Py_ssize_t)16); + if (!_bytes_obj) { + goto invalid; } + value = PyObject_CallMethodObjArgs(state->Decimal128, state->_from_bid_str, _bytes_obj, NULL); + Py_DECREF(_bytes_obj); *position += 16; break; } case 255: { - PyObject* minkey_type = _get_object(state->MinKey, 
"bson.min_key", "MinKey"); - if (!minkey_type) - goto invalid; - value = PyObject_CallFunctionObjArgs(minkey_type, NULL); - Py_DECREF(minkey_type); + value = PyObject_CallFunctionObjArgs(state->MinKey, NULL); break; } case 127: { - PyObject* maxkey_type = _get_object(state->MaxKey, "bson.max_key", "MaxKey"); - if (!maxkey_type) - goto invalid; - value = PyObject_CallFunctionObjArgs(maxkey_type, NULL); - Py_DECREF(maxkey_type); + value = PyObject_CallFunctionObjArgs(state->MaxKey, NULL); break; } default: @@ -2279,8 +2566,8 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, * Wrap any non-InvalidBSON errors in InvalidBSON. */ if (PyErr_Occurred()) { - PyObject *etype, *evalue, *etrace; - PyObject *InvalidBSON; + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *InvalidBSON = NULL; /* * Calling _error clears the error state, so fetch it first. @@ -2334,6 +2621,7 @@ static PyObject* get_value(PyObject* self, PyObject* name, const char* buffer, static int _element_to_dict(PyObject* self, const char* string, unsigned position, unsigned max, const codec_options_t* options, + int raw_array, PyObject** name, PyObject** value) { unsigned char type = (unsigned char)string[position++]; size_t name_length = strlen(string + position); @@ -2351,8 +2639,8 @@ static int _element_to_dict(PyObject* self, const char* string, if (!*name) { /* If NULL is returned then wrap the UnicodeDecodeError in an InvalidBSON error */ - PyObject *etype, *evalue, *etrace; - PyObject *InvalidBSON; + PyObject *etype = NULL, *evalue = NULL, *etrace = NULL; + PyObject *InvalidBSON = NULL; PyErr_Fetch(&etype, &evalue, &etrace); if (PyErr_GivenExceptionMatches(etype, PyExc_Exception)) { @@ -2374,7 +2662,7 @@ static int _element_to_dict(PyObject* self, const char* string, } position += (unsigned)name_length + 1; *value = get_value(self, *name, string, &position, type, - max - position, options); + max - position, options, raw_array); if (!*value) { Py_DECREF(*name); return -1; @@ -2386,16 +2674,19 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { /* TODO: Support buffer protocol */ char* string; PyObject* bson; + PyObject* options_obj = NULL; codec_options_t options; unsigned position; unsigned max; int new_position; + int raw_array = 0; PyObject* name; PyObject* value; PyObject* result_tuple; - if (!PyArg_ParseTuple(args, "OIIO&", &bson, &position, &max, - convert_codec_options, &options)) { + if (!(PyArg_ParseTuple(args, "OIIOp", &bson, &position, &max, + &options_obj, &raw_array) && + convert_codec_options(self, options_obj, &options))) { return NULL; } @@ -2405,8 +2696,7 @@ static PyObject* _cbson_element_to_dict(PyObject* self, PyObject* args) { } string = PyBytes_AS_STRING(bson); - new_position = _element_to_dict(self, string, position, max, &options, - &name, &value); + new_position = _element_to_dict(self, string, position, max, &options, raw_array, &name, &value); if (new_position < 0) { return NULL; } @@ -2430,13 +2720,14 @@ static PyObject* _elements_to_dict(PyObject* self, const char* string, if (!dict) { return NULL; } + int raw_array = 0; while (position < max) { PyObject* name = NULL; PyObject* value = NULL; int new_position; new_position = _element_to_dict( - self, string, position, max, options, &name, &value); + self, string, position, max, options, raw_array, &name, &value); if (new_position < 0) { Py_DECREF(dict); return NULL; @@ -2455,9 +2746,14 @@ static PyObject* elements_to_dict(PyObject* self, const char* string, unsigned max, 
const codec_options_t* options) { PyObject* result; + if (options->is_raw_bson) { + return PyObject_CallFunction( + options->document_class, "y#O", + string, max, options->options_obj); + } if (Py_EnterRecursiveCall(" while decoding a BSON document")) return NULL; - result = _elements_to_dict(self, string, max, options); + result = _elements_to_dict(self, string + 4, max - 5, options); Py_LeaveRecursiveCall(); return result; } @@ -2490,14 +2786,14 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { int32_t size; Py_ssize_t total_size; const char* string; - PyObject* bson; + PyObject* bson = NULL; codec_options_t options; PyObject* result = NULL; PyObject* options_obj; Py_buffer view = {0}; if (! (PyArg_ParseTuple(args, "OO", &bson, &options_obj) && - convert_codec_options(options_obj, &options))) { + convert_codec_options(self, options_obj, &options))) { return result; } @@ -2519,7 +2815,6 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { } string = (char*)view.buf; - memcpy(&size, string, 4); size = (int32_t)BSON_UINT32_FROM_LE(size); if (size < BSON_MIN_SIZE) { @@ -2549,15 +2844,7 @@ static PyObject* _cbson_bson_to_dict(PyObject* self, PyObject* args) { goto done; } - /* No need to decode fields if using RawBSONDocument */ - if (options.is_raw_bson) { - result = PyObject_CallFunction( - options.document_class, "y#O", string, (Py_ssize_t)size, - options_obj); - } - else { - result = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); - } + result = elements_to_dict(self, string, (unsigned)size, &options); done: PyBuffer_Release(&view); destroy_codec_options(&options); @@ -2575,10 +2862,8 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { PyObject* options_obj = NULL; Py_buffer view = {0}; - if (!PyArg_ParseTuple(args, "OO", &bson, &options_obj)) { - return NULL; - } - if (!convert_codec_options(options_obj, &options)) { + if (!(PyArg_ParseTuple(args, "OO", &bson, &options_obj) && + convert_codec_options(self, options_obj, &options))) { return NULL; } @@ -2637,14 +2922,7 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { goto fail; } - /* No need to decode fields if using RawBSONDocument. 
*/ - if (options.is_raw_bson) { - dict = PyObject_CallFunction( - options.document_class, "y#O", string, (Py_ssize_t)size, - options_obj); - } else { - dict = elements_to_dict(self, string + 4, (unsigned)size - 5, &options); - } + dict = elements_to_dict(self, string, (unsigned)size, &options); if (!dict) { Py_DECREF(result); goto fail; @@ -2667,6 +2945,136 @@ static PyObject* _cbson_decode_all(PyObject* self, PyObject* args) { return result; } + +static PyObject* _cbson_array_of_documents_to_buffer(PyObject* self, PyObject* args) { + uint32_t size; + uint32_t value_length; + uint32_t position = 0; + buffer_t buffer; + const char* string; + PyObject* arr; + PyObject* result = NULL; + Py_buffer view = {0}; + + if (!PyArg_ParseTuple(args, "O", &arr)) { + return NULL; + } + + if (!_get_buffer(arr, &view)) { + return NULL; + } + + buffer = pymongo_buffer_new(); + if (!buffer) { + PyBuffer_Release(&view); + return NULL; + } + + string = (char*)view.buf; + + if (view.len < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, + "not enough data for a BSON document"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + memcpy(&size, string, 4); + size = BSON_UINT32_FROM_LE(size); + + /* validate the size of the array */ + if (view.len != (int32_t)size || (int32_t)size < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "objsize too large"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (string[size - 1]) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "bad eoo"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + /* save space for length */ + if (pymongo_buffer_save_space(buffer, size) == -1) { + goto fail; + } + pymongo_buffer_update_position(buffer, 0); + + position += 4; + while (position < size - 1) { + // Verify the value is an object. + unsigned char type = (unsigned char)string[position]; + if (type != 3) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "array element was not an object"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + // Just skip the keys. 
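+        // Array keys are the ASCII indices "0", "1", ...; they are not
+        // copied, only each embedded document's bytes are written to the
+        // output buffer.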
+ position = position + strlen(string + position) + 1; + + if (position >= size || (size - position) < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid array content"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + memcpy(&value_length, string + position, 4); + value_length = BSON_UINT32_FROM_LE(value_length); + if (value_length < BSON_MIN_SIZE) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, "invalid message size"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + if (pymongo_buffer_write(buffer, string + position, value_length) == 1) { + goto fail; + } + position += value_length; + } + + if (position != size - 1) { + PyObject* InvalidBSON = _error("InvalidBSON"); + if (InvalidBSON) { + PyErr_SetString(InvalidBSON, + "bad object or element length"); + Py_DECREF(InvalidBSON); + } + goto fail; + } + + /* objectify buffer */ + result = Py_BuildValue("y#", pymongo_buffer_get_buffer(buffer), + (Py_ssize_t)pymongo_buffer_get_position(buffer)); + goto done; +fail: + result = NULL; +done: + PyBuffer_Release(&view); + pymongo_buffer_free(buffer); + return result; +} + + static PyMethodDef _CBSONMethods[] = { {"_dict_to_bson", _cbson_dict_to_bson, METH_VARARGS, "convert a dictionary to a string containing its BSON representation."}, @@ -2676,56 +3084,114 @@ static PyMethodDef _CBSONMethods[] = { "convert binary data to a sequence of documents."}, {"_element_to_dict", _cbson_element_to_dict, METH_VARARGS, "Decode a single key, value pair."}, + {"_array_of_documents_to_buffer", _cbson_array_of_documents_to_buffer, METH_VARARGS, "Convert raw array of documents to a stream of BSON documents"}, + {"_test_long_long_to_str", _test_long_long_to_str, METH_VARARGS, "Test conversion of extreme and common Py_ssize_t values to str."}, {NULL, NULL, 0, NULL} }; -#define INITERROR return NULL +#define INITERROR return -1; static int _cbson_traverse(PyObject *m, visitproc visit, void *arg) { - Py_VISIT(GETSTATE(m)->Binary); - Py_VISIT(GETSTATE(m)->Code); - Py_VISIT(GETSTATE(m)->ObjectId); - Py_VISIT(GETSTATE(m)->DBRef); - Py_VISIT(GETSTATE(m)->Regex); - Py_VISIT(GETSTATE(m)->UUID); - Py_VISIT(GETSTATE(m)->Timestamp); - Py_VISIT(GETSTATE(m)->MinKey); - Py_VISIT(GETSTATE(m)->MaxKey); - Py_VISIT(GETSTATE(m)->UTC); - Py_VISIT(GETSTATE(m)->REType); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_VISIT(state->Binary); + Py_VISIT(state->Code); + Py_VISIT(state->ObjectId); + Py_VISIT(state->DBRef); + Py_VISIT(state->Regex); + Py_VISIT(state->UUID); + Py_VISIT(state->Timestamp); + Py_VISIT(state->MinKey); + Py_VISIT(state->MaxKey); + Py_VISIT(state->UTC); + Py_VISIT(state->REType); + Py_VISIT(state->_type_marker_str); + Py_VISIT(state->_flags_str); + Py_VISIT(state->_pattern_str); + Py_VISIT(state->_encoder_map_str); + Py_VISIT(state->_decoder_map_str); + Py_VISIT(state->_fallback_encoder_str); + Py_VISIT(state->_raw_str); + Py_VISIT(state->_subtype_str); + Py_VISIT(state->_binary_str); + Py_VISIT(state->_scope_str); + Py_VISIT(state->_inc_str); + Py_VISIT(state->_time_str); + Py_VISIT(state->_bid_str); + Py_VISIT(state->_replace_str); + Py_VISIT(state->_astimezone_str); + Py_VISIT(state->_id_str); + Py_VISIT(state->_dollar_ref_str); + Py_VISIT(state->_dollar_id_str); + Py_VISIT(state->_dollar_db_str); + Py_VISIT(state->_tzinfo_str); + Py_VISIT(state->_as_doc_str); + Py_VISIT(state->_utcoffset_str); + Py_VISIT(state->_from_uuid_str); + 
Py_VISIT(state->_as_uuid_str); + Py_VISIT(state->_from_bid_str); + Py_VISIT(state->min_datetime); + Py_VISIT(state->max_datetime); + Py_VISIT(state->replace_args); + Py_VISIT(state->replace_kwargs); return 0; } static int _cbson_clear(PyObject *m) { - Py_CLEAR(GETSTATE(m)->Binary); - Py_CLEAR(GETSTATE(m)->Code); - Py_CLEAR(GETSTATE(m)->ObjectId); - Py_CLEAR(GETSTATE(m)->DBRef); - Py_CLEAR(GETSTATE(m)->Regex); - Py_CLEAR(GETSTATE(m)->UUID); - Py_CLEAR(GETSTATE(m)->Timestamp); - Py_CLEAR(GETSTATE(m)->MinKey); - Py_CLEAR(GETSTATE(m)->MaxKey); - Py_CLEAR(GETSTATE(m)->UTC); - Py_CLEAR(GETSTATE(m)->REType); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_CLEAR(state->Binary); + Py_CLEAR(state->Code); + Py_CLEAR(state->ObjectId); + Py_CLEAR(state->DBRef); + Py_CLEAR(state->Regex); + Py_CLEAR(state->UUID); + Py_CLEAR(state->Timestamp); + Py_CLEAR(state->MinKey); + Py_CLEAR(state->MaxKey); + Py_CLEAR(state->UTC); + Py_CLEAR(state->REType); + Py_CLEAR(state->_type_marker_str); + Py_CLEAR(state->_flags_str); + Py_CLEAR(state->_pattern_str); + Py_CLEAR(state->_encoder_map_str); + Py_CLEAR(state->_decoder_map_str); + Py_CLEAR(state->_fallback_encoder_str); + Py_CLEAR(state->_raw_str); + Py_CLEAR(state->_subtype_str); + Py_CLEAR(state->_binary_str); + Py_CLEAR(state->_scope_str); + Py_CLEAR(state->_inc_str); + Py_CLEAR(state->_time_str); + Py_CLEAR(state->_bid_str); + Py_CLEAR(state->_replace_str); + Py_CLEAR(state->_astimezone_str); + Py_CLEAR(state->_id_str); + Py_CLEAR(state->_dollar_ref_str); + Py_CLEAR(state->_dollar_id_str); + Py_CLEAR(state->_dollar_db_str); + Py_CLEAR(state->_tzinfo_str); + Py_CLEAR(state->_as_doc_str); + Py_CLEAR(state->_utcoffset_str); + Py_CLEAR(state->_from_uuid_str); + Py_CLEAR(state->_as_uuid_str); + Py_CLEAR(state->_from_bid_str); + Py_CLEAR(state->min_datetime); + Py_CLEAR(state->max_datetime); + Py_CLEAR(state->replace_args); + Py_CLEAR(state->replace_kwargs); return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_cbson", - NULL, - sizeof(struct module_state), - _CBSONMethods, - NULL, - _cbson_traverse, - _cbson_clear, - NULL -}; - -PyMODINIT_FUNC -PyInit__cbson(void) +/* Multi-phase extension module initialization code. + * See https://peps.python.org/pep-0489/. 
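+ * The interpreter creates the module object itself and then runs
+ * _cbson_exec to populate it, so each (sub)interpreter gets its own
+ * module state.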
+*/ +static int +_cbson_exec(PyObject *m) { - PyObject *m; PyObject *c_api_object; static void *_cbson_API[_cbson_API_POINTER_COUNT]; @@ -2748,17 +3214,10 @@ PyInit__cbson(void) (void *) buffer_write_int32_at_position; _cbson_API[_cbson_downcast_and_check_INDEX] = (void *) _downcast_and_check; - /* PyCapsule is new in python 3.1 */ c_api_object = PyCapsule_New((void *) _cbson_API, "_cbson._C_API", NULL); if (c_api_object == NULL) INITERROR; - m = PyModule_Create(&moduledef); - if (m == NULL) { - Py_DECREF(c_api_object); - INITERROR; - } - /* Import several python objects */ if (_load_python_objects(m)) { Py_DECREF(c_api_object); @@ -2766,11 +3225,48 @@ PyInit__cbson(void) INITERROR; } +#if PY_VERSION_HEX >= 0x030D0000 + if (PyModule_Add(m, "_C_API", c_api_object) < 0) { + Py_DECREF(m); + INITERROR; + } +# else if (PyModule_AddObject(m, "_C_API", c_api_object) < 0) { Py_DECREF(c_api_object); Py_DECREF(m); INITERROR; } +#endif + + return 0; +} + +static PyModuleDef_Slot _cbson_slots[] = { + {Py_mod_exec, _cbson_exec}, +#if defined(Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED) + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030D0000 + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_cbson", + NULL, + sizeof(struct module_state), + _CBSONMethods, + _cbson_slots, + _cbson_traverse, + _cbson_clear, + NULL +}; - return m; +PyMODINIT_FUNC +PyInit__cbson(void) +{ + return PyModuleDef_Init(&moduledef); } diff --git a/bson/_cbsonmodule.h b/bson/_cbsonmodule.h index 12a2c8ac67..3be2b74427 100644 --- a/bson/_cbsonmodule.h +++ b/bson/_cbsonmodule.h @@ -23,28 +23,35 @@ /* * This macro is basically an implementation of asprintf for win32 * We print to the provided buffer to get the string value as an int. + * USE LL2STR. This is kept only to test LL2STR. */ #if defined(_MSC_VER) && (_MSC_VER >= 1400) #define INT2STRING(buffer, i) \ _snprintf_s((buffer), \ - _scprintf("%d", (i)) + 1, \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat_s((dest), (n), (src)) #else #define INT2STRING(buffer, i) \ _snprintf((buffer), \ - _scprintf("%d", (i)) + 1, \ - "%d", \ + _scprintf("%lld", (i)) + 1, \ + "%lld", \ (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif #else -#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%d", (i)) +#define INT2STRING(buffer, i) snprintf((buffer), sizeof((buffer)), "%lld", (i)) #define STRCAT(dest, n, src) strcat((dest), (src)) #endif +/* Just enough space in char array to hold LLONG_MIN and null terminator */ +#define BUF_SIZE 21 +/* Converts integer to its string representation in decimal notation. 
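 * Intended to be called through the LL2STR macro with a char[BUF_SIZE]
 * buffer, so the size argument can be computed with sizeof.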
*/ +extern int cbson_long_long_to_str(long long int num, char* str, size_t size); +#define LL2STR(buffer, i) cbson_long_long_to_str((i), (buffer), sizeof(buffer)) + typedef struct type_registry_t { PyObject* encoder_map; PyObject* decoder_map; @@ -62,6 +69,7 @@ typedef struct codec_options_t { char* unicode_decode_error_handler; PyObject* tzinfo; type_registry_t type_registry; + unsigned char datetime_conversion; PyObject* options_obj; unsigned char is_raw_bson; } codec_options_t; @@ -85,7 +93,7 @@ typedef struct codec_options_t { #define _cbson_convert_codec_options_INDEX 4 #define _cbson_convert_codec_options_RETURN int -#define _cbson_convert_codec_options_PROTO (PyObject* options_obj, void* p) +#define _cbson_convert_codec_options_PROTO (PyObject* self, PyObject* options_obj, codec_options_t* options) #define _cbson_destroy_codec_options_INDEX 5 #define _cbson_destroy_codec_options_RETURN void diff --git a/bson/_helpers.py b/bson/_helpers.py index ee3b0f1099..5a479867c2 100644 --- a/bson/_helpers.py +++ b/bson/_helpers.py @@ -13,8 +13,10 @@ # limitations under the License. """Setstate and getstate functions for objects with __slots__, allowing - compatibility with default pickling protocol +compatibility with default pickling protocol """ +from __future__ import annotations + from typing import Any, Mapping @@ -33,7 +35,7 @@ def _mangle_name(name: str, prefix: str) -> str: def _getstate_slots(self: Any) -> Mapping[Any, Any]: prefix = self.__class__.__name__ - ret = dict() + ret = {} for name in self.__slots__: mangled_name = _mangle_name(name, prefix) if hasattr(self, mangled_name): diff --git a/bson/binary.py b/bson/binary.py index a270eae8d2..48eb12b0ac 100644 --- a/bson/binary.py +++ b/bson/binary.py @@ -11,8 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations -from typing import TYPE_CHECKING, Any, Tuple, Type, Union +import struct +import warnings +from enum import Enum +from typing import TYPE_CHECKING, Any, Optional, Sequence, Tuple, Type, Union, overload from uuid import UUID """Tools for representing BSON binary data. @@ -75,7 +79,7 @@ class UuidRepresentation: :class:`~bson.binary.Binary` instance will be returned instead of a :class:`uuid.UUID` instance. - See :ref:`unspecified-representation-details` for details. + See `unspecified representation details `_ for details. .. versionadded:: 3.11 """ @@ -87,7 +91,7 @@ class UuidRepresentation: and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`UUID_SUBTYPE`. - See :ref:`standard-representation-details` for details. + See `standard representation details `_ for details. .. versionadded:: 3.11 """ @@ -99,7 +103,7 @@ class UuidRepresentation: and decoded from BSON binary, using RFC-4122 byte order with binary subtype :data:`OLD_UUID_SUBTYPE`. - See :ref:`python-legacy-representation-details` for details. + See `python legacy representation details `_ for details. .. versionadded:: 3.11 """ @@ -111,7 +115,7 @@ class UuidRepresentation: and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the Java driver's legacy byte order. - See :ref:`java-legacy-representation-details` for details. + See `Java Legacy UUID `_ for details. .. versionadded:: 3.11 """ @@ -123,7 +127,7 @@ class UuidRepresentation: and decoded from BSON binary subtype :data:`OLD_UUID_SUBTYPE`, using the C# driver's legacy byte order. 
- See :ref:`csharp-legacy-representation-details` for details. + See `C# Legacy UUID `_ for details. .. versionadded:: 3.11 """ @@ -183,48 +187,122 @@ class UuidRepresentation: .. versionadded:: 4.0 """ +SENSITIVE_SUBTYPE = 8 +"""BSON binary subtype for sensitive data. + +.. versionadded:: 4.5 +""" + + +VECTOR_SUBTYPE = 9 +"""BSON binary subtype for densely packed vector data. + +.. versionadded:: 4.10 +""" + + USER_DEFINED_SUBTYPE = 128 """BSON binary subtype for any user defined structure. """ +class BinaryVectorDtype(Enum): + """Datatypes of vector subtype. + + :param FLOAT32: (0x27) Pack list of :class:`float` as float32 + :param INT8: (0x03) Pack list of :class:`int` in [-128, 127] as signed int8 + :param PACKED_BIT: (0x10) Pack list of :class:`int` in [0, 255] as unsigned uint8 + + The `PACKED_BIT` value represents a special case where vector values themselves + can only be of two values (0 or 1) but these are packed together into groups of 8, + a byte. In Python, these are displayed as ints in range [0, 255] + + Each value is of type bytes with a length of one. + + .. versionadded:: 4.10 + """ + + INT8 = b"\x03" + FLOAT32 = b"\x27" + PACKED_BIT = b"\x10" + + +class BinaryVector: + """Vector of numbers along with metadata for binary interoperability. + .. versionadded:: 4.10 + """ + + __slots__ = ("data", "dtype", "padding") + + def __init__(self, data: Sequence[float | int], dtype: BinaryVectorDtype, padding: int = 0): + """ + :param data: Sequence of numbers representing the mathematical vector. + :param dtype: The data type stored in binary + :param padding: The number of bits in the final byte that are to be ignored + when a vector element's size is less than a byte + and the length of the vector is not a multiple of 8. + """ + self.data = data + self.dtype = dtype + self.padding = padding + + def __repr__(self) -> str: + return f"BinaryVector(dtype={self.dtype}, padding={self.padding}, data={self.data})" + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, BinaryVector): + return False + return ( + self.dtype == other.dtype and self.padding == other.padding and self.data == other.data + ) + + def __len__(self) -> int: + return len(self.data) + + class Binary(bytes): """Representation of BSON binary data. - This is necessary because we want to represent Python strings as - the BSON string type. We need to wrap binary data so we can tell + We want to represent Python strings as the BSON string type. + We need to wrap binary data so that we can tell the difference between what should be considered binary data and what should be considered a string when we encode to BSON. - Raises TypeError if `data` is not an instance of :class:`bytes` - (:class:`str` in python 2) or `subtype` is not an instance of - :class:`int`. Raises ValueError if `subtype` is not in [0, 256). + Subtype 9 provides a space-efficient representation of 1-dimensional vector data. + Its data is prepended with two bytes of metadata. + The first (dtype) describes its data type, such as float32 or int8. + The second (padding) prescribes the number of bits to ignore in the final byte. + This is relevant when the element size of the dtype is not a multiple of 8. + + Raises TypeError if `subtype` is not an instance of :class:`int`. + Raises ValueError if `subtype` is not in [0, 256). .. note:: - In python 3 instances of Binary with subtype 0 will be decoded - directly to :class:`bytes`. + Instances of Binary with subtype 0 will be decoded directly to :class:`bytes`. 
- :Parameters: - - `data`: the binary data to represent. Can be any bytes-like type + :param data: the binary data to represent. Can be any bytes-like type that implements the buffer protocol. - - `subtype` (optional): the `binary subtype + :param subtype: the `binary subtype `_ to use .. versionchanged:: 3.9 - Support any bytes-like type that implements the buffer protocol. + Support any bytes-like type that implements the buffer protocol. + + .. versionchanged:: 4.10 + Addition of vector subtype. """ _type_marker = 5 __subtype: int def __new__( - cls: Type["Binary"], - data: Union[memoryview, bytes, "_mmap", "_array"], + cls: Type[Binary], + data: Union[memoryview, bytes, bytearray, _mmap, _array[Any]], subtype: int = BINARY_SUBTYPE, - ) -> "Binary": + ) -> Binary: if not isinstance(subtype, int): - raise TypeError("subtype must be an instance of int") + raise TypeError(f"subtype must be an instance of int, not {type(subtype)}") if subtype >= 256 or subtype < 0: raise ValueError("subtype must be contained in [0, 256)") # Support any type that implements the buffer protocol. @@ -234,8 +312,8 @@ def __new__( @classmethod def from_uuid( - cls: Type["Binary"], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD - ) -> "Binary": + cls: Type[Binary], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD + ) -> Binary: """Create a BSON Binary object from a Python UUID. Creates a :class:`~bson.binary.Binary` object from a @@ -246,17 +324,16 @@ def from_uuid( Raises :exc:`TypeError` if `uuid` is not an instance of :class:`~uuid.UUID`. - :Parameters: - - `uuid`: A :class:`uuid.UUID` instance. - - `uuid_representation`: A member of + :param uuid: A :class:`uuid.UUID` instance. + :param uuid_representation: A member of :class:`~bson.binary.UuidRepresentation`. Default: :const:`~bson.binary.UuidRepresentation.STANDARD`. - See :ref:`handling-uuid-data-example` for details. + See `UUID representations `_ for details. .. versionadded:: 3.11 """ if not isinstance(uuid, UUID): - raise TypeError("uuid must be an instance of uuid.UUID") + raise TypeError(f"uuid must be an instance of uuid.UUID, not {type(uuid)}") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( @@ -297,16 +374,15 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI Raises :exc:`ValueError` if this :class:`~bson.binary.Binary` instance does not contain a UUID. - :Parameters: - - `uuid_representation`: A member of + :param uuid_representation: A member of :class:`~bson.binary.UuidRepresentation`. Default: :const:`~bson.binary.UuidRepresentation.STANDARD`. - See :ref:`handling-uuid-data-example` for details. + See `UUID representations `_ for details. .. versionadded:: 3.11 """ if self.subtype not in ALL_UUID_SUBTYPES: - raise ValueError("cannot decode subtype %s as a uuid" % (self.subtype,)) + raise ValueError(f"cannot decode subtype {self.subtype} as a uuid") if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError( @@ -330,10 +406,144 @@ def as_uuid(self, uuid_representation: int = UuidRepresentation.STANDARD) -> UUI return UUID(bytes=self) raise ValueError( - "cannot decode subtype %s to %s" - % (self.subtype, UUID_REPRESENTATION_NAMES[uuid_representation]) + f"cannot decode subtype {self.subtype} to {UUID_REPRESENTATION_NAMES[uuid_representation]}" ) + @classmethod + @overload + def from_vector(cls: Type[Binary], vector: BinaryVector) -> Binary: + ... 
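+    # The second overload below takes a plain list of numbers together
+    # with an explicit dtype and optional padding.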
+
+    @classmethod
+    @overload
+    def from_vector(
+        cls: Type[Binary],
+        vector: Union[list[int], list[float]],
+        dtype: BinaryVectorDtype,
+        padding: int = 0,
+    ) -> Binary:
+        ...
+
+    @classmethod
+    def from_vector(
+        cls: Type[Binary],
+        vector: Union[BinaryVector, list[int], list[float]],
+        dtype: Optional[BinaryVectorDtype] = None,
+        padding: Optional[int] = None,
+    ) -> Binary:
+        """Create a BSON :class:`~bson.binary.Binary` of Vector subtype.
+
+        To interpret the representation of the numbers, a data type must be included.
+        See :class:`~bson.binary.BinaryVectorDtype` for available types and descriptions.
+
+        The dtype and padding are prepended to the binary data's value.
+
+        :param vector: Either a List of values, or a :class:`~bson.binary.BinaryVector` dataclass.
+        :param dtype: Data type of the values
+        :param padding: For fractional bytes, number of bits to ignore at end of vector.
+        :return: Binary packed data identified by dtype and padding.
+
+        .. versionchanged:: 4.14
+           When padding is non-zero, ignored bits should be zero. Raise exception on encoding, warn on decoding.
+
+        .. versionadded:: 4.10
+        """
+        if isinstance(vector, BinaryVector):
+            if dtype or padding:
+                raise ValueError(
+                    "The first argument, vector, has type BinaryVector. "
+                    "dtype or padding cannot be separately defined, but were."
+                )
+            dtype = vector.dtype
+            padding = vector.padding
+            vector = vector.data  # type: ignore
+
+        padding = 0 if padding is None else padding
+        if dtype == BinaryVectorDtype.INT8:  # pack ints in [-128, 127] as signed int8
+            format_str = "b"
+            if padding:
+                raise ValueError(f"padding does not apply to {dtype=}")
+        elif dtype == BinaryVectorDtype.PACKED_BIT:  # pack ints in [0, 255] as unsigned uint8
+            format_str = "B"
+            if padding < 0 or padding > 7:
+                raise ValueError(f"{padding=}. It must be in [0, 1, ..., 7].")
+            if padding and not vector:
+                raise ValueError("Empty vector with non-zero padding.")
+        elif dtype == BinaryVectorDtype.FLOAT32:  # pack floats as float32
+            format_str = "f"
+            if padding:
+                raise ValueError(f"padding does not apply to {dtype=}")
+        else:
+            raise NotImplementedError("%s not yet supported" % dtype)
+
+        metadata = struct.pack("<sB", dtype.value, padding)
+        data = struct.pack(f"<{len(vector)}{format_str}", *vector)  # type: ignore
+        return cls(metadata + data, subtype=VECTOR_SUBTYPE)
+
+    def as_vector(self) -> BinaryVector:
+        """From the Binary, create a list of numbers, along with dtype and padding.
+
+        :return: BinaryVector
+
+        .. versionadded:: 4.10
+        """
+
+        if self.subtype != VECTOR_SUBTYPE:
+            raise ValueError(f"Cannot decode subtype {self.subtype} as a vector")
+
+        position = 0
+        dtype, padding = struct.unpack_from("<sB", self, position)
+        position += 2
+        dtype = BinaryVectorDtype(dtype)
+        n_values = len(self) - position
+
+        if dtype == BinaryVectorDtype.INT8:
+            dtype_format = "b"
+            format_string = f"<{n_values}{dtype_format}"
+            return BinaryVector(list(struct.unpack_from(format_string, self, position)), dtype, padding)
+
+        elif dtype == BinaryVectorDtype.FLOAT32:
+            n_bytes = len(self) - position
+            n_values = n_bytes // 4
+            if n_bytes % 4:
+                raise ValueError("Corrupt data. N bytes for a float32 vector must be a multiple of 4.")
+            return BinaryVector(list(struct.unpack_from(f"<{n_values}f", self, position)), dtype, padding)
+
+        elif dtype == BinaryVectorDtype.PACKED_BIT:
+            if padding and not n_values:
+                raise ValueError("Corrupt data. Vector has a padding P, but no data.")
+            if padding > 7 or padding < 0:
+                raise ValueError(f"Corrupt data. Padding ({padding}) must be between 0 and 7.")
+            dtype_format = "B"
+            format_string = f"<{n_values}{dtype_format}"
+            unpacked_uint8s = list(struct.unpack_from(format_string, self, position))
+            if padding and n_values and unpacked_uint8s[-1] & (1 << padding) - 1 != 0:
+                warnings.warn(
+                    "Vector has a padding P, but bits in the final byte lower than P are non-zero. "
+                    "For pymongo>=5.0, they must be zero.",
+                    DeprecationWarning,
+                    stacklevel=2,
+                )
+            return BinaryVector(unpacked_uint8s, dtype, padding)
+
+        else:
+            raise NotImplementedError("Binary Vector dtype %s not yet supported" % dtype.name)
+
     @property
     def subtype(self) -> int:
         """Subtype of this binary data."""
@@ -341,7 +551,7 @@ def subtype(self) -> int:

     def __getnewargs__(self) -> Tuple[bytes, int]:  # type: ignore[override]
         # Work around http://bugs.python.org/issue7382
-        data = super(Binary, self).__getnewargs__()[0]
+        data = super().__getnewargs__()[0]
         if not isinstance(data, bytes):
             data = data.encode("latin-1")
         return data, self.__subtype
@@ -355,10 +565,13 @@ def __eq__(self, other: Any) -> bool:
         return False

     def __hash__(self) -> int:
-        return super(Binary, self).__hash__() ^ hash(self.__subtype)
+        return super().__hash__() ^ hash(self.__subtype)

     def __ne__(self, other: Any) -> bool:
         return not self == other

-    def __repr__(self):
-        return "Binary(%s, %s)" % (bytes.__repr__(self), self.__subtype)
+    def __repr__(self) -> str:
+        if self.__subtype == SENSITIVE_SUBTYPE:
+            return f"<Binary(REDACTED, {self.__subtype})>"
+        else:
+            return f"Binary({bytes.__repr__(self)}, {self.__subtype})"
diff --git a/bson/bson-endian.h b/bson/bson-endian.h
index c34a58dde1..e906b0776f 100644
--- a/bson/bson-endian.h
+++ b/bson/bson-endian.h
@@ -25,7 +25,6 @@

 #ifdef _MSC_VER
-# include "bson-stdint-win32.h"
 # define BSON_INLINE __inline
 #else
 # include <stdint.h>
diff --git a/bson/bson-stdint-win32.h b/bson/bson-stdint-win32.h
deleted file mode 100644
index cb2acd9384..0000000000
--- a/bson/bson-stdint-win32.h
+++ /dev/null
@@ -1,259 +0,0 @@
-// ISO C9x compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-// Copyright (c) 2006-2013 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// 3. Neither the name of the product nor the names of its contributors may
-// be used to endorse or promote products derived from this software
-// without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////

-#ifndef _MSC_VER // [
-#error "Use this header only with Microsoft Visual C++ compilers!"
-#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#if _MSC_VER >= 1600 // [ -#include -#else // ] _MSC_VER >= 1600 [ - -#include - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we should wrap include with 'extern "C++" {}' -// or compiler give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus -extern "C" { -#endif -# include -#ifdef __cplusplus -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. char has the same size as __int8 -// so we give up on __intX for them. -#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX -#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX 
-#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -// These #ifndef's are needed to prevent collisions with . -// Check out Issue 9 for the details. -#ifndef INTMAX_C // [ -# define INTMAX_C INT64_C -#endif // INTMAX_C ] -#ifndef UINTMAX_C // [ -# define UINTMAX_C UINT64_C -#endif // UINTMAX_C ] - -#endif // __STDC_CONSTANT_MACROS ] - -#endif // _MSC_VER >= 1600 ] - -#endif // _MSC_STDINT_H_ ] diff --git a/bson/code.py b/bson/code.py index b732e82469..f0523b2a95 100644 --- a/bson/code.py +++ b/bson/code.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing JavaScript code in BSON. -""" +"""Tools for representing JavaScript code in BSON.""" +from __future__ import annotations from collections.abc import Mapping as _Mapping from typing import Any, Mapping, Optional, Type, Union @@ -23,23 +23,22 @@ class Code(str): """BSON's JavaScript code type. Raises :class:`TypeError` if `code` is not an instance of - :class:`basestring` (:class:`str` in python 3) or `scope` - is not ``None`` or an instance of :class:`dict`. + :class:`str` or `scope` is not ``None`` or an instance + of :class:`dict`. Scope variables can be set by passing a dictionary as the `scope` argument or by using keyword arguments. 
If a variable is set as a keyword argument it will override any setting for that variable in the `scope` dictionary. - :Parameters: - - `code`: A string containing JavaScript code to be evaluated or another + :param code: A string containing JavaScript code to be evaluated or another instance of Code. In the latter case, the scope of `code` becomes this Code's :attr:`scope`. - - `scope` (optional): dictionary representing the scope in which + :param scope: dictionary representing the scope in which `code` should be evaluated - a mapping from identifiers (as strings) to values. Defaults to ``None``. This is applied after any scope associated with a given `code` above. - - `**kwargs` (optional): scope variables can also be passed as + :param kwargs: scope variables can also be passed as keyword arguments. These are applied after `scope` and `code`. .. versionchanged:: 3.4 @@ -51,13 +50,13 @@ class Code(str): __scope: Union[Mapping[str, Any], None] def __new__( - cls: Type["Code"], - code: Union[str, "Code"], + cls: Type[Code], + code: Union[str, Code], scope: Optional[Mapping[str, Any]] = None, - **kwargs: Any - ) -> "Code": + **kwargs: Any, + ) -> Code: if not isinstance(code, str): - raise TypeError("code must be an instance of str") + raise TypeError(f"code must be an instance of str, not {type(code)}") self = str.__new__(cls, code) @@ -68,7 +67,7 @@ def __new__( if scope is not None: if not isinstance(scope, _Mapping): - raise TypeError("scope must be an instance of dict") + raise TypeError(f"scope must be an instance of dict, not {type(scope)}") if self.__scope is not None: self.__scope.update(scope) # type: ignore else: @@ -87,8 +86,8 @@ def scope(self) -> Optional[Mapping[str, Any]]: """Scope dictionary for this instance or ``None``.""" return self.__scope - def __repr__(self): - return "Code(%s, %r)" % (str.__repr__(self), self.__scope) + def __repr__(self) -> str: + return f"Code({str.__repr__(self)}, {self.__scope!r})" def __eq__(self, other: Any) -> bool: if isinstance(other, Code): diff --git a/bson/codec_options.py b/bson/codec_options.py index 4eaff59ea7..add5416a5b 100644 --- a/bson/codec_options.py +++ b/bson/codec_options.py @@ -13,20 +13,23 @@ # limitations under the License. """Tools for specifying BSON codec options.""" +from __future__ import annotations import abc import datetime +import enum from collections.abc import MutableMapping as _MutableMapping from typing import ( + TYPE_CHECKING, Any, Callable, - Dict, + Generic, Iterable, Mapping, NamedTuple, Optional, + Tuple, Type, - TypeVar, Union, cast, ) @@ -36,11 +39,7 @@ UUID_REPRESENTATION_NAMES, UuidRepresentation, ) - - -def _abstractproperty(func: Callable[..., Any]) -> property: - return property(abc.abstractmethod(func)) - +from bson.typings import _DocumentType _RAW_BSON_DOCUMENT_MARKER = 101 @@ -58,18 +57,16 @@ class TypeEncoder(abc.ABC): Codec classes must implement the ``python_type`` attribute, and the ``transform_python`` method to support encoding. - See :ref:`custom-type-type-codec` documentation for an example. + See `encode data with type codecs `_ documentation for an example. 
""" - @_abstractproperty + @abc.abstractproperty def python_type(self) -> Any: """The Python type to be converted into something serializable.""" - pass @abc.abstractmethod def transform_python(self, value: Any) -> Any: """Convert the given Python object into something serializable.""" - pass class TypeDecoder(abc.ABC): @@ -79,18 +76,16 @@ class TypeDecoder(abc.ABC): Codec classes must implement the ``bson_type`` attribute, and the ``transform_bson`` method to support decoding. - See :ref:`custom-type-type-codec` documentation for an example. + See `encode data with type codecs `_ documentation for an example. """ - @_abstractproperty + @abc.abstractproperty def bson_type(self) -> Any: """The BSON type to be converted into our own type.""" - pass @abc.abstractmethod def transform_bson(self, value: Any) -> Any: """Convert the given BSON value into our own type.""" - pass class TypeCodec(TypeEncoder, TypeDecoder): @@ -103,18 +98,15 @@ class TypeCodec(TypeEncoder, TypeDecoder): ``bson_type`` attribute, and the ``transform_bson`` method to support decoding. - See :ref:`custom-type-type-codec` documentation for an example. + See `encode data with type codecs `_ documentation for an example. """ - pass - _Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] _Fallback = Callable[[Any], Any] -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) -class TypeRegistry(object): +class TypeRegistry: """Encapsulates type codecs used in encoding and / or decoding BSON, as well as the fallback encoder. Type registries cannot be modified after instantiation. @@ -126,18 +118,17 @@ class TypeRegistry(object): >>> type_registry = TypeRegistry([Codec1, Codec2, Codec3, ...], ... fallback_encoder) - See :ref:`custom-type-type-registry` documentation for an example. + See `add codec to the type registry `_ documentation for an example. - :Parameters: - - `type_codecs` (optional): iterable of type codec instances. If + :param type_codecs: iterable of type codec instances. If ``type_codecs`` contains multiple codecs that transform a single python or BSON type, the transformation specified by the type codec occurring last prevails. A TypeError will be raised if one or more type codecs modify the encoding behavior of a built-in :mod:`bson` type. - - `fallback_encoder` (optional): callable that accepts a single, + :param fallback_encoder: callable that accepts a single, unencodable python value and transforms it into a type that - :mod:`bson` can encode. See :ref:`fallback-encoder-callable` + :mod:`bson` can encode. See `define a fallback encoder `_ documentation for an example. 
""" @@ -148,8 +139,8 @@ def __init__( ) -> None: self.__type_codecs = list(type_codecs or []) self._fallback_encoder = fallback_encoder - self._encoder_map: Dict[Any, Any] = {} - self._decoder_map: Dict[Any, Any] = {} + self._encoder_map: dict[Any, Any] = {} + self._decoder_map: dict[Any, Any] = {} if self._fallback_encoder is not None: if not callable(fallback_encoder): @@ -166,10 +157,19 @@ def __init__( self._decoder_map[codec.bson_type] = codec.transform_bson if not is_valid_codec: raise TypeError( - "Expected an instance of %s, %s, or %s, got %r instead" - % (TypeEncoder.__name__, TypeDecoder.__name__, TypeCodec.__name__, codec) + f"Expected an instance of {TypeEncoder.__name__}, {TypeDecoder.__name__}, or {TypeCodec.__name__}, got {codec!r} instead" ) + @property + def codecs(self) -> list[TypeEncoder | TypeDecoder | TypeCodec]: + """The list of type codecs in this registry.""" + return self.__type_codecs + + @property + def fallback_encoder(self) -> Optional[_Fallback]: + """The fallback encoder in this registry.""" + return self._fallback_encoder + def _validate_type_encoder(self, codec: _Codec) -> None: from bson import _BUILT_IN_TYPES @@ -177,12 +177,12 @@ def _validate_type_encoder(self, codec: _Codec) -> None: if issubclass(cast(TypeCodec, codec).python_type, pytype): err_msg = ( "TypeEncoders cannot change how built-in types are " - "encoded (encoder %s transforms type %s)" % (codec, pytype) + f"encoded (encoder {codec} transforms type {pytype})" ) raise TypeError(err_msg) - def __repr__(self): - return "%s(type_codecs=%r, fallback_encoder=%r)" % ( + def __repr__(self) -> str: + return "{}(type_codecs={!r}, fallback_encoder={!r})".format( self.__class__.__name__, self.__type_codecs, self._fallback_encoder, @@ -198,6 +198,41 @@ def __eq__(self, other: Any) -> Any: ) +class DatetimeConversion(int, enum.Enum): + """Options for decoding BSON datetimes.""" + + DATETIME = 1 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`. + + BSON UTC datetimes that cannot be represented as a + :class:`~datetime.datetime` will raise an :class:`OverflowError` + or a :class:`ValueError`. + + .. versionadded 4.3 + """ + + DATETIME_CLAMP = 2 + """Decode a BSON UTC datetime as a :class:`datetime.datetime`, clamping + to :attr:`~datetime.datetime.min` and :attr:`~datetime.datetime.max`. + + .. versionadded 4.3 + """ + + DATETIME_MS = 3 + """Decode a BSON UTC datetime as a :class:`~bson.datetime_ms.DatetimeMS` + object. + + .. versionadded 4.3 + """ + + DATETIME_AUTO = 4 + """Decode a BSON UTC datetime as a :class:`datetime.datetime` if possible, + and a :class:`~bson.datetime_ms.DatetimeMS` if not. + + .. 
versionadded 4.3 + """ + + class _BaseCodecOptions(NamedTuple): document_class: Type[Mapping[str, Any]] tz_aware: bool @@ -205,198 +240,269 @@ class _BaseCodecOptions(NamedTuple): unicode_decode_error_handler: str tzinfo: Optional[datetime.tzinfo] type_registry: TypeRegistry + datetime_conversion: Optional[DatetimeConversion] + + +if TYPE_CHECKING: + + class CodecOptions(Tuple[_DocumentType], Generic[_DocumentType]): + document_class: Type[_DocumentType] + tz_aware: bool + uuid_representation: int + unicode_decode_error_handler: Optional[str] + tzinfo: Optional[datetime.tzinfo] + type_registry: TypeRegistry + datetime_conversion: Optional[int] + + def __new__( + cls: Type[CodecOptions[_DocumentType]], + document_class: Optional[Type[_DocumentType]] = ..., + tz_aware: bool = ..., + uuid_representation: Optional[int] = ..., + unicode_decode_error_handler: Optional[str] = ..., + tzinfo: Optional[datetime.tzinfo] = ..., + type_registry: Optional[TypeRegistry] = ..., + datetime_conversion: Optional[int] = ..., + ) -> CodecOptions[_DocumentType]: + ... + + # CodecOptions API + def with_options(self, **kwargs: Any) -> CodecOptions[Any]: + ... + + def _arguments_repr(self) -> str: + ... + + def _options_dict(self) -> dict[Any, Any]: + ... + + # NamedTuple API + @classmethod + def _make(cls, obj: Iterable[Any]) -> CodecOptions[_DocumentType]: + ... + + def _asdict(self) -> dict[str, Any]: + ... + + def _replace(self, **kwargs: Any) -> CodecOptions[_DocumentType]: + ... + + _source: str + _fields: Tuple[str] + +else: + + class CodecOptions(_BaseCodecOptions): + """Encapsulates options used encoding and / or decoding BSON.""" + + def __init__(self, *args, **kwargs): + """Encapsulates options used encoding and / or decoding BSON. + + The `document_class` option is used to define a custom type for use + decoding BSON documents. Access to the underlying raw BSON bytes for + a document is available using the :class:`~bson.raw_bson.RawBSONDocument` + type:: + + >>> from bson.raw_bson import RawBSONDocument + >>> from bson.codec_options import CodecOptions + >>> codec_options = CodecOptions(document_class=RawBSONDocument) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc.raw + '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' + + The document class can be any type that inherits from + :class:`~collections.abc.MutableMapping`:: + + >>> class AttributeDict(dict): + ... # A dict that supports attribute access. + ... def __getattr__(self, key): + ... return self[key] + ... def __setattr__(self, key, value): + ... self[key] = value + ... + >>> codec_options = CodecOptions(document_class=AttributeDict) + >>> coll = db.get_collection('test', codec_options=codec_options) + >>> doc = coll.find_one() + >>> doc._id + ObjectId('5b3016359110ea14e8c58b93') + + See `Dates and Times `_ for examples using the `tz_aware` and + `tzinfo` options. + + See `UUID `_ for examples using the `uuid_representation` + option. + + :param document_class: BSON documents returned in queries will be decoded + to an instance of this class. Must be a subclass of + :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. + :param tz_aware: If ``True``, BSON datetimes will be decoded to timezone + aware instances of :class:`~datetime.datetime`. Otherwise they will be + naive. Defaults to ``False``. + :param uuid_representation: The BSON representation to use when encoding + and decoding instances of :class:`~uuid.UUID`. 
Defaults to + :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New + applications should consider setting this to + :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language + compatibility. See `UUID representations `_ for details. + :param unicode_decode_error_handler: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + :param tzinfo: A :class:`~datetime.tzinfo` subclass that specifies the + timezone to/from which :class:`~datetime.datetime` objects should be + encoded/decoded. + :param type_registry: Instance of :class:`TypeRegistry` used to customize + encoding and decoding behavior. + :param datetime_conversion: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. + + .. versionchanged:: 4.0 + The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionadded:: 3.8 + `type_registry` attribute. + + .. warning:: Care must be taken when changing + `unicode_decode_error_handler` from its default value ('strict'). + The 'replace' and 'ignore' modes should not be used when documents + retrieved from the server will be modified in the client application + and stored back to the server. + """ + super().__init__() + + def __new__( + cls: Type[CodecOptions], + document_class: Optional[Type[Mapping[str, Any]]] = None, + tz_aware: bool = False, + uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, + unicode_decode_error_handler: str = "strict", + tzinfo: Optional[datetime.tzinfo] = None, + type_registry: Optional[TypeRegistry] = None, + datetime_conversion: Optional[DatetimeConversion] = DatetimeConversion.DATETIME, + ) -> CodecOptions: + doc_class = document_class or dict + # issubclass can raise TypeError for generic aliases like SON[str, Any]. + # In that case we can use the base class for the comparison. 
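
The guard that follows the comment above exists because issubclass() rejects parameterized generics outright. A hypothetical REPL transcript of the failure mode being handled, using bson.son.SON:

    >>> from collections.abc import MutableMapping
    >>> from typing import Any
    >>> from bson.son import SON
    >>> issubclass(SON, MutableMapping)
    True
    >>> issubclass(SON[str, Any], MutableMapping)
    Traceback (most recent call last):
      ...
    TypeError: issubclass() arg 1 must be a class
    >>> SON[str, Any].__origin__
    <class 'bson.son.SON'>

Falling back to `__origin__` lets `document_class=SON[str, Any]` pass validation while still rejecting non-mapping types.
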
+ is_mapping = False + try: + is_mapping = issubclass(doc_class, _MutableMapping) + except TypeError: + if hasattr(doc_class, "__origin__"): + is_mapping = issubclass(doc_class.__origin__, _MutableMapping) + if not (is_mapping or _raw_document_class(doc_class)): + raise TypeError( + "document_class must be dict, bson.son.SON, " + "bson.raw_bson.RawBSONDocument, or a " + "subclass of collections.abc.MutableMapping" + ) + if not isinstance(tz_aware, bool): + raise TypeError(f"tz_aware must be True or False, was: tz_aware={tz_aware}") + if uuid_representation not in ALL_UUID_REPRESENTATIONS: + raise ValueError( + "uuid_representation must be a value from bson.binary.UuidRepresentation" + ) + if not isinstance(unicode_decode_error_handler, str): + raise ValueError( + f"unicode_decode_error_handler must be a string, not {type(unicode_decode_error_handler)}" + ) + if tzinfo is not None: + if not isinstance(tzinfo, datetime.tzinfo): + raise TypeError( + f"tzinfo must be an instance of datetime.tzinfo, not {type(tzinfo)}" + ) + if not tz_aware: + raise ValueError("cannot specify tzinfo without also setting tz_aware=True") + type_registry = type_registry or TypeRegistry() -class CodecOptions(_BaseCodecOptions): - """Encapsulates options used encoding and / or decoding BSON. - - The `document_class` option is used to define a custom type for use - decoding BSON documents. Access to the underlying raw BSON bytes for - a document is available using the :class:`~bson.raw_bson.RawBSONDocument` - type:: - - >>> from bson.raw_bson import RawBSONDocument - >>> from bson.codec_options import CodecOptions - >>> codec_options = CodecOptions(document_class=RawBSONDocument) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc.raw - '\\x16\\x00\\x00\\x00\\x07_id\\x00[0\\x165\\x91\\x10\\xea\\x14\\xe8\\xc5\\x8b\\x93\\x00' - - The document class can be any type that inherits from - :class:`~collections.abc.MutableMapping`:: - - >>> class AttributeDict(dict): - ... # A dict that supports attribute access. - ... def __getattr__(self, key): - ... return self[key] - ... def __setattr__(self, key, value): - ... self[key] = value - ... - >>> codec_options = CodecOptions(document_class=AttributeDict) - >>> coll = db.get_collection('test', codec_options=codec_options) - >>> doc = coll.find_one() - >>> doc._id - ObjectId('5b3016359110ea14e8c58b93') - - See :doc:`/examples/datetimes` for examples using the `tz_aware` and - `tzinfo` options. - - See :doc:`/examples/uuid` for examples using the `uuid_representation` - option. - - :Parameters: - - `document_class`: BSON documents returned in queries will be decoded - to an instance of this class. Must be a subclass of - :class:`~collections.abc.MutableMapping`. Defaults to :class:`dict`. - - `tz_aware`: If ``True``, BSON datetimes will be decoded to timezone - aware instances of :class:`~datetime.datetime`. Otherwise they will be - naive. Defaults to ``False``. - - `uuid_representation`: The BSON representation to use when encoding - and decoding instances of :class:`~uuid.UUID`. Defaults to - :data:`~bson.binary.UuidRepresentation.UNSPECIFIED`. New - applications should consider setting this to - :data:`~bson.binary.UuidRepresentation.STANDARD` for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. - - `unicode_decode_error_handler`: The error handler to apply when - a Unicode-related error occurs during BSON decoding that would - otherwise raise :exc:`UnicodeDecodeError`. 
Valid options include - 'strict', 'replace', 'backslashreplace', 'surrogateescape', and - 'ignore'. Defaults to 'strict'. - - `tzinfo`: A :class:`~datetime.tzinfo` subclass that specifies the - timezone to/from which :class:`~datetime.datetime` objects should be - encoded/decoded. - - `type_registry`: Instance of :class:`TypeRegistry` used to customize - encoding and decoding behavior. - - .. versionchanged:: 4.0 - The default for `uuid_representation` was changed from - :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to - :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - .. versionadded:: 3.8 - `type_registry` attribute. - - .. warning:: Care must be taken when changing - `unicode_decode_error_handler` from its default value ('strict'). - The 'replace' and 'ignore' modes should not be used when documents - retrieved from the server will be modified in the client application - and stored back to the server. - """ + if not isinstance(type_registry, TypeRegistry): + raise TypeError( + f"type_registry must be an instance of TypeRegistry, not {type(type_registry)}" + ) - def __new__( - cls: Type["CodecOptions"], - document_class: Optional[Type[Mapping[str, Any]]] = None, - tz_aware: bool = False, - uuid_representation: Optional[int] = UuidRepresentation.UNSPECIFIED, - unicode_decode_error_handler: str = "strict", - tzinfo: Optional[datetime.tzinfo] = None, - type_registry: Optional[TypeRegistry] = None, - ) -> "CodecOptions": - doc_class = document_class or dict - # issubclass can raise TypeError for generic aliases like SON[str, Any]. - # In that case we can use the base class for the comparison. - is_mapping = False - try: - is_mapping = issubclass(doc_class, _MutableMapping) - except TypeError: - if hasattr(doc_class, "__origin__"): - is_mapping = issubclass(doc_class.__origin__, _MutableMapping) # type: ignore[union-attr] - if not (is_mapping or _raw_document_class(doc_class)): - raise TypeError( - "document_class must be dict, bson.son.SON, " - "bson.raw_bson.RawBSONDocument, or a " - "subclass of collections.abc.MutableMapping" - ) - if not isinstance(tz_aware, bool): - raise TypeError("tz_aware must be True or False") - if uuid_representation not in ALL_UUID_REPRESENTATIONS: - raise ValueError( - "uuid_representation must be a value from bson.binary.UuidRepresentation" + return tuple.__new__( + cls, + ( + doc_class, + tz_aware, + uuid_representation, + unicode_decode_error_handler, + tzinfo, + type_registry, + datetime_conversion, + ), ) - if not isinstance(unicode_decode_error_handler, str): - raise ValueError("unicode_decode_error_handler must be a string") - if tzinfo is not None: - if not isinstance(tzinfo, datetime.tzinfo): - raise TypeError("tzinfo must be an instance of datetime.tzinfo") - if not tz_aware: - raise ValueError("cannot specify tzinfo without also setting tz_aware=True") - - type_registry = type_registry or TypeRegistry() - - if not isinstance(type_registry, TypeRegistry): - raise TypeError("type_registry must be an instance of TypeRegistry") - - return tuple.__new__( - cls, - ( - doc_class, - tz_aware, - uuid_representation, - unicode_decode_error_handler, - tzinfo, - type_registry, - ), - ) - def _arguments_repr(self) -> str: - """Representation of the arguments used to create this object.""" - document_class_repr = "dict" if self.document_class is dict else repr(self.document_class) + def _arguments_repr(self) -> str: + """Representation of the arguments used to create this object.""" + document_class_repr = ( + "dict" if self.document_class is dict 
else repr(self.document_class) + ) - uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( - self.uuid_representation, self.uuid_representation - ) + uuid_rep_repr = UUID_REPRESENTATION_NAMES.get( + self.uuid_representation, self.uuid_representation + ) - return ( - "document_class=%s, tz_aware=%r, uuid_representation=%s, " - "unicode_decode_error_handler=%r, tzinfo=%r, " - "type_registry=%r" - % ( - document_class_repr, - self.tz_aware, - uuid_rep_repr, - self.unicode_decode_error_handler, - self.tzinfo, - self.type_registry, + return ( + "document_class={}, tz_aware={!r}, uuid_representation={}, " + "unicode_decode_error_handler={!r}, tzinfo={!r}, " + "type_registry={!r}, datetime_conversion={!s}".format( + document_class_repr, + self.tz_aware, + uuid_rep_repr, + self.unicode_decode_error_handler, + self.tzinfo, + self.type_registry, + self.datetime_conversion, + ) ) - ) - def _options_dict(self) -> Dict[str, Any]: - """Dictionary of the arguments used to create this object.""" - # TODO: PYTHON-2442 use _asdict() instead - return { - "document_class": self.document_class, - "tz_aware": self.tz_aware, - "uuid_representation": self.uuid_representation, - "unicode_decode_error_handler": self.unicode_decode_error_handler, - "tzinfo": self.tzinfo, - "type_registry": self.type_registry, - } + def _options_dict(self) -> dict[str, Any]: + """Dictionary of the arguments used to create this object.""" + # TODO: PYTHON-2442 use _asdict() instead + return { + "document_class": self.document_class, + "tz_aware": self.tz_aware, + "uuid_representation": self.uuid_representation, + "unicode_decode_error_handler": self.unicode_decode_error_handler, + "tzinfo": self.tzinfo, + "type_registry": self.type_registry, + "datetime_conversion": self.datetime_conversion, + } - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self._arguments_repr()) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._arguments_repr()})" - def with_options(self, **kwargs: Any) -> "CodecOptions": - """Make a copy of this CodecOptions, overriding some options:: + def with_options(self, **kwargs: Any) -> CodecOptions: + """Make a copy of this CodecOptions, overriding some options:: - >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS - >>> DEFAULT_CODEC_OPTIONS.tz_aware - False - >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) - >>> options.tz_aware - True + >>> from bson.codec_options import DEFAULT_CODEC_OPTIONS + >>> DEFAULT_CODEC_OPTIONS.tz_aware + False + >>> options = DEFAULT_CODEC_OPTIONS.with_options(tz_aware=True) + >>> options.tz_aware + True - .. versionadded:: 3.5 - """ - opts = self._options_dict() - opts.update(kwargs) - return CodecOptions(**opts) + .. 
versionadded:: 3.5 + """ + opts = self._options_dict() + opts.update(kwargs) + return CodecOptions(**opts) -DEFAULT_CODEC_OPTIONS = CodecOptions() +DEFAULT_CODEC_OPTIONS: CodecOptions[dict[str, Any]] = CodecOptions() -def _parse_codec_options(options: Any) -> CodecOptions: +def _parse_codec_options(options: Any) -> CodecOptions[Any]: """Parse BSON codec options.""" kwargs = {} for k in set(options) & { @@ -406,6 +512,7 @@ def _parse_codec_options(options: Any) -> CodecOptions: "unicode_decode_error_handler", "tzinfo", "type_registry", + "datetime_conversion", }: if k == "uuidrepresentation": kwargs["uuid_representation"] = options[k] diff --git a/bson/codec_options.pyi b/bson/codec_options.pyi deleted file mode 100644 index 9d5f5c2656..0000000000 --- a/bson/codec_options.pyi +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2022-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Workaround for https://bugs.python.org/issue43923. -Ideally we would have done this with a single class, but -generic subclasses *must* take a parameter, and prior to Python 3.9 -or in Python 3.7 and 3.8 with `from __future__ import annotations`, -you get the error: "TypeError: 'type' object is not subscriptable". -""" - -import datetime -import abc -from typing import Tuple, Generic, Optional, Mapping, Any, TypeVar, Type, Dict, Iterable, Tuple, MutableMapping, Callable, Union - - -class TypeEncoder(abc.ABC, metaclass=abc.ABCMeta): - @property - @abc.abstractmethod - def python_type(self) -> Any: ... - @abc.abstractmethod - def transform_python(self, value: Any) -> Any: ... - -class TypeDecoder(abc.ABC, metaclass=abc.ABCMeta): - @property - @abc.abstractmethod - def bson_type(self) -> Any: ... - @abc.abstractmethod - def transform_bson(self, value: Any) -> Any: ... - -class TypeCodec(TypeEncoder, TypeDecoder, metaclass=abc.ABCMeta): ... - -Codec = Union[TypeEncoder, TypeDecoder, TypeCodec] -Fallback = Callable[[Any], Any] - -class TypeRegistry: - _decoder_map: Dict[Any, Any] - _encoder_map: Dict[Any, Any] - _fallback_encoder: Optional[Fallback] - - def __init__(self, type_codecs: Optional[Iterable[Codec]] = ..., fallback_encoder: Optional[Fallback] = ...) -> None: ... - def __eq__(self, other: Any) -> Any: ... - - -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) - - -class CodecOptions(Tuple, Generic[_DocumentType]): - document_class: Type[_DocumentType] - tz_aware: bool - uuid_representation: int - unicode_decode_error_handler: Optional[str] - tzinfo: Optional[datetime.tzinfo] - type_registry: TypeRegistry - - def __new__( - cls: Type[CodecOptions], - document_class: Optional[Type[_DocumentType]] = ..., - tz_aware: bool = ..., - uuid_representation: Optional[int] = ..., - unicode_decode_error_handler: Optional[str] = ..., - tzinfo: Optional[datetime.tzinfo] = ..., - type_registry: Optional[TypeRegistry] = ..., - ) -> CodecOptions[_DocumentType]: ... - - # CodecOptions API - def with_options(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ... 
-
-    def _arguments_repr(self) -> str: ...
-
-    def _options_dict(self) -> Dict[Any, Any]: ...
-
-    # NamedTuple API
-    @classmethod
-    def _make(cls, obj: Iterable) -> CodecOptions[_DocumentType]: ...
-
-    def _asdict(self) -> Dict[str, Any]: ...
-
-    def _replace(self, **kwargs: Any) -> CodecOptions[_DocumentType]: ...
-
-    _source: str
-    _fields: Tuple[str]
-
-
-DEFAULT_CODEC_OPTIONS: CodecOptions[MutableMapping[str, Any]]
-_RAW_BSON_DOCUMENT_MARKER: int
-
-def _raw_document_class(document_class: Any) -> bool: ...
-
-def _parse_codec_options(options: Any) -> CodecOptions: ...
diff --git a/bson/datetime_ms.py b/bson/datetime_ms.py
new file mode 100644
index 0000000000..2047bd30b2
--- /dev/null
+++ b/bson/datetime_ms.py
@@ -0,0 +1,182 @@
+# Copyright 2022-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+"""Tools for representing the BSON datetime type.
+
+.. versionadded:: 4.3
+"""
+from __future__ import annotations
+
+import calendar
+import datetime
+from typing import Any, Union, cast
+
+from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, DatetimeConversion
+from bson.errors import InvalidBSON
+from bson.tz_util import utc
+
+EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc)
+EPOCH_NAIVE = EPOCH_AWARE.replace(tzinfo=None)
+_DATETIME_ERROR_SUGGESTION = (
+    "(Consider using CodecOptions(datetime_conversion=DATETIME_AUTO)"
+    " or MongoClient(datetime_conversion='DATETIME_AUTO'))."
+    " See: https://www.mongodb.com/docs/languages/python/pymongo-driver/current/data-formats/dates-and-times/#handling-out-of-range-datetimes"
+)
+
+
+class DatetimeMS:
+    """Represents a BSON UTC datetime."""
+
+    __slots__ = ("_value",)
+
+    def __init__(self, value: Union[int, datetime.datetime]):
+        """Represents a BSON UTC datetime.
+
+        BSON UTC datetimes are defined as an int64 of milliseconds since the
+        Unix epoch. The principal use of DatetimeMS is to represent
+        datetimes outside the range of the Python builtin
+        :class:`~datetime.datetime` class when
+        encoding/decoding BSON.
+
+        To decode UTC datetimes as a ``DatetimeMS``, `datetime_conversion` in
+        :class:`~bson.codec_options.CodecOptions` must be set to 'datetime_ms' or
+        'datetime_auto'. See `handling out of range datetimes `_ for
+        details.
+
+        :param value: An instance of :class:`datetime.datetime` to be
+            represented as milliseconds since the Unix epoch, or an int of
+            milliseconds since the Unix epoch.
+ """ + if isinstance(value, int): + if not (-(2**63) <= value <= 2**63 - 1): + raise OverflowError("Must be a 64-bit integer of milliseconds") + self._value = value + elif isinstance(value, datetime.datetime): + self._value = _datetime_to_millis(value) + else: + raise TypeError(f"{type(value)} is not a valid type for DatetimeMS") + + def __hash__(self) -> int: + return hash(self._value) + + def __repr__(self) -> str: + return type(self).__name__ + "(" + str(self._value) + ")" + + def __lt__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value < other + + def __le__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value <= other + + def __eq__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value == other._value + return False + + def __ne__(self, other: Any) -> bool: + if isinstance(other, DatetimeMS): + return self._value != other._value + return True + + def __gt__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value > other + + def __ge__(self, other: Union[DatetimeMS, int]) -> bool: + return self._value >= other + + _type_marker = 9 + + def as_datetime( + self, codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS + ) -> datetime.datetime: + """Create a Python :class:`~datetime.datetime` from this DatetimeMS object. + + :param codec_options: A CodecOptions instance for specifying how the + resulting DatetimeMS object will be formatted using ``tz_aware`` + and ``tz_info``. Defaults to + :const:`~bson.codec_options.DEFAULT_CODEC_OPTIONS`. + """ + return cast(datetime.datetime, _millis_to_datetime(self._value, codec_options)) + + def __int__(self) -> int: + return self._value + + +def _datetime_to_millis(dtm: datetime.datetime) -> int: + """Convert datetime to milliseconds since epoch UTC.""" + if dtm.utcoffset() is not None: + dtm = dtm - dtm.utcoffset() # type: ignore + return int(calendar.timegm(dtm.timetuple()) * 1000 + dtm.microsecond // 1000) + + +_MIN_UTC = datetime.datetime.min.replace(tzinfo=utc) +_MAX_UTC = datetime.datetime.max.replace(tzinfo=utc) +_MIN_UTC_MS = _datetime_to_millis(_MIN_UTC) +_MAX_UTC_MS = _datetime_to_millis(_MAX_UTC) + + +# Inclusive min and max for timezones. 
+def _min_datetime_ms(tz: datetime.tzinfo = utc) -> int: + delta = tz.utcoffset(_MIN_UTC) + if delta is not None: + offset_millis = (delta.days * 86400 + delta.seconds) * 1000 + delta.microseconds // 1000 + else: + offset_millis = 0 + return max(_MIN_UTC_MS, _MIN_UTC_MS - offset_millis) + + +def _max_datetime_ms(tz: datetime.tzinfo = utc) -> int: + delta = tz.utcoffset(_MAX_UTC) + if delta is not None: + offset_millis = (delta.days * 86400 + delta.seconds) * 1000 + delta.microseconds // 1000 + else: + offset_millis = 0 + return min(_MAX_UTC_MS, _MAX_UTC_MS - offset_millis) + + +def _millis_to_datetime( + millis: int, opts: CodecOptions[Any] +) -> Union[datetime.datetime, DatetimeMS]: + """Convert milliseconds since epoch UTC to datetime.""" + if ( + opts.datetime_conversion == DatetimeConversion.DATETIME + or opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP + or opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO + ): + tz = opts.tzinfo or utc + if opts.datetime_conversion == DatetimeConversion.DATETIME_CLAMP: + millis = max(_min_datetime_ms(tz), min(millis, _max_datetime_ms(tz))) + elif opts.datetime_conversion == DatetimeConversion.DATETIME_AUTO: + if not (_min_datetime_ms(tz) <= millis <= _max_datetime_ms(tz)): + return DatetimeMS(millis) + + diff = ((millis % 1000) + 1000) % 1000 + seconds = (millis - diff) // 1000 + micros = diff * 1000 + + try: + if opts.tz_aware: + dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds, microseconds=micros) + if opts.tzinfo: + dt = dt.astimezone(tz) + return dt + else: + return EPOCH_NAIVE + datetime.timedelta(seconds=seconds, microseconds=micros) + except ArithmeticError as err: + raise InvalidBSON(f"{err} {_DATETIME_ERROR_SUGGESTION}") from err + + elif opts.datetime_conversion == DatetimeConversion.DATETIME_MS: + return DatetimeMS(millis) + else: + raise ValueError("datetime_conversion must be an element of DatetimeConversion") diff --git a/bson/dbref.py b/bson/dbref.py index 7849435f23..40bdb73cff 100644 --- a/bson/dbref.py +++ b/bson/dbref.py @@ -13,6 +13,7 @@ # limitations under the License. """Tools for manipulating DBRefs (references to MongoDB documents).""" +from __future__ import annotations from copy import deepcopy from typing import Any, Mapping, Optional @@ -21,7 +22,7 @@ from bson.son import SON -class DBRef(object): +class DBRef: """A reference to a document stored in MongoDB.""" __slots__ = "__collection", "__id", "__database", "__kwargs" @@ -36,29 +37,28 @@ def __init__( id: Any, database: Optional[str] = None, _extra: Optional[Mapping[str, Any]] = None, - **kwargs: Any + **kwargs: Any, ) -> None: """Initialize a new :class:`DBRef`. Raises :class:`TypeError` if `collection` or `database` is not - an instance of :class:`basestring` (:class:`str` in python 3). - `database` is optional and allows references to documents to work - across databases. Any additional keyword arguments will create - additional fields in the resultant embedded document. - - :Parameters: - - `collection`: name of the collection the document is stored in - - `id`: the value of the document's ``"_id"`` field - - `database` (optional): name of the database to reference - - `**kwargs` (optional): additional keyword arguments will + an instance of :class:`str`. `database` is optional and allows + references to documents to work across databases. Any additional + keyword arguments will create additional fields in the resultant + embedded document. 
+ + :param collection: name of the collection the document is stored in + :param id: the value of the document's ``"_id"`` field + :param database: name of the database to reference + :param kwargs: additional keyword arguments will create additional, custom fields .. seealso:: The MongoDB documentation on `dbrefs `_. """ if not isinstance(collection, str): - raise TypeError("collection must be an instance of str") + raise TypeError(f"collection must be an instance of str, not {type(collection)}") if database is not None and not isinstance(database, str): - raise TypeError("database must be an instance of str") + raise TypeError(f"database must be an instance of str, not {type(database)}") self.__collection = collection self.__id = id @@ -88,7 +88,7 @@ def __getattr__(self, key: Any) -> Any: try: return self.__kwargs[key] except KeyError: - raise AttributeError(key) + raise AttributeError(key) from None def as_doc(self) -> SON[str, Any]: """Get the SON document representation of this DBRef. @@ -101,11 +101,11 @@ def as_doc(self) -> SON[str, Any]: doc.update(self.__kwargs) return doc - def __repr__(self): - extra = "".join([", %s=%r" % (k, v) for k, v in self.__kwargs.items()]) + def __repr__(self) -> str: + extra = "".join([f", {k}={v!r}" for k, v in self.__kwargs.items()]) if self.database is None: - return "DBRef(%r, %r%s)" % (self.collection, self.id, extra) - return "DBRef(%r, %r, %r%s)" % (self.collection, self.id, self.database, extra) + return f"DBRef({self.collection!r}, {self.id!r}{extra})" + return f"DBRef({self.collection!r}, {self.id!r}, {self.database!r}{extra})" def __eq__(self, other: Any) -> bool: if isinstance(other, DBRef): @@ -123,7 +123,7 @@ def __hash__(self) -> int: (self.__collection, self.__id, self.__database, tuple(sorted(self.__kwargs.items()))) ) - def __deepcopy__(self, memo: Any) -> "DBRef": + def __deepcopy__(self, memo: Any) -> DBRef: """Support function for `copy.deepcopy()`.""" return DBRef( deepcopy(self.__collection, memo), diff --git a/bson/decimal128.py b/bson/decimal128.py index ab2d1a24ac..7480f94d0a 100644 --- a/bson/decimal128.py +++ b/bson/decimal128.py @@ -16,11 +16,15 @@ .. versionadded:: 3.4 """ +from __future__ import annotations import decimal import struct +from decimal import Decimal from typing import Any, Sequence, Tuple, Type, Union +from bson.codec_options import TypeDecoder, TypeEncoder + _PACK_64 = struct.Struct(" Type[Decimal]: + return Decimal + + def transform_python(self, value: Any) -> Decimal128: + return Decimal128(value) + + +class DecimalDecoder(TypeDecoder): + """Converts BSON :class:`Decimal128` to Python :class:`decimal.Decimal`. + + For example:: + opts = CodecOptions(type_registry=TypeRegistry([DecimalDecoder()])) + bson.decode(data, codec_options=opts) + + .. versionadded:: 4.15 + """ + + @property + def bson_type(self) -> Type[Decimal128]: + return Decimal128 + + def transform_bson(self, value: Any) -> decimal.Decimal: + return value.to_decimal() + + def create_decimal128_context() -> decimal.Context: """Returns an instance of :class:`decimal.Context` appropriate for working with IEEE-754 128-bit decimal floating point values. @@ -69,8 +109,7 @@ def create_decimal128_context() -> decimal.Context: def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: """Converts a decimal.Decimal to BID (high bits, low bits). 
- :Parameters: - - `value`: An instance of decimal.Decimal + :param value: An instance of decimal.Decimal """ with decimal.localcontext(_DEC128_CTX) as ctx: value = ctx.create_decimal(value) @@ -100,7 +139,7 @@ def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: if significand & (1 << i): high |= 1 << (i - 64) - biased_exponent = exponent + _EXPONENT_BIAS + biased_exponent = exponent + _EXPONENT_BIAS # type: ignore[operator] if high >> 49 == 1: high = high & 0x7FFFFFFFFFFF @@ -115,7 +154,7 @@ def _decimal_to_128(value: _VALUE_OPTIONS) -> Tuple[int, int]: return high, low -class Decimal128(object): +class Decimal128: """BSON Decimal128 type:: >>> Decimal128(Decimal("0.0005")) @@ -125,8 +164,7 @@ class Decimal128(object): >>> Decimal128((3474527112516337664, 5)) Decimal128('0.0005') - :Parameters: - - `value`: An instance of :class:`decimal.Decimal`, string, or tuple of + :param value: An instance of :class:`decimal.Decimal`, string, or tuple of (high bits, low bits) from Binary Integer Decimal (BID) format. .. note:: :class:`~Decimal128` uses an instance of :class:`decimal.Context` @@ -224,9 +262,9 @@ def __init__(self, value: _VALUE_OPTIONS) -> None: "from list or tuple. Must have exactly 2 " "elements." ) - self.__high, self.__low = value # type: ignore + self.__high, self.__low = value else: - raise TypeError("Cannot convert %r to Decimal128" % (value,)) + raise TypeError(f"Cannot convert {value!r} to Decimal128") def to_decimal(self) -> decimal.Decimal: """Returns an instance of :class:`decimal.Decimal` for this @@ -270,16 +308,15 @@ def to_decimal(self) -> decimal.Decimal: return ctx.create_decimal((sign, digits, exponent)) @classmethod - def from_bid(cls: Type["Decimal128"], value: bytes) -> "Decimal128": + def from_bid(cls: Type[Decimal128], value: bytes) -> Decimal128: """Create an instance of :class:`Decimal128` from Binary Integer Decimal string. - :Parameters: - - `value`: 16 byte string (128-bit IEEE 754-2008 decimal floating + :param value: 16 byte string (128-bit IEEE 754-2008 decimal floating point in Binary Integer Decimal (BID) format). """ if not isinstance(value, bytes): - raise TypeError("value must be an instance of bytes") + raise TypeError(f"value must be an instance of bytes, not {type(value)}") if len(value) != 16: raise ValueError("value must be exactly 16 bytes") return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[:8])[0])) # type: ignore @@ -296,8 +333,8 @@ def __str__(self) -> str: return "NaN" return str(dec) - def __repr__(self): - return "Decimal128('%s')" % (str(self),) + def __repr__(self) -> str: + return f"Decimal128('{self!s}')" def __setstate__(self, value: Tuple[int, int]) -> None: self.__high, self.__low = value diff --git a/bson/errors.py b/bson/errors.py index 7333b27b58..ffc117f7ac 100644 --- a/bson/errors.py +++ b/bson/errors.py @@ -13,6 +13,9 @@ # limitations under the License. """Exceptions raised by the BSON package.""" +from __future__ import annotations + +from typing import Any, Optional class BSONError(Exception): @@ -30,6 +33,17 @@ class InvalidStringData(BSONError): class InvalidDocument(BSONError): """Raised when trying to create a BSON object from an invalid document.""" + def __init__(self, message: str, document: Optional[Any] = None) -> None: + super().__init__(message) + self._document = document + + @property + def document(self) -> Any: + """The invalid document that caused the error. 
+
+        .. versionadded:: 4.16"""
+        return self._document
+

 class InvalidId(BSONError):
     """Raised when trying to create an ObjectId from invalid data."""

diff --git a/bson/int64.py b/bson/int64.py
index ed4dfa5661..5846504a2d 100644
--- a/bson/int64.py
+++ b/bson/int64.py
@@ -13,6 +13,7 @@
 # limitations under the License.

 """A BSON wrapper for long (int in python3)"""
+from __future__ import annotations

 from typing import Any

@@ -24,8 +25,7 @@ class Int64(int):
     Python 3. Small integral numbers are encoded to BSON int32 by default,
     but Int64 numbers will always be encoded to BSON int64.

-    :Parameters:
-      - `value`: the numeric value to represent
+    :param value: the numeric value to represent
     """

     __slots__ = ()
diff --git a/bson/json_util.py b/bson/json_util.py
index 369c3d5f4a..8151226a26 100644
--- a/bson/json_util.py
+++ b/bson/json_util.py
@@ -22,14 +22,16 @@
 when :const:`CANONICAL_JSON_OPTIONS` or :const:`LEGACY_JSON_OPTIONS` is
 provided, respectively.

-.. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json.rst
+.. _Extended JSON: https://github.com/mongodb/specifications/blob/master/source/extended-json/extended-json.md

 Example usage (deserialization):

 .. doctest::

    >>> from bson.json_util import loads
-   >>> loads('[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]')
+   >>> loads(
+   ...     '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$scope": {}, "$code": "function x() { return 1; }"}}, {"bin": {"$type": "80", "$binary": "AQIDBA=="}}]'
+   ... )
   [{'foo': [1, 2]}, {'bar': {'hello': 'world'}}, {'code': Code('function x() { return 1; }', {})}, {'bin': Binary(b'...', 128)}]

 Example usage with :const:`RELAXED_JSON_OPTIONS` (the default):
@@ -38,10 +40,14 @@

    >>> from bson import Binary, Code
    >>> from bson.json_util import dumps
-   >>> dumps([{'foo': [1, 2]},
-   ...        {'bar': {'hello': 'world'}},
-   ...        {'code': Code("function x() { return 1; }")},
-   ...        {'bin': Binary(b"\x01\x02\x03\x04")}])
+   >>> dumps(
+   ...     [
+   ...         {"foo": [1, 2]},
+   ...         {"bar": {"hello": "world"}},
+   ...         {"code": Code("function x() { return 1; }")},
+   ...         {"bin": Binary(b"\x01\x02\x03\x04")},
+   ...     ]
+   ... )
   '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]'

 Example usage (with :const:`CANONICAL_JSON_OPTIONS`):
@@ -50,11 +56,15 @@

    >>> from bson import Binary, Code
    >>> from bson.json_util import dumps, CANONICAL_JSON_OPTIONS
-   >>> dumps([{'foo': [1, 2]},
-   ...        {'bar': {'hello': 'world'}},
-   ...        {'code': Code("function x() { return 1; }")},
-   ...        {'bin': Binary(b"\x01\x02\x03\x04")}],
-   ...       json_options=CANONICAL_JSON_OPTIONS)
+   >>> dumps(
+   ...     [
+   ...         {"foo": [1, 2]},
+   ...         {"bar": {"hello": "world"}},
+   ...         {"code": Code("function x() { return 1; }")},
+   ...         {"bin": Binary(b"\x01\x02\x03\x04")},
+   ...     ],
+   ...     json_options=CANONICAL_JSON_OPTIONS,
+   ... )
   '[{"foo": [{"$numberInt": "1"}, {"$numberInt": "2"}]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }"}}, {"bin": {"$binary": {"base64": "AQIDBA==", "subType": "00"}}}]'

 Example usage (with :const:`LEGACY_JSON_OPTIONS`):
@@ -63,11 +73,15 @@

    >>> from bson import Binary, Code
    >>> from bson.json_util import dumps, LEGACY_JSON_OPTIONS
-   >>> dumps([{'foo': [1, 2]},
-   ...        {'bar': {'hello': 'world'}},
-   ...        {'code': Code("function x() { return 1; }", {})},
-   ...
{'bin': Binary(b"\x01\x02\x03\x04")}], - ... json_options=LEGACY_JSON_OPTIONS) + >>> dumps( + ... [ + ... {"foo": [1, 2]}, + ... {"bar": {"hello": "world"}}, + ... {"code": Code("function x() { return 1; }", {})}, + ... {"bin": Binary(b"\x01\x02\x03\x04")}, + ... ], + ... json_options=LEGACY_JSON_OPTIONS, + ... ) '[{"foo": [1, 2]}, {"bar": {"hello": "world"}}, {"code": {"$code": "function x() { return 1; }", "$scope": {}}}, {"bin": {"$binary": "AQIDBA==", "$type": "00"}}]' Alternatively, you can manually pass the `default` to :func:`json.dumps`. @@ -85,6 +99,7 @@ `libbson `_. `python-bsonjs` works best with PyMongo when using :class:`~bson.raw_bson.RawBSONDocument`. """ +from __future__ import annotations import base64 import datetime @@ -92,13 +107,30 @@ import math import re import uuid -from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Type, Union, cast +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Mapping, + MutableMapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) -import bson -from bson import EPOCH_AWARE from bson.binary import ALL_UUID_SUBTYPES, UUID_SUBTYPE, Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions +from bson.codec_options import CodecOptions, DatetimeConversion +from bson.datetime_ms import ( + _MAX_UTC_MS, + EPOCH_AWARE, + DatetimeMS, + _datetime_to_millis, + _millis_to_datetime, +) from bson.dbref import DBRef from bson.decimal128 import Decimal128 from bson.int64 import Int64 @@ -106,7 +138,7 @@ from bson.min_key import MinKey from bson.objectid import ObjectId from bson.regex import Regex -from bson.son import RE_TYPE, SON +from bson.son import RE_TYPE from bson.timestamp import Timestamp from bson.tz_util import utc @@ -199,68 +231,86 @@ class JSONMode: """ -class JSONOptions(CodecOptions): - """Encapsulates JSON options for :func:`dumps` and :func:`loads`. - - :Parameters: - - `strict_number_long`: If ``True``, :class:`~bson.int64.Int64` objects - are encoded to MongoDB Extended JSON's *Strict mode* type - `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they - will be encoded as an `int`. Defaults to ``False``. - - `datetime_representation`: The representation to use when encoding - instances of :class:`datetime.datetime`. Defaults to - :const:`~DatetimeRepresentation.LEGACY`. - - `strict_uuid`: If ``True``, :class:`uuid.UUID` object are encoded to - MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it - will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. - - `json_mode`: The :class:`JSONMode` to use when encoding BSON types to - Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. - - `document_class`: BSON documents returned by :func:`loads` will be - decoded to an instance of this class. Must be a subclass of - :class:`collections.MutableMapping`. Defaults to :class:`dict`. - - `uuid_representation`: The :class:`~bson.binary.UuidRepresentation` - to use when encoding and decoding instances of :class:`uuid.UUID`. - Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - `tz_aware`: If ``True``, MongoDB Extended JSON's *Strict mode* type - `Date` will be decoded to timezone aware instances of - :class:`datetime.datetime`. Otherwise they will be naive. Defaults - to ``False``. - - `tzinfo`: A :class:`datetime.tzinfo` subclass that specifies the - timezone from which :class:`~datetime.datetime` objects should be - decoded. Defaults to :const:`~bson.tz_util.utc`. 
- - `args`: arguments to :class:`~bson.codec_options.CodecOptions` - - `kwargs`: arguments to :class:`~bson.codec_options.CodecOptions` - - .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. +if TYPE_CHECKING: + _BASE_CLASS = CodecOptions[MutableMapping[str, Any]] +else: + _BASE_CLASS = CodecOptions - .. versionchanged:: 4.0 - The default for `json_mode` was changed from :const:`JSONMode.LEGACY` - to :const:`JSONMode.RELAXED`. - The default for `uuid_representation` was changed from - :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to - :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. - - .. versionchanged:: 3.5 - Accepts the optional parameter `json_mode`. +_INT32_MAX = 2**31 - .. versionchanged:: 4.0 - Changed default value of `tz_aware` to False. - """ +class JSONOptions(_BASE_CLASS): json_mode: int strict_number_long: bool datetime_representation: int strict_uuid: bool + document_class: Type[MutableMapping[str, Any]] + + def __init__(self, *args: Any, **kwargs: Any): + """Encapsulates JSON options for :func:`dumps` and :func:`loads`. + + :param strict_number_long: If ``True``, :class:`~bson.int64.Int64` objects + are encoded to MongoDB Extended JSON's *Strict mode* type + `NumberLong`, ie ``'{"$numberLong": "" }'``. Otherwise they + will be encoded as an `int`. Defaults to ``False``. + :param datetime_representation: The representation to use when encoding + instances of :class:`datetime.datetime`. Defaults to + :const:`~DatetimeRepresentation.LEGACY`. + :param strict_uuid: If ``True``, :class:`uuid.UUID` object are encoded to + MongoDB Extended JSON's *Strict mode* type `Binary`. Otherwise it + will be encoded as ``'{"$uuid": "" }'``. Defaults to ``False``. + :param json_mode: The :class:`JSONMode` to use when encoding BSON types to + Extended JSON. Defaults to :const:`~JSONMode.LEGACY`. + :param document_class: BSON documents returned by :func:`loads` will be + decoded to an instance of this class. Must be a subclass of + :class:`collections.MutableMapping`. Defaults to :class:`dict`. + :param uuid_representation: The :class:`~bson.binary.UuidRepresentation` + to use when encoding and decoding instances of :class:`uuid.UUID`. + Defaults to :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + :param tz_aware: If ``True``, MongoDB Extended JSON's *Strict mode* type + `Date` will be decoded to timezone aware instances of + :class:`datetime.datetime`. Otherwise they will be naive. Defaults + to ``False``. + :param tzinfo: A :class:`datetime.tzinfo` subclass that specifies the + timezone from which :class:`~datetime.datetime` objects should be + decoded. Defaults to :const:`~bson.tz_util.utc`. + :param datetime_conversion: Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + `handling out of range datetimes `_ for details. + :param args: arguments to :class:`~bson.codec_options.CodecOptions` + :param kwargs: arguments to :class:`~bson.codec_options.CodecOptions` + + .. seealso:: The specification for Relaxed and Canonical `Extended JSON`_. + + .. versionchanged:: 4.0 + The default for `json_mode` was changed from :const:`JSONMode.LEGACY` + to :const:`JSONMode.RELAXED`. 
+ The default for `uuid_representation` was changed from + :const:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` to + :const:`~bson.binary.UuidRepresentation.UNSPECIFIED`. + + .. versionchanged:: 3.5 + Accepts the optional parameter `json_mode`. + + .. versionchanged:: 4.0 + Changed default value of `tz_aware` to False. + """ + super().__init__() def __new__( - cls: Type["JSONOptions"], + cls: Type[JSONOptions], strict_number_long: Optional[bool] = None, datetime_representation: Optional[int] = None, strict_uuid: Optional[bool] = None, json_mode: int = JSONMode.RELAXED, *args: Any, - **kwargs: Any - ) -> "JSONOptions": + **kwargs: Any, + ) -> JSONOptions: kwargs["tz_aware"] = kwargs.get("tz_aware", False) if kwargs["tz_aware"]: kwargs["tzinfo"] = kwargs.get("tzinfo", utc) @@ -274,7 +324,7 @@ def __new__( "JSONOptions.datetime_representation must be one of LEGACY, " "NUMBERLONG, or ISO8601 from DatetimeRepresentation." ) - self = cast(JSONOptions, super(JSONOptions, cls).__new__(cls, *args, **kwargs)) + self = cast(JSONOptions, super().__new__(cls, *args, **kwargs)) if json_mode not in (JSONMode.LEGACY, JSONMode.RELAXED, JSONMode.CANONICAL): raise ValueError( "JSONOptions.json_mode must be one of LEGACY, RELAXED, " @@ -321,21 +371,20 @@ def __new__( def _arguments_repr(self) -> str: return ( - "strict_number_long=%r, " - "datetime_representation=%r, " - "strict_uuid=%r, json_mode=%r, %s" - % ( + "strict_number_long={!r}, " + "datetime_representation={!r}, " + "strict_uuid={!r}, json_mode={!r}, {}".format( self.strict_number_long, self.datetime_representation, self.strict_uuid, self.json_mode, - super(JSONOptions, self)._arguments_repr(), + super()._arguments_repr(), ) ) - def _options_dict(self) -> Dict[Any, Any]: + def _options_dict(self) -> dict[Any, Any]: # TODO: PYTHON-2442 use _asdict() instead - options_dict = super(JSONOptions, self)._options_dict() + options_dict = super()._options_dict() options_dict.update( { "strict_number_long": self.strict_number_long, @@ -346,7 +395,7 @@ def _options_dict(self) -> Dict[Any, Any]: ) return options_dict - def with_options(self, **kwargs: Any) -> "JSONOptions": + def with_options(self, **kwargs: Any) -> JSONOptions: """ Make a copy of this JSONOptions, overriding some options:: @@ -409,8 +458,7 @@ def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: Recursive function that handles all BSON types including :class:`~bson.binary.Binary` and :class:`~bson.code.Code`. - :Parameters: - - `json_options`: A :class:`JSONOptions` instance used to modify the + :param json_options: A :class:`JSONOptions` instance used to modify the encoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. @@ -425,7 +473,7 @@ def dumps(obj: Any, *args: Any, **kwargs: Any) -> str: return json.dumps(_json_convert(obj, json_options), *args, **kwargs) -def loads(s: str, *args: Any, **kwargs: Any) -> Any: +def loads(s: Union[str, bytes, bytearray], *args: Any, **kwargs: Any) -> Any: """Helper function that wraps :func:`json.loads`. Automatically passes the object_hook for BSON type conversion. @@ -433,8 +481,7 @@ def loads(s: str, *args: Any, **kwargs: Any) -> Any: Raises ``TypeError``, ``ValueError``, ``KeyError``, or :exc:`~bson.errors.InvalidId` on invalid MongoDB Extended JSON. - :Parameters: - - `json_options`: A :class:`JSONOptions` instance used to modify the + :param json_options: A :class:`JSONOptions` instance used to modify the decoding of MongoDB Extended JSON types. Defaults to :const:`DEFAULT_JSON_OPTIONS`. 
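
Worth noting between these two hunks: because JSONOptions now inherits the datetime_conversion slot from CodecOptions, the out-of-range datetime handling described earlier applies to Extended JSON decoding as well. A small sketch, assuming the DatetimeMS type introduced earlier in this patch:

    from bson.json_util import loads, JSONOptions
    from bson.codec_options import DatetimeConversion

    opts = JSONOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
    doc = loads('{"d": {"$date": {"$numberLong": "253402300800000"}}}', json_options=opts)
    print(repr(doc["d"]))  # DatetimeMS(253402300800000), one ms past datetime.max
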
@@ -452,7 +499,11 @@ def loads(s: str, *args: Any, **kwargs: Any) -> Any: Accepts optional parameter `json_options`. See :class:`JSONOptions`. """ json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS) - kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) + # Execution time optimization if json_options.document_class is dict + if json_options.document_class is dict: + kwargs["object_hook"] = lambda obj: object_hook(obj, json_options) + else: + kwargs["object_pairs_hook"] = lambda pairs: object_pairs_hook(pairs, json_options) return json.loads(s, *args, **kwargs) @@ -461,9 +512,9 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> converted into json. """ if hasattr(obj, "items"): - return SON(((k, _json_convert(v, json_options)) for k, v in obj.items())) + return {k: _json_convert(v, json_options) for k, v in obj.items()} elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): - return list((_json_convert(v, json_options) for v in obj)) + return [_json_convert(v, json_options) for v in obj] try: return default(obj, json_options) except TypeError: @@ -473,58 +524,21 @@ def _json_convert(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> def object_pairs_hook( pairs: Sequence[Tuple[str, Any]], json_options: JSONOptions = DEFAULT_JSON_OPTIONS ) -> Any: - return object_hook(json_options.document_class(pairs), json_options) + return object_hook(json_options.document_class(pairs), json_options) # type:ignore[call-arg] def object_hook(dct: Mapping[str, Any], json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: - if "$oid" in dct: - return _parse_canonical_oid(dct) - if ( - isinstance(dct.get("$ref"), str) - and "$id" in dct - and isinstance(dct.get("$db"), (str, type(None))) - ): - return _parse_canonical_dbref(dct) - if "$date" in dct: - return _parse_canonical_datetime(dct, json_options) - if "$regex" in dct: - return _parse_legacy_regex(dct) - if "$minKey" in dct: - return _parse_canonical_minkey(dct) - if "$maxKey" in dct: - return _parse_canonical_maxkey(dct) - if "$binary" in dct: - if "$type" in dct: - return _parse_legacy_binary(dct, json_options) - else: - return _parse_canonical_binary(dct, json_options) - if "$code" in dct: - return _parse_canonical_code(dct) - if "$uuid" in dct: - return _parse_legacy_uuid(dct, json_options) - if "$undefined" in dct: - return None - if "$numberLong" in dct: - return _parse_canonical_int64(dct) - if "$timestamp" in dct: - tsp = dct["$timestamp"] - return Timestamp(tsp["t"], tsp["i"]) - if "$numberDecimal" in dct: - return _parse_canonical_decimal128(dct) - if "$dbPointer" in dct: - return _parse_canonical_dbpointer(dct) - if "$regularExpression" in dct: - return _parse_canonical_regex(dct) - if "$symbol" in dct: - return _parse_canonical_symbol(dct) - if "$numberInt" in dct: - return _parse_canonical_int32(dct) - if "$numberDouble" in dct: - return _parse_canonical_double(dct) + match = None + for k in dct: + if k in _PARSERS_SET: + match = k + break + if match: + return _PARSERS[match](dct, json_options) return dct -def _parse_legacy_regex(doc: Any) -> Any: +def _parse_legacy_regex(doc: Any, dummy0: Any) -> Any: pattern = doc["$regex"] # Check if this is the $regex query operator. 
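
Stepping back from the regex parser for a moment, the object_hook rewrite above trades a ladder of per-key membership checks for one pass over the document's keys plus a table lookup, so cost no longer grows with the number of recognized "$"-prefixed types. A toy reduction of that shape (all names here are hypothetical, not part of the module):

    from typing import Any, Callable

    # Hypothetical, trimmed-down analogue of the _PARSERS dispatch table.
    _TOY_PARSERS: dict[str, Callable[[Any], Any]] = {
        "$numberInt": lambda d: int(d["$numberInt"]),
        "$numberDouble": lambda d: float(d["$numberDouble"]),
    }
    _TOY_SET = set(_TOY_PARSERS)

    def toy_object_hook(dct: dict) -> Any:
        # One scan of the document's keys; dispatch on the first match.
        for k in dct:
            if k in _TOY_SET:
                return _TOY_PARSERS[k](dct)
        return dct

    print(toy_object_hook({"$numberInt": "42"}))  # 42
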
     if not isinstance(pattern, (str, bytes)):
@@ -539,9 +553,9 @@ def _parse_legacy_regex(doc: Any) -> Any:
 def _parse_legacy_uuid(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]:
     """Decode a JSON legacy $uuid to Python UUID."""
     if len(doc) != 1:
-        raise TypeError("Bad $uuid, extra field(s): %s" % (doc,))
+        raise TypeError(f"Bad $uuid, extra field(s): {doc}")
     if not isinstance(doc["$uuid"], str):
-        raise TypeError("$uuid must be a string: %s" % (doc,))
+        raise TypeError(f"$uuid must be a string: {doc}")
     if json_options.uuid_representation == UuidRepresentation.UNSPECIFIED:
         return Binary.from_uuid(uuid.UUID(doc["$uuid"]))
     else:
@@ -584,42 +598,47 @@ def _parse_canonical_binary(doc: Any, json_options: JSONOptions) -> Union[Binary
     b64 = binary["base64"]
     subtype = binary["subType"]
     if not isinstance(b64, str):
-        raise TypeError("$binary base64 must be a string: %s" % (doc,))
+        raise TypeError(f"$binary base64 must be a string: {doc}")
     if not isinstance(subtype, str) or len(subtype) > 2:
-        raise TypeError("$binary subType must be a string at most 2 characters: %s" % (doc,))
+        raise TypeError(f"$binary subType must be a string at most 2 characters: {doc}")
     if len(binary) != 2:
-        raise TypeError('$binary must include only "base64" and "subType" components: %s' % (doc,))
+        raise TypeError(f'$binary must include only "base64" and "subType" components: {doc}')

     data = base64.b64decode(b64.encode())
     return _binary_or_uuid(data, int(subtype, 16), json_options)


-def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.datetime:
+def _parse_canonical_datetime(
+    doc: Any, json_options: JSONOptions
+) -> Union[datetime.datetime, DatetimeMS]:
     """Decode a JSON datetime to python datetime.datetime."""
     dtm = doc["$date"]
     if len(doc) != 1:
-        raise TypeError("Bad $date, extra field(s): %s" % (doc,))
+        raise TypeError(f"Bad $date, extra field(s): {doc}")
     # mongoexport 2.6 and newer
     if isinstance(dtm, str):
-        # Parse offset
-        if dtm[-1] == "Z":
-            dt = dtm[:-1]
-            offset = "Z"
-        elif dtm[-6] in ("+", "-") and dtm[-3] == ":":
-            # (+|-)HH:MM
-            dt = dtm[:-6]
-            offset = dtm[-6:]
-        elif dtm[-5] in ("+", "-"):
-            # (+|-)HHMM
-            dt = dtm[:-5]
-            offset = dtm[-5:]
-        elif dtm[-3] in ("+", "-"):
-            # (+|-)HH
-            dt = dtm[:-3]
-            offset = dtm[-3:]
-        else:
-            dt = dtm
-            offset = ""
+        try:
+            # Parse offset
+            if dtm[-1] == "Z":
+                dt = dtm[:-1]
+                offset = "Z"
+            elif dtm[-6] in ("+", "-") and dtm[-3] == ":":
+                # (+|-)HH:MM
+                dt = dtm[:-6]
+                offset = dtm[-6:]
+            elif dtm[-5] in ("+", "-"):
+                # (+|-)HHMM
+                dt = dtm[:-5]
+                offset = dtm[-5:]
+            elif dtm[-3] in ("+", "-"):
+                # (+|-)HH
+                dt = dtm[:-3]
+                offset = dtm[-3:]
+            else:
+                dt = dtm
+                offset = ""
+        except IndexError as exc:
+            raise ValueError(f"time data {dtm!r} does not match ISO-8601 datetime format") from exc

     # Parse the optional fractional seconds portion.
     dot_index = dt.rfind(".")
@@ -647,44 +666,48 @@ def _parse_canonical_datetime(doc: Any, json_options: JSONOptions) -> datetime.d
         if json_options.tz_aware:
             if json_options.tzinfo:
                 aware = aware.astimezone(json_options.tzinfo)
+            if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS:
+                return DatetimeMS(aware)
             return aware
         else:
-            return aware.replace(tzinfo=None)
-    return bson._millis_to_datetime(int(dtm), json_options)
+            aware_tzinfo_none = aware.replace(tzinfo=None)
+            if json_options.datetime_conversion == DatetimeConversion.DATETIME_MS:
+                return DatetimeMS(aware_tzinfo_none)
+            return aware_tzinfo_none
+    return _millis_to_datetime(int(dtm), cast("CodecOptions[Any]", json_options))


-def _parse_canonical_oid(doc: Any) -> ObjectId:
+def _parse_canonical_oid(doc: Any, dummy0: Any) -> ObjectId:
     """Decode a JSON ObjectId to bson.objectid.ObjectId."""
     if len(doc) != 1:
-        raise TypeError("Bad $oid, extra field(s): %s" % (doc,))
+        raise TypeError(f"Bad $oid, extra field(s): {doc}")
     return ObjectId(doc["$oid"])


-def _parse_canonical_symbol(doc: Any) -> str:
+def _parse_canonical_symbol(doc: Any, dummy0: Any) -> str:
     """Decode a JSON symbol to Python string."""
     symbol = doc["$symbol"]
     if len(doc) != 1:
-        raise TypeError("Bad $symbol, extra field(s): %s" % (doc,))
+        raise TypeError(f"Bad $symbol, extra field(s): {doc}")
     return str(symbol)


-def _parse_canonical_code(doc: Any) -> Code:
+def _parse_canonical_code(doc: Any, dummy0: Any) -> Code:
     """Decode a JSON code to bson.code.Code."""
     for key in doc:
         if key not in ("$code", "$scope"):
-            raise TypeError("Bad $code, extra field(s): %s" % (doc,))
+            raise TypeError(f"Bad $code, extra field(s): {doc}")
     return Code(doc["$code"], scope=doc.get("$scope"))


-def _parse_canonical_regex(doc: Any) -> Regex:
+def _parse_canonical_regex(doc: Any, dummy0: Any) -> Regex[str]:
     """Decode a JSON regex to bson.regex.Regex."""
     regex = doc["$regularExpression"]
     if len(doc) != 1:
-        raise TypeError("Bad $regularExpression, extra field(s): %s" % (doc,))
+        raise TypeError(f"Bad $regularExpression, extra field(s): {doc}")
     if len(regex) != 2:
         raise TypeError(
-            'Bad $regularExpression must include only "pattern"'
-            'and "options" components: %s' % (doc,)
+            f'Bad $regularExpression must include only "pattern" and "options" components: {doc}'
         )
     opts = regex["options"]
     if not isinstance(opts, str):
@@ -694,174 +717,195 @@ def _parse_canonical_regex(doc: Any) -> Regex:
     return Regex(regex["pattern"], opts)


-def _parse_canonical_dbref(doc: Any) -> DBRef:
+def _parse_canonical_dbref(doc: Any, dummy0: Any) -> Any:
     """Decode a JSON DBRef to bson.dbref.DBRef."""
-    return DBRef(doc.pop("$ref"), doc.pop("$id"), database=doc.pop("$db", None), **doc)
+    if (
+        isinstance(doc.get("$ref"), str)
+        and "$id" in doc
+        and isinstance(doc.get("$db"), (str, type(None)))
+    ):
+        return DBRef(doc.pop("$ref"), doc.pop("$id"), database=doc.pop("$db", None), **doc)
+    return doc


-def _parse_canonical_dbpointer(doc: Any) -> Any:
+def _parse_canonical_dbpointer(doc: Any, dummy0: Any) -> Any:
     """Decode a JSON (deprecated) DBPointer to bson.dbref.DBRef."""
     dbref = doc["$dbPointer"]
     if len(doc) != 1:
-        raise TypeError("Bad $dbPointer, extra field(s): %s" % (doc,))
+        raise TypeError(f"Bad $dbPointer, extra field(s): {doc}")
     if isinstance(dbref, DBRef):
         dbref_doc = dbref.as_doc()
         # DBPointer must not contain $db in its value.
if dbref.database is not None: - raise TypeError("Bad $dbPointer, extra field $db: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, extra field $db: {dbref_doc}") if not isinstance(dbref.id, ObjectId): - raise TypeError("Bad $dbPointer, $id must be an ObjectId: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, $id must be an ObjectId: {dbref_doc}") if len(dbref_doc) != 2: - raise TypeError("Bad $dbPointer, extra field(s) in DBRef: %s" % (dbref_doc,)) + raise TypeError(f"Bad $dbPointer, extra field(s) in DBRef: {dbref_doc}") return dbref else: - raise TypeError("Bad $dbPointer, expected a DBRef: %s" % (doc,)) + raise TypeError(f"Bad $dbPointer, expected a DBRef: {doc}") -def _parse_canonical_int32(doc: Any) -> int: +def _parse_canonical_int32(doc: Any, dummy0: Any) -> int: """Decode a JSON int32 to python int.""" i_str = doc["$numberInt"] if len(doc) != 1: - raise TypeError("Bad $numberInt, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberInt, extra field(s): {doc}") if not isinstance(i_str, str): - raise TypeError("$numberInt must be string: %s" % (doc,)) + raise TypeError(f"$numberInt must be string: {doc}") return int(i_str) -def _parse_canonical_int64(doc: Any) -> Int64: +def _parse_canonical_int64(doc: Any, dummy0: Any) -> Int64: """Decode a JSON int64 to bson.int64.Int64.""" l_str = doc["$numberLong"] if len(doc) != 1: - raise TypeError("Bad $numberLong, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberLong, extra field(s): {doc}") return Int64(l_str) -def _parse_canonical_double(doc: Any) -> float: +def _parse_canonical_double(doc: Any, dummy0: Any) -> float: """Decode a JSON double to python float.""" d_str = doc["$numberDouble"] if len(doc) != 1: - raise TypeError("Bad $numberDouble, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberDouble, extra field(s): {doc}") if not isinstance(d_str, str): - raise TypeError("$numberDouble must be string: %s" % (doc,)) + raise TypeError(f"$numberDouble must be string: {doc}") return float(d_str) -def _parse_canonical_decimal128(doc: Any) -> Decimal128: +def _parse_canonical_decimal128(doc: Any, dummy0: Any) -> Decimal128: """Decode a JSON decimal128 to bson.decimal128.Decimal128.""" d_str = doc["$numberDecimal"] if len(doc) != 1: - raise TypeError("Bad $numberDecimal, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $numberDecimal, extra field(s): {doc}") if not isinstance(d_str, str): - raise TypeError("$numberDecimal must be string: %s" % (doc,)) + raise TypeError(f"$numberDecimal must be string: {doc}") return Decimal128(d_str) -def _parse_canonical_minkey(doc: Any) -> MinKey: +def _parse_canonical_minkey(doc: Any, dummy0: Any) -> MinKey: """Decode a JSON MinKey to bson.min_key.MinKey.""" - if type(doc["$minKey"]) is not int or doc["$minKey"] != 1: - raise TypeError("$minKey value must be 1: %s" % (doc,)) + if type(doc["$minKey"]) is not int or doc["$minKey"] != 1: # noqa: E721 + raise TypeError(f"$minKey value must be 1: {doc}") if len(doc) != 1: - raise TypeError("Bad $minKey, extra field(s): %s" % (doc,)) + raise TypeError(f"Bad $minKey, extra field(s): {doc}") return MinKey() -def _parse_canonical_maxkey(doc: Any) -> MaxKey: +def _parse_canonical_maxkey(doc: Any, dummy0: Any) -> MaxKey: """Decode a JSON MaxKey to bson.max_key.MaxKey.""" - if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1: + if type(doc["$maxKey"]) is not int or doc["$maxKey"] != 1: # noqa: E721 raise TypeError("$maxKey value must be 1: %s", (doc,)) if len(doc) != 1: - raise TypeError("Bad $minKey, extra 
field(s): %s" % (doc,))
+        raise TypeError(f"Bad $maxKey, extra field(s): {doc}")
     return MaxKey()


+def _parse_binary(doc: Any, json_options: JSONOptions) -> Union[Binary, uuid.UUID]:
+    if "$type" in doc:
+        return _parse_legacy_binary(doc, json_options)
+    else:
+        return _parse_canonical_binary(doc, json_options)
+
+
+def _parse_timestamp(doc: Any, dummy0: Any) -> Timestamp:
+    tsp = doc["$timestamp"]
+    return Timestamp(tsp["t"], tsp["i"])
+
+
+_PARSERS: dict[str, Callable[[Any, JSONOptions], Any]] = {
+    "$oid": _parse_canonical_oid,
+    "$ref": _parse_canonical_dbref,
+    "$date": _parse_canonical_datetime,
+    "$regex": _parse_legacy_regex,
+    "$minKey": _parse_canonical_minkey,
+    "$maxKey": _parse_canonical_maxkey,
+    "$binary": _parse_binary,
+    "$code": _parse_canonical_code,
+    "$uuid": _parse_legacy_uuid,
+    "$undefined": lambda _, _1: None,
+    "$numberLong": _parse_canonical_int64,
+    "$timestamp": _parse_timestamp,
+    "$numberDecimal": _parse_canonical_decimal128,
+    "$dbPointer": _parse_canonical_dbpointer,
+    "$regularExpression": _parse_canonical_regex,
+    "$symbol": _parse_canonical_symbol,
+    "$numberInt": _parse_canonical_int32,
+    "$numberDouble": _parse_canonical_double,
+}
+_PARSERS_SET = set(_PARSERS)
+
+
 def _encode_binary(data: bytes, subtype: int, json_options: JSONOptions) -> Any:
     if json_options.json_mode == JSONMode.LEGACY:
-        return SON([("$binary", base64.b64encode(data).decode()), ("$type", "%02x" % subtype)])
-    return {
-        "$binary": SON([("base64", base64.b64encode(data).decode()), ("subType", "%02x" % subtype)])
-    }
+        return {"$binary": base64.b64encode(data).decode(), "$type": "%02x" % subtype}
+    return {"$binary": {"base64": base64.b64encode(data).decode(), "subType": "%02x" % subtype}}


-def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any:
-    # We preserve key order when rendering SON, DBRef, etc. as JSON by
-    # returning a SON for those types instead of a dict.
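The ``_PARSERS`` table above replaces a long ``if``/``elif`` chain in the object hook: each ``$``-prefixed key maps directly to its decoder. A short sketch of two entries exercised through the public ``loads`` API (the values are arbitrary)::

    from bson.int64 import Int64
    from bson.json_util import loads
    from bson.timestamp import Timestamp

    doc = loads('{"n": {"$numberLong": "42"}, "ts": {"$timestamp": {"t": 1, "i": 2}}}')
    assert doc["n"] == Int64(42)         # dispatched to _parse_canonical_int64
    assert doc["ts"] == Timestamp(1, 2)  # dispatched to _parse_timestamp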
- if isinstance(obj, ObjectId): - return {"$oid": str(obj)} - if isinstance(obj, DBRef): - return _json_convert(obj.as_doc(), json_options=json_options) - if isinstance(obj, datetime.datetime): - if json_options.datetime_representation == DatetimeRepresentation.ISO8601: - if not obj.tzinfo: - obj = obj.replace(tzinfo=utc) - assert obj.tzinfo is not None - if obj >= EPOCH_AWARE: - off = obj.tzinfo.utcoffset(obj) - if (off.days, off.seconds, off.microseconds) == (0, 0, 0): # type: ignore - tz_string = "Z" - else: - tz_string = obj.strftime("%z") - millis = int(obj.microsecond / 1000) - fracsecs = ".%03d" % (millis,) if millis else "" - return { - "$date": "%s%s%s" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string) - } - - millis = bson._datetime_to_millis(obj) - if json_options.datetime_representation == DatetimeRepresentation.LEGACY: - return {"$date": millis} - return {"$date": {"$numberLong": str(millis)}} - if json_options.strict_number_long and isinstance(obj, Int64): +def _encode_datetimems(obj: Any, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + if ( + json_options.datetime_representation == DatetimeRepresentation.ISO8601 + and 0 <= int(obj) <= _MAX_UTC_MS + ): + return _encode_datetime(obj.as_datetime(), json_options) + elif json_options.datetime_representation == DatetimeRepresentation.LEGACY: + return {"$date": int(obj)} + return {"$date": {"$numberLong": str(int(obj))}} + + +def _encode_code(obj: Code, json_options: JSONOptions) -> dict: # type: ignore[type-arg] + if obj.scope is None: + return {"$code": str(obj)} + else: + return {"$code": str(obj), "$scope": _json_convert(obj.scope, json_options)} + + +def _encode_int64(obj: Int64, json_options: JSONOptions) -> Any: + if json_options.strict_number_long: return {"$numberLong": str(obj)} - if isinstance(obj, (RE_TYPE, Regex)): - flags = "" - if obj.flags & re.IGNORECASE: - flags += "i" - if obj.flags & re.LOCALE: - flags += "l" - if obj.flags & re.MULTILINE: - flags += "m" - if obj.flags & re.DOTALL: - flags += "s" - if obj.flags & re.UNICODE: - flags += "u" - if obj.flags & re.VERBOSE: - flags += "x" - if isinstance(obj.pattern, str): - pattern = obj.pattern - else: - pattern = obj.pattern.decode("utf-8") - if json_options.json_mode == JSONMode.LEGACY: - return SON([("$regex", pattern), ("$options", flags)]) - return {"$regularExpression": SON([("pattern", pattern), ("options", flags)])} - if isinstance(obj, MinKey): - return {"$minKey": 1} - if isinstance(obj, MaxKey): - return {"$maxKey": 1} - if isinstance(obj, Timestamp): - return {"$timestamp": SON([("t", obj.time), ("i", obj.inc)])} - if isinstance(obj, Code): - if obj.scope is None: - return {"$code": str(obj)} - return SON([("$code", str(obj)), ("$scope", _json_convert(obj.scope, json_options))]) - if isinstance(obj, Binary): - return _encode_binary(obj, obj.subtype, json_options) - if isinstance(obj, bytes): - return _encode_binary(obj, 0, json_options) - if isinstance(obj, uuid.UUID): - if json_options.strict_uuid: - binval = Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation) - return _encode_binary(binval, binval.subtype, json_options) - else: - return {"$uuid": obj.hex} - if isinstance(obj, Decimal128): - return {"$numberDecimal": str(obj)} - if isinstance(obj, bool): - return obj - if json_options.json_mode == JSONMode.CANONICAL and isinstance(obj, int): - if -(2**31) <= obj < 2**31: + else: + return int(obj) + + +def _encode_noop(obj: Any, dummy0: Any) -> Any: + return obj + + +def _encode_regex(obj: Any, 
json_options: JSONOptions) -> dict:  # type: ignore[type-arg]
+    flags = ""
+    if obj.flags & re.IGNORECASE:
+        flags += "i"
+    if obj.flags & re.LOCALE:
+        flags += "l"
+    if obj.flags & re.MULTILINE:
+        flags += "m"
+    if obj.flags & re.DOTALL:
+        flags += "s"
+    if obj.flags & re.UNICODE:
+        flags += "u"
+    if obj.flags & re.VERBOSE:
+        flags += "x"
+    if isinstance(obj.pattern, str):
+        pattern = obj.pattern
+    else:
+        pattern = obj.pattern.decode("utf-8")
+    if json_options.json_mode == JSONMode.LEGACY:
+        return {"$regex": pattern, "$options": flags}
+    return {"$regularExpression": {"pattern": pattern, "options": flags}}
+
+
+def _encode_int(obj: int, json_options: JSONOptions) -> Any:
+    if json_options.json_mode == JSONMode.CANONICAL:
+        if -_INT32_MAX <= obj < _INT32_MAX:
             return {"$numberInt": str(obj)}
         return {"$numberLong": str(obj)}
-    if json_options.json_mode != JSONMode.LEGACY and isinstance(obj, float):
+    return obj
+
+
+def _encode_float(obj: float, json_options: JSONOptions) -> Any:
+    if json_options.json_mode != JSONMode.LEGACY:
         if math.isnan(obj):
             return {"$numberDouble": "NaN"}
         elif math.isinf(obj):
@@ -871,4 +915,250 @@ def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any:
         # repr() will return the shortest string guaranteed to produce the
         # original value, when float() is called on it.
         return {"$numberDouble": str(repr(obj))}
+    return obj
+
+
+def _encode_datetime(obj: datetime.datetime, json_options: JSONOptions) -> dict:  # type: ignore[type-arg]
+    if json_options.datetime_representation == DatetimeRepresentation.ISO8601:
+        if not obj.tzinfo:
+            obj = obj.replace(tzinfo=utc)
+        assert obj.tzinfo is not None
+        if obj >= EPOCH_AWARE:
+            off = obj.tzinfo.utcoffset(obj)
+            if (off.days, off.seconds, off.microseconds) == (0, 0, 0):  # type: ignore
+                tz_string = "Z"
+            else:
+                tz_string = obj.strftime("%z")
+            millis = int(obj.microsecond / 1000)
+            fracsecs = ".%03d" % (millis,) if millis else ""
+            return {
+                "$date": "{}{}{}".format(obj.strftime("%Y-%m-%dT%H:%M:%S"), fracsecs, tz_string)
+            }
+
+    millis = _datetime_to_millis(obj)
+    if json_options.datetime_representation == DatetimeRepresentation.LEGACY:
+        return {"$date": millis}
+    return {"$date": {"$numberLong": str(millis)}}
+
+
+def _encode_bytes(obj: bytes, json_options: JSONOptions) -> dict:  # type: ignore[type-arg]
+    return _encode_binary(obj, 0, json_options)
+
+
+def _encode_binary_obj(obj: Binary, json_options: JSONOptions) -> dict:  # type: ignore[type-arg]
+    return _encode_binary(obj, obj.subtype, json_options)
+
+
+def _encode_uuid(obj: uuid.UUID, json_options: JSONOptions) -> dict:  # type: ignore[type-arg]
+    if json_options.strict_uuid:
+        binval = Binary.from_uuid(obj, uuid_representation=json_options.uuid_representation)
+        return _encode_binary(binval, binval.subtype, json_options)
+    else:
+        return {"$uuid": obj.hex}
+
+
+def _encode_objectid(obj: ObjectId, dummy0: Any) -> dict:  # type: ignore[type-arg]
+    return {"$oid": str(obj)}
+
+
+def _encode_timestamp(obj: Timestamp, dummy0: Any) -> dict:  # type: ignore[type-arg]
+    return {"$timestamp": {"t": obj.time, "i": obj.inc}}
+
+
+def _encode_decimal128(obj: Decimal128, dummy0: Any) -> dict:  # type: ignore[type-arg]
+    return {"$numberDecimal": str(obj)}
+
+
+def _encode_dbref(obj: DBRef, json_options: JSONOptions) -> dict:  # type: ignore[type-arg]
+    return _json_convert(obj.as_doc(), json_options=json_options)
+
+
+def _encode_minkey(dummy0: Any, dummy1: Any) -> dict:  # type: ignore[type-arg]
+    return {"$minKey": 1}
+
+
+def _encode_maxkey(dummy0: Any, dummy1: Any)
-> dict: # type: ignore[type-arg] + return {"$maxKey": 1} + + +# Encoders for BSON types +# Each encoder function's signature is: +# - obj: a Python data type, e.g. a Python int for _encode_int +# - json_options: a JSONOptions +_ENCODERS: dict[Type, Callable[[Any, JSONOptions], Any]] = { # type: ignore[type-arg] + bool: _encode_noop, + bytes: _encode_bytes, + datetime.datetime: _encode_datetime, + DatetimeMS: _encode_datetimems, + float: _encode_float, + int: _encode_int, + str: _encode_noop, + type(None): _encode_noop, + uuid.UUID: _encode_uuid, + Binary: _encode_binary_obj, + Int64: _encode_int64, + Code: _encode_code, + DBRef: _encode_dbref, + MaxKey: _encode_maxkey, + MinKey: _encode_minkey, + ObjectId: _encode_objectid, + Regex: _encode_regex, + RE_TYPE: _encode_regex, + Timestamp: _encode_timestamp, + Decimal128: _encode_decimal128, +} + +# Map each _type_marker to its encoder for faster lookup. +_MARKERS: dict[int, Callable[[Any, JSONOptions], Any]] = {} +for _typ in _ENCODERS: + if hasattr(_typ, "_type_marker"): + _MARKERS[_typ._type_marker] = _ENCODERS[_typ] + +_BUILT_IN_TYPES = tuple(t for t in _ENCODERS) + + +def default(obj: Any, json_options: JSONOptions = DEFAULT_JSON_OPTIONS) -> Any: + # First see if the type is already cached. KeyError will only ever + # happen once per subtype. + try: + return _ENCODERS[type(obj)](obj, json_options) + except KeyError: + pass + + # Second, fall back to trying _type_marker. This has to be done + # before the loop below since users could subclass one of our + # custom types that subclasses a python built-in (e.g. Binary) + if hasattr(obj, "_type_marker"): + marker = obj._type_marker + if marker in _MARKERS: + func = _MARKERS[marker] + # Cache this type for faster subsequent lookup. + _ENCODERS[type(obj)] = func + return func(obj, json_options) + + # Third, test each base type. This will only happen once for + # a subtype of a supported base type. + for base in _BUILT_IN_TYPES: + if isinstance(obj, base): + func = _ENCODERS[base] + # Cache this type for faster subsequent lookup. 
+ _ENCODERS[type(obj)] = func + return func(obj, json_options) + raise TypeError("%r is not JSON serializable" % obj) + + +def _get_str_size(obj: Any) -> int: + return len(obj) + + +def _get_datetime_size(obj: datetime.datetime) -> int: + return 5 + len(str(obj.time())) + + +def _get_regex_size(obj: Regex) -> int: # type: ignore[type-arg] + return 18 + len(obj.pattern) + + +def _get_dbref_size(obj: DBRef) -> int: + return 34 + len(obj.collection) + + +_CONSTANT_SIZE_TABLE: dict[Any, int] = { + ObjectId: 28, + int: 11, + Int64: 11, + Decimal128: 11, + Timestamp: 14, + MinKey: 8, + MaxKey: 8, +} + +_VARIABLE_SIZE_TABLE: dict[Any, Callable[[Any], int]] = { + str: _get_str_size, + bytes: _get_str_size, + datetime.datetime: _get_datetime_size, + Regex: _get_regex_size, + DBRef: _get_dbref_size, +} + + +def get_size(obj: Any, max_size: int, current_size: int = 0) -> int: + """Recursively finds size of objects""" + if current_size >= max_size: + return current_size + + obj_type = type(obj) + + # Check to see if the obj has a constant size estimate + try: + return _CONSTANT_SIZE_TABLE[obj_type] + except KeyError: + pass + + # Check to see if the obj has a variable but simple size estimate + try: + return _VARIABLE_SIZE_TABLE[obj_type](obj) + except KeyError: + pass + + # Special cases that require recursion + if obj_type == Code: + if obj.scope: + current_size += ( + 5 + get_size(obj.scope, max_size, current_size) + len(obj) - len(obj.scope) + ) + else: + current_size += 5 + len(obj) + elif obj_type == dict: + for k, v in obj.items(): + current_size += get_size(k, max_size, current_size) + current_size += get_size(v, max_size, current_size) + if current_size >= max_size: + return current_size + elif hasattr(obj, "__iter__"): + for i in obj: + current_size += get_size(i, max_size, current_size) + if current_size >= max_size: + return current_size + return current_size + + +def _truncate_documents(obj: Any, max_length: int) -> Tuple[Any, int]: + """Recursively truncate documents as needed to fit inside max_length characters.""" + if max_length <= 0: + return None, 0 + remaining = max_length + if hasattr(obj, "items"): + truncated: Any = {} + for k, v in obj.items(): + truncated_v, remaining = _truncate_documents(v, remaining) + if truncated_v: + truncated[k] = truncated_v + if remaining <= 0: + break + return truncated, remaining + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes)): + truncated: Any = [] # type:ignore[no-redef] + for v in obj: + truncated_v, remaining = _truncate_documents(v, remaining) + if truncated_v: + truncated.append(truncated_v) + if remaining <= 0: + break + return truncated, remaining + else: + return _truncate(obj, remaining) + + +def _truncate(obj: Any, remaining: int) -> Tuple[Any, int]: + size = get_size(obj, remaining) + + if size <= remaining: + return obj, remaining - size + else: + try: + truncated = obj[:remaining] + except TypeError: + truncated = obj + return truncated, remaining - size diff --git a/bson/max_key.py b/bson/max_key.py index b4f38d072e..445e12f519 100644 --- a/bson/max_key.py +++ b/bson/max_key.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MaxKey type. 
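Stepping back to the size helpers above: ``get_size`` gives a cheap per-value estimate, and ``_truncate_documents`` spends a character budget while preserving the document's shape. Both are private helpers of ``bson.json_util``, so this sketch (with a made-up document) is illustrative only::

    from bson.json_util import _truncate_documents

    doc = {"short": "ok", "long": "x" * 500, "nested": {"data": list(range(100))}}
    truncated, remaining = _truncate_documents(doc, 100)
    print(truncated)   # same shape, oversized values cut down to fit
    print(remaining)   # leftover budget; <= 0 once exhausted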
-""" +"""Representation for the MongoDB internal MaxKey type.""" +from __future__ import annotations + from typing import Any -class MaxKey(object): +class MaxKey: """MongoDB internal MaxKey type.""" __slots__ = () @@ -51,5 +52,5 @@ def __ge__(self, dummy: Any) -> bool: def __gt__(self, other: Any) -> bool: return not isinstance(other, MaxKey) - def __repr__(self): + def __repr__(self) -> str: return "MaxKey()" diff --git a/bson/min_key.py b/bson/min_key.py index babc655e43..37828dcf74 100644 --- a/bson/min_key.py +++ b/bson/min_key.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Representation for the MongoDB internal MinKey type. -""" +"""Representation for the MongoDB internal MinKey type.""" +from __future__ import annotations + from typing import Any -class MinKey(object): +class MinKey: """MongoDB internal MinKey type.""" __slots__ = () @@ -51,5 +52,5 @@ def __ge__(self, other: Any) -> bool: def __gt__(self, dummy: Any) -> bool: return False - def __repr__(self): + def __repr__(self) -> str: return "MinKey()" diff --git a/bson/objectid.py b/bson/objectid.py index 4bc0243532..970c4e52e8 100644 --- a/bson/objectid.py +++ b/bson/objectid.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with MongoDB ObjectIds. -""" +"""Tools for working with MongoDB ObjectIds.""" +from __future__ import annotations import binascii -import calendar import datetime import os import struct @@ -25,10 +24,14 @@ from random import SystemRandom from typing import Any, NoReturn, Optional, Type, Union +from bson.datetime_ms import _datetime_to_millis from bson.errors import InvalidId from bson.tz_util import utc _MAX_COUNTER_VALUE = 0xFFFFFF +_PACK_INT = struct.Struct(">I").pack +_PACK_INT_RANDOM = struct.Struct(">I5s").pack +_UNPACK_INT = struct.Struct(">I").unpack def _raise_invalid_id(oid: str) -> NoReturn: @@ -43,7 +46,7 @@ def _random_bytes() -> bytes: return os.urandom(5) -class ObjectId(object): +class ObjectId: """A MongoDB ObjectId.""" _pid = os.getpid() @@ -57,7 +60,7 @@ class ObjectId(object): _type_marker = 7 - def __init__(self, oid: Optional[Union[str, "ObjectId", bytes]] = None) -> None: + def __init__(self, oid: Optional[Union[str, ObjectId, bytes]] = None) -> None: """Initialize a new ObjectId. An ObjectId is a 12-byte unique identifier consisting of: @@ -84,8 +87,7 @@ def __init__(self, oid: Optional[Union[str, "ObjectId", bytes]] = None) -> None: Raises :class:`~bson.errors.InvalidId` if `oid` is not 12 bytes nor 24 hex digits, or :class:`TypeError` if `oid` is not an accepted type. - :Parameters: - - `oid` (optional): a valid ObjectId. + :param oid: a valid ObjectId. .. seealso:: The MongoDB documentation on `ObjectIds `_. @@ -103,7 +105,7 @@ def __init__(self, oid: Optional[Union[str, "ObjectId", bytes]] = None) -> None: self.__validate(oid) @classmethod - def from_datetime(cls: Type["ObjectId"], generation_time: datetime.datetime) -> "ObjectId": + def from_datetime(cls: Type[ObjectId], generation_time: datetime.datetime) -> ObjectId: """Create a dummy ObjectId instance with a specific generation time. 
This method is useful for doing range queries on a field @@ -126,23 +128,20 @@ def from_datetime(cls: Type["ObjectId"], generation_time: datetime.datetime) -> >>> dummy_id = ObjectId.from_datetime(gen_time) >>> result = collection.find({"_id": {"$lt": dummy_id}}) - :Parameters: - - `generation_time`: :class:`~datetime.datetime` to be used + :param generation_time: :class:`~datetime.datetime` to be used as the generation time for the resulting ObjectId. """ - offset = generation_time.utcoffset() - if offset is not None: - generation_time = generation_time - offset - timestamp = calendar.timegm(generation_time.timetuple()) - oid = struct.pack(">I", int(timestamp)) + b"\x00\x00\x00\x00\x00\x00\x00\x00" + oid = ( + _PACK_INT(_datetime_to_millis(generation_time) // 1000) + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + ) return cls(oid) @classmethod - def is_valid(cls: Type["ObjectId"], oid: Any) -> bool: + def is_valid(cls: Type[ObjectId], oid: Any) -> bool: """Checks if a `oid` string is valid or not. - :Parameters: - - `oid`: the object id to validate + :param oid: the object id to validate .. versionadded:: 2.3 """ @@ -166,30 +165,21 @@ def _random(cls) -> bytes: def __generate(self) -> None: """Generate a new value for this ObjectId.""" - - # 4 bytes current time - oid = struct.pack(">I", int(time.time())) - - # 5 bytes random - oid += ObjectId._random() - - # 3 bytes inc with ObjectId._inc_lock: - oid += struct.pack(">I", ObjectId._inc)[1:4] - ObjectId._inc = (ObjectId._inc + 1) % (_MAX_COUNTER_VALUE + 1) + inc = ObjectId._inc + ObjectId._inc = (inc + 1) % (_MAX_COUNTER_VALUE + 1) - self.__id = oid + # 4 bytes current time, 5 bytes random, 3 bytes inc. + self.__id = _PACK_INT_RANDOM(int(time.time()), ObjectId._random()) + _PACK_INT(inc)[1:4] def __validate(self, oid: Any) -> None: """Validate and use the given id for this ObjectId. - Raises TypeError if id is not an instance of - (:class:`basestring` (:class:`str` or :class:`bytes` - in python 3), ObjectId) and InvalidId if it is not a + Raises TypeError if id is not an instance of :class:`str`, + :class:`bytes`, or ObjectId. Raises InvalidId if it is not a valid ObjectId. - :Parameters: - - `oid`: a valid ObjectId + :param oid: a valid ObjectId """ if isinstance(oid, ObjectId): self.__id = oid.binary @@ -202,9 +192,7 @@ def __validate(self, oid: Any) -> None: else: _raise_invalid_id(oid) else: - raise TypeError( - "id must be an instance of (bytes, str, ObjectId), not %s" % (type(oid),) - ) + raise TypeError(f"id must be an instance of (bytes, str, ObjectId), not {type(oid)}") @property def binary(self) -> bytes: @@ -220,18 +208,18 @@ def generation_time(self) -> datetime.datetime: represents the generation time in UTC. It is precise to the second. """ - timestamp = struct.unpack(">I", self.__id[0:4])[0] + timestamp = _UNPACK_INT(self.__id[0:4])[0] return datetime.datetime.fromtimestamp(timestamp, utc) def __getstate__(self) -> bytes: - """return value of object for pickling. + """Return value of object for pickling. needed explicitly because __slots__() defined. """ return self.__id def __setstate__(self, value: Any) -> None: - """explicit state set from pickling""" - # Provide backwards compatability with OIDs + """Explicit state set from pickling""" + # Provide backwards compatibility with OIDs # pickled with pymongo-1.9 or older. 
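The rewritten ``__generate`` above packs the same 12-byte layout as before: a 4-byte timestamp, 5 random bytes, and a 3-byte counter. A quick sketch of that layout using only public attributes::

    from bson.objectid import ObjectId

    oid = ObjectId()
    raw = oid.binary
    ts, rand5, counter = raw[:4], raw[4:9], raw[9:]
    print(len(ts), len(rand5), len(counter))  # 4 5 3
    print(oid.generation_time)  # aware UTC datetime from the first 4 bytes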
if isinstance(value, dict): oid = value["_ObjectId__id"] @@ -248,8 +236,8 @@ def __setstate__(self, value: Any) -> None: def __str__(self) -> str: return binascii.hexlify(self.__id).decode() - def __repr__(self): - return "ObjectId('%s')" % (str(self),) + def __repr__(self) -> str: + return f"ObjectId('{self!s}')" def __eq__(self, other: Any) -> bool: if isinstance(other, ObjectId): diff --git a/bson/raw_bson.py b/bson/raw_bson.py index ca7207f0a2..9ead0765dc 100644 --- a/bson/raw_bson.py +++ b/bson/raw_bson.py @@ -25,18 +25,18 @@ >>> from pymongo import MongoClient >>> from bson.raw_bson import RawBSONDocument >>> client = MongoClient(document_class=RawBSONDocument) - >>> client.drop_database('db') - >>> client.drop_database('replica_db') + >>> client.drop_database("db") + >>> client.drop_database("replica_db") >>> db = client.db - >>> result = db.test.insert_many([{'_id': 1, 'a': 1}, - ... {'_id': 2, 'b': 1}, - ... {'_id': 3, 'c': 1}, - ... {'_id': 4, 'd': 1}]) + >>> result = db.test.insert_many( + ... [{"_id": 1, "a": 1}, {"_id": 2, "b": 1}, {"_id": 3, "c": 1}, {"_id": 4, "d": 1}] + ... ) >>> replica_db = client.replica_db >>> for doc in db.test.find(): - ... print(f"raw document: {doc.raw}") - ... print(f"decoded document: {bson.decode(doc.raw)}") - ... result = replica_db.test.insert_one(doc) + ... print(f"raw document: {doc.raw}") + ... print(f"decoded document: {bson.decode(doc.raw)}") + ... result = replica_db.test.insert_one(doc) + ... raw document: b'...' decoded document: {'_id': 1, 'a': 1} raw document: b'...' @@ -50,14 +50,28 @@ blobs to disk, using raw BSON documents provides better speed and avoids the overhead of decoding or encoding BSON. """ +from __future__ import annotations from typing import Any, ItemsView, Iterator, Mapping, Optional from bson import _get_object_size, _raw_to_dict -from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER +from bson.codec_options import _RAW_BSON_DOCUMENT_MARKER, CodecOptions from bson.codec_options import DEFAULT_CODEC_OPTIONS as DEFAULT -from bson.codec_options import CodecOptions -from bson.son import SON + + +def _inflate_bson( + bson_bytes: bytes | memoryview, + codec_options: CodecOptions[RawBSONDocument], + raw_array: bool = False, +) -> dict[str, Any]: + """Inflates the top level fields of a BSON document. + + :param bson_bytes: the BSON bytes that compose this document + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` whose ``document_class`` + must be :class:`RawBSONDocument`. 
+ """ + return _raw_to_dict(bson_bytes, 4, len(bson_bytes) - 1, codec_options, {}, raw_array=raw_array) class RawBSONDocument(Mapping[str, Any]): @@ -70,8 +84,13 @@ class RawBSONDocument(Mapping[str, Any]): __slots__ = ("__raw", "__inflated_doc", "__codec_options") _type_marker = _RAW_BSON_DOCUMENT_MARKER + __codec_options: CodecOptions[RawBSONDocument] - def __init__(self, bson_bytes: bytes, codec_options: Optional[CodecOptions] = None) -> None: + def __init__( + self, + bson_bytes: bytes | memoryview, + codec_options: Optional[CodecOptions[RawBSONDocument]] = None, + ) -> None: """Create a new :class:`RawBSONDocument` :class:`RawBSONDocument` is a representation of a BSON document that @@ -90,9 +109,8 @@ class from the standard library so it can be used like a read-only >>> raw_doc['_id'] 'my_doc' - :Parameters: - - `bson_bytes`: the BSON bytes that compose this document - - `codec_options` (optional): An instance of + :param bson_bytes: the BSON bytes that compose this document + :param codec_options: An instance of :class:`~bson.codec_options.CodecOptions` whose ``document_class`` must be :class:`RawBSONDocument`. The default is :attr:`DEFAULT_RAW_BSON_OPTIONS`. @@ -111,17 +129,17 @@ class from the standard library so it can be used like a read-only # it refers to this class RawBSONDocument. if codec_options is None: codec_options = DEFAULT_RAW_BSON_OPTIONS - elif codec_options.document_class is not RawBSONDocument: + elif not issubclass(codec_options.document_class, RawBSONDocument): raise TypeError( "RawBSONDocument cannot use CodecOptions with document " - "class %s" % (codec_options.document_class,) + f"class {codec_options.document_class}" ) self.__codec_options = codec_options # Validate the bson object size. _get_object_size(bson_bytes, 0, len(bson_bytes)) @property - def raw(self) -> bytes: + def raw(self) -> bytes | memoryview: """The raw BSON bytes composing this document.""" return self.__raw @@ -134,10 +152,15 @@ def __inflated(self) -> Mapping[str, Any]: if self.__inflated_doc is None: # We already validated the object's size when this document was # created, so no need to do that again. - # Use SON to preserve ordering of elements. - self.__inflated_doc = _inflate_bson(self.__raw, self.__codec_options) + self.__inflated_doc = self._inflate_bson(self.__raw, self.__codec_options) return self.__inflated_doc + @staticmethod + def _inflate_bson( + bson_bytes: bytes | memoryview, codec_options: CodecOptions[RawBSONDocument] + ) -> Mapping[str, Any]: + return _inflate_bson(bson_bytes, codec_options) + def __getitem__(self, item: str) -> Any: return self.__inflated[item] @@ -152,24 +175,26 @@ def __eq__(self, other: Any) -> bool: return self.__raw == other.raw return NotImplemented - def __repr__(self): - return "RawBSONDocument(%r, codec_options=%r)" % (self.raw, self.__codec_options) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.raw!r}, codec_options={self.__codec_options!r})" -def _inflate_bson(bson_bytes: bytes, codec_options: CodecOptions) -> Mapping[Any, Any]: - """Inflates the top level fields of a BSON document. +class _RawArrayBSONDocument(RawBSONDocument): + """A RawBSONDocument that only expands sub-documents and arrays when accessed.""" - :Parameters: - - `bson_bytes`: the BSON bytes that compose this document - - `codec_options`: An instance of - :class:`~bson.codec_options.CodecOptions` whose ``document_class`` - must be :class:`RawBSONDocument`. - """ - # Use SON to preserve ordering of elements. 
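The module-level ``_inflate_bson`` above is what keeps :class:`RawBSONDocument` lazy: the bytes are size-validated up front, but nothing is decoded until a field is actually read. A short sketch::

    from bson import encode
    from bson.raw_bson import RawBSONDocument

    raw_doc = RawBSONDocument(encode({"a": 1, "sub": {"b": 2}}))
    print(raw_doc.raw)     # the original BSON bytes, untouched
    print(raw_doc["sub"])  # first access inflates the top-level fields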
- return _raw_to_dict(bson_bytes, 4, len(bson_bytes) - 1, codec_options, SON()) + @staticmethod + def _inflate_bson( + bson_bytes: bytes | memoryview, codec_options: CodecOptions[RawBSONDocument] + ) -> Mapping[str, Any]: + return _inflate_bson(bson_bytes, codec_options, raw_array=True) -DEFAULT_RAW_BSON_OPTIONS: CodecOptions = DEFAULT.with_options(document_class=RawBSONDocument) +DEFAULT_RAW_BSON_OPTIONS: CodecOptions[RawBSONDocument] = DEFAULT.with_options( + document_class=RawBSONDocument +) +_RAW_ARRAY_BSON_OPTIONS: CodecOptions[_RawArrayBSONDocument] = DEFAULT.with_options( + document_class=_RawArrayBSONDocument +) """The default :class:`~bson.codec_options.CodecOptions` for :class:`RawBSONDocument`. """ diff --git a/bson/regex.py b/bson/regex.py index 3e98477198..60cff4fd08 100644 --- a/bson/regex.py +++ b/bson/regex.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB regular expressions. -""" +"""Tools for representing MongoDB regular expressions.""" +from __future__ import annotations import re from typing import Any, Generic, Pattern, Type, TypeVar, Union @@ -54,7 +54,7 @@ class Regex(Generic[_T]): _type_marker = 11 @classmethod - def from_native(cls: Type["Regex"], regex: "Pattern[_T]") -> "Regex[_T]": + def from_native(cls: Type[Regex[Any]], regex: Pattern[_T]) -> Regex[_T]: """Convert a Python regular expression into a ``Regex`` instance. Note that in Python 3, a regular expression compiled from a @@ -66,8 +66,7 @@ def from_native(cls: Type["Regex"], regex: "Pattern[_T]") -> "Regex[_T]": >>> regex.flags ^= re.UNICODE >>> db.collection.insert_one({'pattern': regex}) - :Parameters: - - `regex`: A regular expression object from ``re.compile()``. + :param regex: A regular expression object from ``re.compile()``. .. warning:: Python regular expressions use a different syntax and different @@ -89,9 +88,8 @@ def __init__(self, pattern: _T, flags: Union[str, int] = 0) -> None: This class is useful to store and retrieve regular expressions that are incompatible with Python's regular expression dialect. - :Parameters: - - `pattern`: string - - `flags`: (optional) an integer bitmask, or a string of flag + :param pattern: string + :param flags: an integer bitmask, or a string of flag characters like "im" for IGNORECASE and MULTILINE """ if not isinstance(pattern, (str, bytes)): @@ -116,10 +114,10 @@ def __eq__(self, other: Any) -> bool: def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): - return "Regex(%r, %r)" % (self.pattern, self.flags) + def __repr__(self) -> str: + return f"Regex({self.pattern!r}, {self.flags!r})" - def try_compile(self) -> "Pattern[_T]": + def try_compile(self) -> Pattern[_T]: """Compile this :class:`Regex` as a Python regular expression. .. warning:: diff --git a/bson/son.py b/bson/son.py index e4238b4058..8fd4f95cd2 100644 --- a/bson/son.py +++ b/bson/son.py @@ -16,7 +16,9 @@ Regular dictionaries can be used instead of SON objects, but not when the order of keys is important. A SON object can be used just like a normal Python -dictionary.""" +dictionary. +""" +from __future__ import annotations import copy import re @@ -26,7 +28,6 @@ Dict, Iterable, Iterator, - List, Mapping, Optional, Pattern, @@ -34,6 +35,7 @@ Type, TypeVar, Union, + cast, ) # This sort of sucks, but seems to be as good as it gets... @@ -53,27 +55,27 @@ class SON(Dict[_Key, _Value]): similar to collections.OrderedDict. 
""" - __keys: List[Any] + __keys: list[Any] def __init__( self, data: Optional[Union[Mapping[_Key, _Value], Iterable[Tuple[_Key, _Value]]]] = None, - **kwargs: Any + **kwargs: Any, ) -> None: self.__keys = [] dict.__init__(self) self.update(data) self.update(kwargs) - def __new__(cls: Type["SON[_Key, _Value]"], *args: Any, **kwargs: Any) -> "SON[_Key, _Value]": - instance = super(SON, cls).__new__(cls, *args, **kwargs) + def __new__(cls: Type[SON[_Key, _Value]], *args: Any, **kwargs: Any) -> SON[_Key, _Value]: + instance = super().__new__(cls, *args, **kwargs) instance.__keys = [] return instance - def __repr__(self): + def __repr__(self) -> str: result = [] for key in self.__keys: - result.append("(%r, %r)" % (key, self[key])) + result.append(f"({key!r}, {self[key]!r})") return "SON([%s])" % ", ".join(result) def __setitem__(self, key: _Key, value: _Value) -> None: @@ -85,7 +87,7 @@ def __delitem__(self, key: _Key) -> None: self.__keys.remove(key) dict.__delitem__(self, key) - def copy(self) -> "SON[_Key, _Value]": + def copy(self) -> SON[_Key, _Value]: other: SON[_Key, _Value] = SON() other.update(self) return other @@ -94,8 +96,7 @@ def copy(self) -> "SON[_Key, _Value]": # efficient. # second level definitions support higher levels def __iter__(self) -> Iterator[_Key]: - for k in self.__keys: - yield k + yield from self.__keys def has_key(self, key: _Key) -> bool: return key in self.__keys @@ -108,14 +109,14 @@ def itervalues(self) -> Iterator[_Value]: for _, v in self.items(): yield v - def values(self) -> List[_Value]: # type: ignore[override] + def values(self) -> list[_Value]: # type: ignore[override] return [v for _, v in self.items()] def clear(self) -> None: self.__keys = [] - super(SON, self).clear() + super().clear() - def setdefault(self, key: _Key, default: _Value) -> _Value: # type: ignore[override] + def setdefault(self, key: _Key, default: _Value) -> _Value: try: return self[key] except KeyError: @@ -138,11 +139,11 @@ def popitem(self) -> Tuple[_Key, _Value]: try: k, v = next(iter(self.items())) except StopIteration: - raise KeyError("container is empty") + raise KeyError("container is empty") from None del self[k] return (k, v) - def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type: ignore[override] + def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # Make progressively weaker assumptions about "other" if other is None: pass @@ -150,7 +151,7 @@ def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type for k, v in other.items(): self[k] = v elif hasattr(other, "keys"): - for k in other.keys(): + for k in other: self[k] = other[k] else: for k, v in other: @@ -158,7 +159,9 @@ def update(self, other: Optional[Any] = None, **kwargs: _Value) -> None: # type if kwargs: self.update(kwargs) - def get(self, key: _Key, default: Optional[Union[_Value, _T]] = None) -> Union[_Value, _T, None]: # type: ignore[override] + def get( # type: ignore[override] + self, key: _Key, default: Optional[Union[_Value, _T]] = None + ) -> Union[_Value, _T, None]: try: return self[key] except KeyError: @@ -170,7 +173,7 @@ def __eq__(self, other: Any) -> bool: """ if isinstance(other, SON): return len(self) == len(other) and list(self.items()) == list(other.items()) - return self.to_dict() == other + return cast(bool, self.to_dict() == other) def __ne__(self, other: Any) -> bool: return not self == other @@ -178,7 +181,7 @@ def __ne__(self, other: Any) -> bool: def __len__(self) -> int: return len(self.__keys) - def to_dict(self) -> 
Dict[_Key, _Value]: + def to_dict(self) -> dict[_Key, _Value]: """Convert a SON document to a normal Python dictionary instance. This is trickier than just *dict(...)* because it needs to be @@ -189,13 +192,13 @@ def transform_value(value: Any) -> Any: if isinstance(value, list): return [transform_value(v) for v in value] elif isinstance(value, _Mapping): - return dict([(k, transform_value(v)) for k, v in value.items()]) + return {k: transform_value(v) for k, v in value.items()} else: return value - return transform_value(dict(self)) + return cast("dict[_Key, _Value]", transform_value(dict(self))) - def __deepcopy__(self, memo: Dict[int, "SON[_Key, _Value]"]) -> "SON[_Key, _Value]": + def __deepcopy__(self, memo: dict[int, SON[_Key, _Value]]) -> SON[_Key, _Value]: out: SON[_Key, _Value] = SON() val_id = id(self) if val_id in memo: @@ -203,6 +206,6 @@ def __deepcopy__(self, memo: Dict[int, "SON[_Key, _Value]"]) -> "SON[_Key, _Valu memo[val_id] = out for k, v in self.items(): if not isinstance(v, RE_TYPE): - v = copy.deepcopy(v, memo) + v = copy.deepcopy(v, memo) # noqa: PLW2901 out[k] = v return out diff --git a/bson/time64.c b/bson/time64.c index 8d2886592e..a21fbb90bd 100644 --- a/bson/time64.c +++ b/bson/time64.c @@ -73,7 +73,7 @@ static const Year years_in_gregorian_cycle = 400; #define days_in_gregorian_cycle ((365 * 400) + 100 - 4 + 1) static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL; -/* Year range we can trust the time funcitons with */ +/* Year range we can trust the time functions with */ #define MAX_SAFE_YEAR 2037 #define MIN_SAFE_YEAR 1971 @@ -739,7 +739,7 @@ struct TM *cbson_localtime64_r (const Time64_T *time, struct TM *local_tm) /* GMT is Jan 1st, xx01 year, but localtime is still Dec 31st in a non-leap xx00. There is one point in the cycle we can't account for which the safe xx00 year is a leap - year. So we need to correct for Dec 31st comming out as + year. So we need to correct for Dec 31st coming out as the 366th day of the year. */ if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 ) diff --git a/bson/timestamp.py b/bson/timestamp.py index a333b9fa3e..949bd7b36c 100644 --- a/bson/timestamp.py +++ b/bson/timestamp.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing MongoDB internal Timestamps. -""" +"""Tools for representing MongoDB internal Timestamps.""" +from __future__ import annotations import calendar import datetime @@ -25,7 +25,7 @@ UPPERBOUND = 4294967296 -class Timestamp(object): +class Timestamp: """MongoDB internal timestamps used in the opLog.""" __slots__ = ("__time", "__inc") @@ -47,11 +47,10 @@ def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: an instance of :class:`int`. Raises :class:`ValueError` if `time` or `inc` is not in [0, 2**32). 
- :Parameters: - - `time`: time in seconds since epoch UTC, or a naive UTC + :param time: time in seconds since epoch UTC, or a naive UTC :class:`~datetime.datetime`, or an aware :class:`~datetime.datetime` - - `inc`: the incrementing counter + :param inc: the incrementing counter """ if isinstance(time, datetime.datetime): offset = time.utcoffset() @@ -59,9 +58,9 @@ def __init__(self, time: Union[datetime.datetime, int], inc: int) -> None: time = time - offset time = int(calendar.timegm(time.timetuple())) if not isinstance(time, int): - raise TypeError("time must be an instance of int") + raise TypeError(f"time must be an instance of int, not {type(time)}") if not isinstance(inc, int): - raise TypeError("inc must be an instance of int") + raise TypeError(f"inc must be an instance of int, not {type(inc)}") if not 0 <= time < UPPERBOUND: raise ValueError("time must be contained in [0, 2**32)") if not 0 <= inc < UPPERBOUND: @@ -112,8 +111,8 @@ def __ge__(self, other: Any) -> bool: return (self.time, self.inc) >= (other.time, other.inc) return NotImplemented - def __repr__(self): - return "Timestamp(%s, %s)" % (self.__time, self.__inc) + def __repr__(self) -> str: + return f"Timestamp({self.__time}, {self.__inc})" def as_datetime(self) -> datetime.datetime: """Return a :class:`~datetime.datetime` instance corresponding diff --git a/bson/typings.py b/bson/typings.py new file mode 100644 index 0000000000..5913860556 --- /dev/null +++ b/bson/typings.py @@ -0,0 +1,31 @@ +# Copyright 2023-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Type aliases used by bson""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, TypeVar, Union + +if TYPE_CHECKING: + from array import array + from mmap import mmap + + from bson.raw_bson import RawBSONDocument + + +# Common Shared Types. +_DocumentOut = Union[MutableMapping[str, Any], "RawBSONDocument"] +_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) +_DocumentTypeArg = TypeVar("_DocumentTypeArg", bound=Mapping[str, Any]) +_ReadableBuffer = Union[bytes, memoryview, bytearray, "mmap", "array"] # type: ignore[type-arg] diff --git a/bson/tz_util.py b/bson/tz_util.py index 8106c77b40..4d31c04f9c 100644 --- a/bson/tz_util.py +++ b/bson/tz_util.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Timezone related utilities for BSON.""" +from __future__ import annotations from datetime import datetime, timedelta, tzinfo from typing import Optional, Tuple, Union @@ -38,6 +39,9 @@ def __init__(self, offset: Union[float, timedelta], name: str) -> None: def __getinitargs__(self) -> Tuple[timedelta, str]: return self.__offset, self.__name + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__offset!r}, {self.__name!r})" + def utcoffset(self, dt: Optional[datetime]) -> timedelta: return self.__offset diff --git a/doc/api/bson/binary.rst b/doc/api/bson/binary.rst index b9673d70d9..7084a45b4e 100644 --- a/doc/api/bson/binary.rst +++ b/doc/api/bson/binary.rst @@ -15,11 +15,21 @@ .. autodata:: CSHARP_LEGACY .. autodata:: MD5_SUBTYPE .. autodata:: COLUMN_SUBTYPE + .. autodata:: SENSITIVE_SUBTYPE + .. autodata:: VECTOR_SUBTYPE .. autodata:: USER_DEFINED_SUBTYPE .. autoclass:: UuidRepresentation :members: + .. autoclass:: BinaryVectorDtype + :members: + :show-inheritance: + + .. autoclass:: BinaryVector + :members: + + .. autoclass:: Binary(data, subtype=BINARY_SUBTYPE) :members: :show-inheritance: diff --git a/doc/api/bson/datetime_ms.rst b/doc/api/bson/datetime_ms.rst new file mode 100644 index 0000000000..1afaad69fc --- /dev/null +++ b/doc/api/bson/datetime_ms.rst @@ -0,0 +1,6 @@ +:mod:`datetime_ms` -- Support for BSON UTC Datetime +=================================================== + +.. automodule:: bson.datetime_ms + :synopsis: Support for BSON UTC datetimes. + :members: diff --git a/doc/api/bson/index.rst b/doc/api/bson/index.rst index 5f15ed99eb..d5b69607de 100644 --- a/doc/api/bson/index.rst +++ b/doc/api/bson/index.rst @@ -3,7 +3,7 @@ .. automodule:: bson :synopsis: BSON (Binary JSON) Encoding and Decoding - :members: + :members: BSON, decode, decode_all, decode_file_iter, decode_iter, encode, gen_list_name, has_c, is_valid Sub-modules: @@ -13,6 +13,7 @@ Sub-modules: binary code codec_options + datetime_ms dbref decimal128 errors diff --git a/doc/api/gridfs/asynchronous/grid_file.rst b/doc/api/gridfs/asynchronous/grid_file.rst new file mode 100644 index 0000000000..fbf34adc8a --- /dev/null +++ b/doc/api/gridfs/asynchronous/grid_file.rst @@ -0,0 +1,19 @@ +:mod:`grid_file` -- Async tools for representing files stored in GridFS +======================================================================= + +.. automodule:: gridfs.asynchronous.grid_file + :synopsis: Async tools for representing files stored in GridFS + + .. autoclass:: AsyncGridIn + :members: + + .. autoattribute:: _id + + .. autoclass:: AsyncGridOut + :members: + + .. autoattribute:: _id + .. automethod:: __aiter__ + + .. autoclass:: AsyncGridOutCursor + :members: diff --git a/doc/api/gridfs/asynchronous/index.rst b/doc/api/gridfs/asynchronous/index.rst new file mode 100644 index 0000000000..7b6ebb28b8 --- /dev/null +++ b/doc/api/gridfs/asynchronous/index.rst @@ -0,0 +1,14 @@ +:mod:`gridfs async` -- Async tools for working with GridFS +========================================================== + + +.. automodule:: gridfs.asynchronous + :synopsis: Async tools for working with GridFS + :members: AsyncGridFS, AsyncGridFSBucket + +Sub-modules: + +.. toctree:: + :maxdepth: 2 + + grid_file diff --git a/doc/api/gridfs/index.rst b/doc/api/gridfs/index.rst index 6764ef622b..190c561d05 100644 --- a/doc/api/gridfs/index.rst +++ b/doc/api/gridfs/index.rst @@ -3,12 +3,13 @@ .. automodule:: gridfs :synopsis: Tools for working with GridFS - :members: + :members: GridFS, GridFSBucket Sub-modules: .. 
toctree:: - :maxdepth: 2 + :maxdepth: 3 + asynchronous/index errors grid_file diff --git a/doc/api/index.rst b/doc/api/index.rst index 30ae3608ca..339f5843bf 100644 --- a/doc/api/index.rst +++ b/doc/api/index.rst @@ -3,10 +3,10 @@ API Documentation The PyMongo distribution contains three top-level packages for interacting with MongoDB. :mod:`bson` is an implementation of the -`BSON format `_, :mod:`pymongo` is a +`BSON format `_, :mod:`pymongo` is a full-featured driver for MongoDB, and :mod:`gridfs` is a set of tools for working with the `GridFS -`_ storage +`_ storage specification. .. toctree:: diff --git a/doc/api/pymongo/asynchronous/change_stream.rst b/doc/api/pymongo/asynchronous/change_stream.rst new file mode 100644 index 0000000000..1b506fdb55 --- /dev/null +++ b/doc/api/pymongo/asynchronous/change_stream.rst @@ -0,0 +1,6 @@ +:mod:`change_stream` -- Watch changes on a collection, database, or cluster +=========================================================================== + + +.. automodule:: pymongo.asynchronous.change_stream + :members: diff --git a/doc/api/pymongo/asynchronous/client_session.rst b/doc/api/pymongo/asynchronous/client_session.rst new file mode 100644 index 0000000000..d8403325d7 --- /dev/null +++ b/doc/api/pymongo/asynchronous/client_session.rst @@ -0,0 +1,6 @@ +:mod:`client_session` -- Logical sessions for sequential operations +=================================================================== + + +.. automodule:: pymongo.asynchronous.client_session + :members: diff --git a/doc/api/pymongo/asynchronous/collection.rst b/doc/api/pymongo/asynchronous/collection.rst new file mode 100644 index 0000000000..779295ced1 --- /dev/null +++ b/doc/api/pymongo/asynchronous/collection.rst @@ -0,0 +1,62 @@ +:mod:`collection` -- Collection level operations +================================================ + + +.. automodule:: pymongo.asynchronous.collection + :synopsis: Collection level operations + + .. autoclass:: pymongo.asynchronous.collection.ReturnDocument + + .. autoclass:: pymongo.asynchronous.collection.AsyncCollection(database, name, create=False, **kwargs) + + .. describe:: c[name] || c.name + + Get the `name` sub-collection of :class:`AsyncCollection` `c`. + + Raises :class:`~pymongo.asynchronous.errors.InvalidName` if an invalid + collection name is used. + + .. autoattribute:: full_name + .. autoattribute:: name + .. autoattribute:: database + .. autoattribute:: codec_options + .. autoattribute:: read_preference + .. autoattribute:: write_concern + .. autoattribute:: read_concern + .. automethod:: with_options + .. automethod:: bulk_write + .. automethod:: insert_one + .. automethod:: insert_many + .. automethod:: replace_one + .. automethod:: update_one + .. automethod:: update_many + .. automethod:: delete_one + .. automethod:: delete_many + .. automethod:: aggregate + .. automethod:: aggregate_raw_batches + .. automethod:: watch + .. automethod:: find(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. 
automethod:: find_raw_batches(filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + .. automethod:: find_one(filter=None, *args, **kwargs) + .. automethod:: find_one_and_delete + .. automethod:: find_one_and_replace(filter, replacement, projection=None, sort=None, return_document=ReturnDocument.BEFORE, hint=None, session=None, **kwargs) + .. automethod:: find_one_and_update(filter, update, projection=None, sort=None, return_document=ReturnDocument.BEFORE, array_filters=None, hint=None, session=None, **kwargs) + .. automethod:: count_documents + .. automethod:: estimated_document_count + .. automethod:: distinct + .. automethod:: create_index + .. automethod:: create_indexes + .. automethod:: drop_index + .. automethod:: drop_indexes + .. automethod:: list_indexes + .. automethod:: index_information + .. automethod:: create_search_index + .. automethod:: create_search_indexes + .. automethod:: drop_search_index + .. automethod:: list_search_indexes + .. automethod:: update_search_index + .. automethod:: drop + .. automethod:: rename + .. automethod:: options + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/api/pymongo/asynchronous/command_cursor.rst b/doc/api/pymongo/asynchronous/command_cursor.rst new file mode 100644 index 0000000000..1f94c6e525 --- /dev/null +++ b/doc/api/pymongo/asynchronous/command_cursor.rst @@ -0,0 +1,7 @@ +:mod:`command_cursor` -- Tools for iterating over MongoDB command results +========================================================================= + + +.. automodule:: pymongo.asynchronous.command_cursor + :synopsis: Tools for iterating over MongoDB command results + :members: diff --git a/doc/api/pymongo/asynchronous/cursor.rst b/doc/api/pymongo/asynchronous/cursor.rst new file mode 100644 index 0000000000..f511734de4 --- /dev/null +++ b/doc/api/pymongo/asynchronous/cursor.rst @@ -0,0 +1,17 @@ +:mod:`cursor` -- Tools for iterating over MongoDB query results +=============================================================== + + +.. automodule:: pymongo.asynchronous.cursor + :synopsis: Tools for iterating over MongoDB query results + + .. autoclass:: pymongo.asynchronous.cursor.AsyncCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, session=None, allow_disk_use=None) + :members: + + .. describe:: c[index] + + See :meth:`__getitem__` and read the warning. + + .. automethod:: __getitem__ + + .. 
autoclass:: pymongo.asynchronous.cursor.AsyncRawBatchCursor(collection, filter=None, projection=None, skip=0, limit=0, no_cursor_timeout=False, cursor_type=CursorType.NON_TAILABLE, sort=None, allow_partial_results=False, oplog_replay=False, batch_size=0, collation=None, hint=None, max_scan=None, max_time_ms=None, max=None, min=None, return_key=False, show_record_id=False, snapshot=False, comment=None, allow_disk_use=None) diff --git a/doc/api/pymongo/asynchronous/database.rst b/doc/api/pymongo/asynchronous/database.rst new file mode 100644 index 0000000000..7b043ab0d1 --- /dev/null +++ b/doc/api/pymongo/asynchronous/database.rst @@ -0,0 +1,27 @@ +:mod:`database` -- Database level operations +============================================ + + +.. automodule:: pymongo.asynchronous.database + :synopsis: Database level operations + + .. autoclass:: pymongo.asynchronous.database.AsyncDatabase + :members: + + .. describe:: db[collection_name] || db.collection_name + + Get the `collection_name` :class:`~pymongo.asynchronous.collection.AsyncCollection` of + :class:`AsyncDatabase` `db`. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid collection + name is used. + + .. note:: Use dictionary style access if `collection_name` is an + attribute of the :class:`AsyncDatabase` class eg: db[`collection_name`]. + + .. automethod:: __getitem__ + .. automethod:: __getattr__ + .. autoattribute:: codec_options + .. autoattribute:: read_preference + .. autoattribute:: write_concern + .. autoattribute:: read_concern diff --git a/doc/api/pymongo/asynchronous/index.rst b/doc/api/pymongo/asynchronous/index.rst new file mode 100644 index 0000000000..b7fc985415 --- /dev/null +++ b/doc/api/pymongo/asynchronous/index.rst @@ -0,0 +1,23 @@ +:mod:`pymongo async` -- Async Python driver for MongoDB +======================================================= + + +.. automodule:: pymongo.asynchronous + :synopsis: Asynchronous Python driver for MongoDB + + .. data:: AsyncMongoClient + + Alias for :class:`pymongo.asynchronous.mongo_client.MongoClient`. + +Sub-modules: + +.. toctree:: + :maxdepth: 2 + + change_stream + client_session + collection + command_cursor + cursor + database + mongo_client diff --git a/doc/api/pymongo/asynchronous/mongo_client.rst b/doc/api/pymongo/asynchronous/mongo_client.rst new file mode 100644 index 0000000000..899ca687d5 --- /dev/null +++ b/doc/api/pymongo/asynchronous/mongo_client.rst @@ -0,0 +1,41 @@ +:mod:`mongo_client` -- Tools for connecting to MongoDB +====================================================== + + +.. automodule:: pymongo.asynchronous.mongo_client + :synopsis: Tools for connecting to MongoDB + + .. autoclass:: pymongo.asynchronous.mongo_client.AsyncMongoClient(host='localhost', port=27017, document_class=dict, tz_aware=False, connect=True, **kwargs) + + .. automethod:: close + + .. describe:: c[db_name] || c.db_name + + Get the `db_name` :class:`~pymongo.asynchronous.database.AsyncDatabase` on :class:`AsyncMongoClient` `c`. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid database name is used. + + .. autoattribute:: topology_description + .. autoattribute:: address + .. autoattribute:: primary + .. autoattribute:: secondaries + .. autoattribute:: arbiters + .. autoattribute:: is_primary + .. autoattribute:: is_mongos + .. autoattribute:: nodes + .. autoattribute:: codec_options + .. autoattribute:: read_preference + .. autoattribute:: write_concern + .. autoattribute:: read_concern + .. autoattribute:: options + .. automethod:: start_session + .. 
automethod:: list_databases + .. automethod:: list_database_names + .. automethod:: drop_database + .. automethod:: get_default_database + .. automethod:: get_database + .. automethod:: server_info + .. automethod:: watch + .. automethod:: bulk_write + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/api/pymongo/auth_oidc.rst b/doc/api/pymongo/auth_oidc.rst new file mode 100644 index 0000000000..1466b21e9d --- /dev/null +++ b/doc/api/pymongo/auth_oidc.rst @@ -0,0 +1,5 @@ +:mod:`auth_oidc` -- MONGODB-OIDC Authentication +=========================================================================== + +.. automodule:: pymongo.auth_oidc + :members: diff --git a/doc/api/pymongo/bulk.rst b/doc/api/pymongo/bulk.rst deleted file mode 100644 index 0d597c26df..0000000000 --- a/doc/api/pymongo/bulk.rst +++ /dev/null @@ -1,6 +0,0 @@ -:mod:`bulk` -- The bulk write operations interface -================================================== - -.. automodule:: pymongo.bulk - :synopsis: The bulk write operations interface. - :members: diff --git a/doc/api/pymongo/collection.rst b/doc/api/pymongo/collection.rst index 3e8188b281..e3746c68b7 100644 --- a/doc/api/pymongo/collection.rst +++ b/doc/api/pymongo/collection.rst @@ -61,6 +61,13 @@ .. automethod:: drop_indexes .. automethod:: list_indexes .. automethod:: index_information + .. automethod:: create_search_index + .. automethod:: create_search_indexes + .. automethod:: drop_search_index + .. automethod:: list_search_indexes + .. automethod:: update_search_index .. automethod:: drop .. automethod:: rename .. automethod:: options + .. automethod:: __getitem__ + .. automethod:: __getattr__ diff --git a/doc/api/pymongo/database.rst b/doc/api/pymongo/database.rst index b40a77dff3..044e04971e 100644 --- a/doc/api/pymongo/database.rst +++ b/doc/api/pymongo/database.rst @@ -20,6 +20,8 @@ .. note:: Use dictionary style access if `collection_name` is an attribute of the :class:`Database` class eg: db[`collection_name`]. + .. automethod:: __getitem__ + .. automethod:: __getattr__ .. autoattribute:: codec_options .. autoattribute:: read_preference .. autoattribute:: write_concern diff --git a/doc/api/pymongo/encryption_options.rst b/doc/api/pymongo/encryption_options.rst index 08bfc157a9..b8a886ea68 100644 --- a/doc/api/pymongo/encryption_options.rst +++ b/doc/api/pymongo/encryption_options.rst @@ -3,6 +3,4 @@ .. automodule:: pymongo.encryption_options :synopsis: Support for automatic client-side field level encryption - - .. autoclass:: pymongo.encryption_options.AutoEncryptionOpts - :members: + :members: diff --git a/doc/api/pymongo/index.rst b/doc/api/pymongo/index.rst index a4e15b9878..4fb6c81568 100644 --- a/doc/api/pymongo/index.rst +++ b/doc/api/pymongo/index.rst @@ -9,6 +9,10 @@ Alias for :class:`pymongo.mongo_client.MongoClient`. + .. data:: AsyncMongoClient + + Alias for :class:`pymongo.asynchronous.mongo_client.AsyncMongoClient`. + .. data:: ReadPreference Alias for :class:`pymongo.read_preferences.ReadPreference`. @@ -27,9 +31,10 @@ Sub-modules: .. toctree:: - :maxdepth: 2 + :maxdepth: 3 - bulk + asynchronous/index + auth_oidc change_stream client_options client_session diff --git a/doc/api/pymongo/mongo_client.rst b/doc/api/pymongo/mongo_client.rst index 83dab27f2c..0409e7ef68 100644 --- a/doc/api/pymongo/mongo_client.rst +++ b/doc/api/pymongo/mongo_client.rst @@ -35,3 +35,6 @@ .. automethod:: get_database .. automethod:: server_info .. automethod:: watch + .. automethod:: bulk_write + .. automethod:: __getitem__ + .. 
automethod:: __getattr__
diff --git a/doc/atlas.rst b/doc/atlas.rst
deleted file mode 100644
index 6685cf9fb8..0000000000
--- a/doc/atlas.rst
+++ /dev/null
@@ -1,43 +0,0 @@
-Using PyMongo with MongoDB Atlas
-================================
-
-`Atlas `_ is MongoDB, Inc.'s hosted MongoDB as a
-service offering. To connect to Atlas, pass the connection string provided by
-Atlas to :class:`~pymongo.mongo_client.MongoClient`::
-
-    client = pymongo.MongoClient()
-
-Connections to Atlas require TLS/SSL.
-
-.. warning:: Industry best practices recommend, and some regulations require,
-   the use of TLS 1.1 or newer. Though no application changes are required for
-   PyMongo to make use of the newest protocols, some operating systems or
-   versions may not provide an OpenSSL version new enough to support them.
-
-   Users of macOS older than 10.13 (High Sierra) will need to install Python
-   from `python.org`_, `homebrew`_, `macports`_, or another similar source.
-
-   Users of Linux or other non-macOS Unix can check their OpenSSL version like
-   this::
-
-       $ openssl version
-
-   If the version number is less than 1.0.1 support for TLS 1.1 or newer is not
-   available. Contact your operating system vendor for a solution or upgrade to
-   a newer distribution.
-
-   You can check your Python interpreter by installing the `requests`_ module
-   and executing the following command::
-
-       python -c "import requests; print(requests.get('https://www.howsmyssl.com/a/check', verify=False).json()['tls_version'])"
-
-   You should see "TLS 1.X" where X is >= 1.
-
-   You can read more about TLS versions and their security implications here:
-
-   ``_
-
-.. _python.org: https://www.python.org/downloads/
-.. _homebrew: https://brew.sh/
-.. _macports: https://www.macports.org/
-.. _requests: https://pypi.python.org/pypi/requests
diff --git a/doc/changelog.rst b/doc/changelog.rst
index b6b099fd31..f3eb4f6f23 100644
--- a/doc/changelog.rst
+++ b/doc/changelog.rst
@@ -1,8 +1,936 @@
 Changelog
 =========

-Changes in Version 4.2
-----------------------
+Changes in Version 4.16.0 (XXXX/XX/XX)
+--------------------------------------
+
+PyMongo 4.16 brings a number of changes including:
+
+.. warning:: PyMongo 4.16 drops support for Python 3.9: Python 3.10+ is now required.
+
+- Dropped support for Python 3.9.
+- Removed invalid documents from :class:`bson.errors.InvalidDocument` error messages,
+  since including them may leak sensitive user data.
+  Instead, invalid documents are stored in :attr:`bson.errors.InvalidDocument.document`.
+- PyMongo now requires ``dnspython>=2.6.1``, since ``dnspython`` 1.0 is no longer maintained and is incompatible with
+  Python 3.10+. The minimum version is ``2.6.1`` to account for `CVE-2023-29483 `_.
+- Removed support for Eventlet.
+  Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency.
+
+Changes in Version 4.15.3 (2025/10/07)
+--------------------------------------
+
+Version 4.15.3 is a bug fix release.
+
+- Fixed a memory leak when raising :class:`bson.errors.InvalidDocument` with C extensions.
+- Fixed the return type of the :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct`,
+  :meth:`~pymongo.synchronous.collection.Collection.distinct`, :meth:`pymongo.asynchronous.cursor.AsyncCursor.distinct`,
+  and :meth:`pymongo.cursor.Cursor.distinct` methods.
+
+Issues Resolved
+...............
+
+See the `PyMongo 4.15.3 release notes in JIRA`_ for the list of resolved issues
+in this release.
+
+..
_PyMongo 4.15.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=47293 + +Changes in Version 4.15.2 (2025/10/01) +-------------------------------------- + +Version 4.15.2 is a bug fix release. + +- Added wheels for Python 3.14 and 3.14t that were missing from the 4.15.0 release. Dropped the 3.13t wheel. + +Issues Resolved +............... + +See the `PyMongo 4.15.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.15.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=47186 + +Changes in Version 4.15.1 (2025/09/16) +-------------------------------------- + +Version 4.15.1 is a bug fix release. + +- Fixed a bug in :meth:`~pymongo.synchronous.encryption.ClientEncryption.encrypt` + and :meth:`~pymongo.asynchronous.encryption.AsyncClientEncryption.encrypt` + that would cause a ``TypeError`` when using ``pymongocrypt<1.16`` by passing + an unsupported ``type_opts`` parameter even if the Queryable Encryption text + queries beta was not used. + +- Fixed a bug in ``AsyncMongoClient`` that caused a ``ServerSelectionTimeoutError`` + when used with ``uvicorn``, ``FastAPI``, or ``uvloop``. + +Issues Resolved +............... + +See the `PyMongo 4.15.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.15.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=46486 + +Changes in Version 4.15.0 (2025/09/10) +-------------------------------------- + +PyMongo 4.15 brings a number of changes including: + +- Added :class:`~pymongo.encryption_options.TextOpts`, + :attr:`~pymongo.encryption.Algorithm.TEXTPREVIEW`, + :attr:`~pymongo.encryption.QueryType.PREFIXPREVIEW`, + :attr:`~pymongo.encryption.QueryType.SUFFIXPREVIEW`, + :attr:`~pymongo.encryption.QueryType.SUBSTRINGPREVIEW`, + as part of the experimental Queryable Encryption text queries beta. + ``pymongocrypt>=1.16`` is required for text query support. +- Added :class:`bson.decimal128.DecimalEncoder` and + :class:`bson.decimal128.DecimalDecoder` + to support encoding and decoding of BSON Decimal128 values to + decimal.Decimal values using the TypeRegistry API. +- Added support for Windows ``arm64`` wheels. + +Changes in Version 4.14.1 (2025/08/19) +-------------------------------------- + +Version 4.14.1 is a bug fix release. + +- Fixed a bug in ``MongoClient.append_metadata()`` and + ``AsyncMongoClient.append_metadata()`` + that allowed duplicate ``DriverInfo.name`` to be appended to the metadata. + +Issues Resolved +............... + +See the `PyMongo 4.14.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.14.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=45256 + +Changes in Version 4.14.0 (2025/08/06) +-------------------------------------- + +.. warning:: PyMongo 4.14 drops support for MongoDB 4.0. PyMongo now supports + MongoDB 4.2+. + +PyMongo 4.14 brings a number of changes including: + +- Dropped support for MongoDB 4.0. +- Added preliminary support for Python 3.14 and 3.14 with free-threading. We do + not yet support the following with Python 3.14: + + - Subinterpreters (``concurrent.interpreters``) + - Free-threading with Encryption + - mod_wsgi + +- Removed experimental support for free-threading in Python 3.13. 
+- Added :attr:`bson.codec_options.TypeRegistry.codecs` and + :attr:`bson.codec_options.TypeRegistry.fallback_encoder` properties + to allow users to directly access the type codecs and fallback encoder for a + given :class:`bson.codec_options.TypeRegistry`. +- Added + :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.append_metadata` and + :meth:`pymongo.mongo_client.MongoClient.append_metadata` to allow instantiated + MongoClients to send client metadata on-demand. +- Improved performance of selecting a server with the Primary selector. +- Introduced a minor breaking change: when encoding + :class:`bson.binary.BinaryVector`, a ``ValueError`` will be raised if the + 'padding' metadata field is < 0 or > 7, or non-zero for any type other than + PACKED_BIT. +- Changed :meth:`~pymongo.uri_parser.parse_uri`'s ``options`` return value to be + type ``dict`` instead of ``_CaseInsensitiveDictionary``. + +Issues Resolved +............... + +See the `PyMongo 4.14 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.14 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43041 + +Changes in Version 4.13.2 (2025/06/17) +-------------------------------------- + +Version 4.13.2 is a bug fix release. + +- Fixed a bug where ``AsyncMongoClient`` would block the event loop while creating new connections, + potentially significantly increasing latency for ongoing operations. + +Issues Resolved +............... + +See the `PyMongo 4.13.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43937 + + +Changes in Version 4.13.1 (2025/06/10) +-------------------------------------- + +Version 4.13.1 is a bug fix release. + +- Fixed a bug that could raise ``ServerSelectionTimeoutError`` when using timeouts with ``AsyncMongoClient``. +- Fixed a bug that could raise ``NetworkTimeout`` errors on Windows. + +Issues Resolved +............... + +See the `PyMongo 4.13.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43924 + +Changes in Version 4.13.0 (2025/05/14) +-------------------------------------- + +PyMongo 4.13 brings a number of changes including: + +- The asynchronous API is now stable and no longer in beta. + See the :mod:`pymongo.asynchronous` docs + or the `migration guide `_ for more information. +- Fixed a bug where :class:`pymongo.write_concern.WriteConcern` repr was not eval-able + when using ``w="majority"``. +- When padding is set, ignored bits in a BSON BinaryVector of PACKED_BIT dtype should be set to zero. + When encoding, this is enforced and is a breaking change. + It is not yet enforced when decoding, so reading from the database will not fail; however, a warning will be triggered. + From PyMongo 5.0, this rule will be enforced for both encoding and decoding. + +Issues Resolved +............... + +See the `PyMongo 4.13 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.13 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=42509 + +Changes in Version 4.12.1 (2025/04/29) +-------------------------------------- + +Version 4.12.1 is a bug fix release. + +- Fixed a bug that could raise ``UnboundLocalError`` when creating asynchronous connections over SSL. 
+- Fixed a bug causing SRV hostname validation to fail when resolver and resolved hostnames are identical with three domain levels. +- Fixed a bug that caused direct use of ``pymongo.uri_parser`` to raise an ``AttributeError``. +- Fixed a bug where clients created with ``connect=False`` and a "mongodb+srv://" connection string + could cause public ``pymongo.MongoClient`` and ``pymongo.AsyncMongoClient`` attributes (topology_description, + nodes, address, primary, secondaries, arbiters) to incorrectly return a Database, leading to type + errors such as: "NotImplementedError: Database objects do not implement truth value testing or bool()". +- Removed Eventlet testing against Python versions newer than 3.9 since + Eventlet is actively being sunset by its maintainers and has compatibility issues with PyMongo's dnspython dependency. +- Fixed a bug where MongoDB cluster topology changes could cause asynchronous operations to take much longer to complete + due to holding the Topology lock while closing stale connections. +- Fixed a bug that would cause AsyncMongoClient to attempt to use PyOpenSSL when available, resulting in errors such as + "pymongo.errors.ServerSelectionTimeoutError: 'SSLContext' object has no attribute 'wrap_bio'". + +Issues Resolved +............... + +See the `PyMongo 4.12.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.12.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=43094 + +Changes in Version 4.12.0 (2025/04/08) +-------------------------------------- + +.. warning:: Driver support for MongoDB 4.0 reached end of life in April 2025. + PyMongo 4.12 will be the last release to support MongoDB 4.0. + +PyMongo 4.12 brings a number of changes including: + +- Support for configuring DEK cache lifetime via the ``key_expiration_ms`` argument to + :class:`~pymongo.encryption_options.AutoEncryptionOpts`. +- Support for ``$lookup`` in CSFLE and QE on MongoDB 8.1+. +- pymongocrypt>=1.13 is now required for `In-Use Encryption `_ support. +- Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.rename_by_name` and :meth:`gridfs.grid_file.GridFSBucket.rename_by_name` + for more performant renaming of a file with multiple revisions. +- Added :meth:`gridfs.asynchronous.grid_file.AsyncGridFSBucket.delete_by_name` and :meth:`gridfs.grid_file.GridFSBucket.delete_by_name` + for more performant deletion of a file with multiple revisions. +- AsyncMongoClient no longer performs DNS resolution for "mongodb+srv://" connection strings on creation. + To avoid blocking the asyncio loop, the resolution is now deferred until the client is first connected. +- Added index hinting support to the + :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct` and + :meth:`~pymongo.collection.Collection.distinct` commands. +- Deprecated the ``hedge`` parameter for + :class:`~pymongo.read_preferences.PrimaryPreferred`, + :class:`~pymongo.read_preferences.Secondary`, + :class:`~pymongo.read_preferences.SecondaryPreferred`, + :class:`~pymongo.read_preferences.Nearest`. Support for ``hedge`` will be removed in PyMongo 5.0. +- Removed PyOpenSSL support from the asynchronous API due to limitations of the CPython asyncio.Protocol SSL implementation. +- Allow valid SRV hostnames with fewer than 3 parts. + +Issues Resolved +............... + +See the `PyMongo 4.12 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. 
_PyMongo 4.12 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=41916 + +Changes in Version 4.11.2 (2025/03/05) +-------------------------------------- + +Version 4.11.2 is a bug fix release. + +- Fixed a bug where :meth:`~pymongo.database.Database.command` would fail when attempting to run the bulkWrite command. + +Issues Resolved +............... + +See the `PyMongo 4.11.2 release notes in JIRA`_ for the list of resolved issues in this release. + +.. _PyMongo 4.11.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=42506 + +Changes in Version 4.11.1 (2025/02/10) +-------------------------------------- + +- Fixed support for prebuilt ``ppc64le`` and ``s390x`` wheels. + +Changes in Version 4.11.0 (2025/01/28) +-------------------------------------- + +.. warning:: PyMongo 4.11 drops support for Python 3.8 and PyPy 3.9: Python 3.9+ or PyPy 3.10+ is now required. +.. warning:: PyMongo 4.11 drops support for MongoDB 3.6. PyMongo now supports MongoDB 4.0+. + Driver support for MongoDB 3.6 reached end of life in April 2024. +.. warning:: Driver support for MongoDB 4.0 reaches end of life in April 2025. + A future minor release of PyMongo will raise the minimum supported MongoDB Server version from 4.0 to 4.2. + This is in accordance with the `MongoDB Software Lifecycle Schedules <https://www.mongodb.com/legal/support-policy/lifecycles>`_. + **Support for MongoDB Server 4.0 will be dropped in a future release!** +.. warning:: This version does not include wheels for ``ppc64le`` or ``s390x`` architectures, see `PYTHON-5058`_ for more information. + +PyMongo 4.11 brings a number of changes including: + +- Dropped support for Python 3.8 and PyPy 3.9. +- Dropped support for MongoDB 3.6. +- Dropped support for the MONGODB-CR authentication mechanism, which is no longer supported by MongoDB 4.0+. +- pymongocrypt>=1.12 is now required for `In-Use Encryption `_ support. +- Added support for free-threaded Python with the GIL disabled. For more information see: + `Free-threaded CPython `_. + We do not yet support free-threaded Python on Windows (`PYTHON-5027`_) or with In-Use Encryption (`PYTHON-5024`_). +- :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.address` and + :attr:`~pymongo.mongo_client.MongoClient.address` now correctly block when called on unconnected clients + until either connection succeeds or a server selection timeout error is raised. +- Added :func:`repr` support to :class:`pymongo.operations.IndexModel`. +- Added :func:`repr` support to :class:`pymongo.operations.SearchIndexModel`. +- Added the ``sort`` parameter to + :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.replace_one`, + :class:`~pymongo.operations.UpdateOne`, and + :class:`~pymongo.operations.UpdateMany` (see the example after this list). +- :meth:`~pymongo.mongo_client.MongoClient.bulk_write` and + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` now throw an error + when ``ordered=True`` or ``verboseResults=True`` are used with unacknowledged writes. + These are unavoidable breaking changes. +- Fixed a bug in :const:`bson.json_util.dumps` where a :class:`bson.datetime_ms.DatetimeMS` would + be incorrectly encoded as ``'{"$date": "X"}'`` instead of ``'{"$date": X}'`` when using the + legacy MongoDB Extended JSON datetime representation. +- Fixed a bug where :const:`bson.json_util.loads` would raise an IndexError when parsing an invalid + ``"$date"`` instead of a ValueError. 
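+
+For example, a minimal sketch of the new ``sort`` parameter on
+:meth:`~pymongo.collection.Collection.update_one` (the namespace, filter, and
+sort key are illustrative, and the server must support the ``sort`` option
+for update operations)::
+
+    from pymongo import MongoClient
+
+    client = MongoClient()
+    coll = client.test.tasks
+    # Update only the highest-priority matching document.
+    coll.update_one(
+        {"state": "queued"},
+        {"$set": {"state": "running"}},
+        sort=[("priority", -1)],
+    )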
+ +Issues Resolved +............... + +See the `PyMongo 4.11 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.11 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40784 +.. _PYTHON-5027: https://jira.mongodb.org/browse/PYTHON-5027 +.. _PYTHON-5024: https://jira.mongodb.org/browse/PYTHON-5024 +.. _PYTHON-5058: https://jira.mongodb.org/browse/PYTHON-5058 + +Changes in Version 4.10.1 (2024/10/01) +-------------------------------------- + +Version 4.10.1 is a bug fix release. + +- Fixed a bug where :meth:`~pymongo.results.UpdateResult.did_upsert` would raise a ``TypeError``. +- Fixed Binary BSON subtype (9) support on big-endian operating systems (such as zSeries). + +Issues Resolved +............... + +See the `PyMongo 4.10.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.10.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40788 + + +Changes in Version 4.10.0 (2024/09/30) +-------------------------------------- + +- Added provisional **(BETA)** support for a new Binary BSON subtype (9) used for efficient storage and retrieval of vectors: + densely packed arrays of numbers, all of the same type. + This includes new methods :meth:`~bson.binary.Binary.from_vector` and :meth:`~bson.binary.Binary.as_vector`. +- Added C extension use to client metadata, for example: ``{"driver": {"name": "PyMongo|c", "version": "4.10.0"}, ...}`` +- Fixed a bug where :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` could deadlock. +- Fixed a bug where PyMongo could fail to import on Windows if ``asyncio`` is misconfigured. + +Issues Resolved +............... + +See the `PyMongo 4.10 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40553 + +Changes in Version 4.9.2 (2024/10/02) +------------------------------------- + +- Fixed a bug where :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` could deadlock. +- Fixed a bug where PyMongo could fail to import on Windows if ``asyncio`` is misconfigured. +- Fixed a bug where :meth:`~pymongo.results.UpdateResult.did_upsert` would raise a ``TypeError``. + +Issues Resolved +............... + +See the `PyMongo 4.9.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.9.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40732 + + +Changes in Version 4.9.1 (2024/09/18) +------------------------------------- + +- Added missing documentation about the fact that the async API is in beta state. + +Issues Resolved +............... + +See the `PyMongo 4.9.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.9.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=40720 + + +Changes in Version 4.9 (2024/09/18) +----------------------------------- + +.. warning:: Driver support for MongoDB 3.6 reached end of life in April 2024. + PyMongo 4.9 will be the last release to support MongoDB 3.6. + +.. warning:: PyMongo 4.9 refactors a large portion of internal APIs to support the new asynchronous API beta. + As a result, versions of Motor older than 3.6 are not compatible with PyMongo 4.9. 
+ + Existing users of these versions must either upgrade to Motor 3.6 and PyMongo 4.9, + or cap their PyMongo version to ``< 4.9``. + Any applications that use private APIs may also break as a result of these internal changes. + +PyMongo 4.9 brings a number of improvements including: + +- Added support for MongoDB 8.0. +- Added support for Python 3.13. +- A new beta asynchronous API with full asyncio support. + This new asynchronous API is a work-in-progress that may change during the beta period before the full release. +- Added support for In-Use Encryption range queries with MongoDB 8.0. + Added :attr:`~pymongo.encryption.Algorithm.RANGE`. + ``sparsity`` and ``trim_factor`` are now optional in :class:`~pymongo.encryption_options.RangeOpts`. +- Added support for the "delegated" option for the KMIP ``master_key`` in + :meth:`~pymongo.encryption.ClientEncryption.create_data_key`. +- pymongocrypt>=1.10 is now required for `In-Use Encryption `_ support. +- Added :meth:`~pymongo.cursor.Cursor.to_list` to :class:`~pymongo.cursor.Cursor`, + :class:`~pymongo.command_cursor.CommandCursor`, + :class:`~pymongo.asynchronous.cursor.AsyncCursor`, + and :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` + as an asynchronous-friendly alternative to ``list(cursor)``. +- Added :meth:`~pymongo.mongo_client.MongoClient.bulk_write` to :class:`~pymongo.mongo_client.MongoClient` + and :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient`, + enabling users to perform insert, update, and delete operations + against mixed namespaces in a minimized number of round trips. + Please see `Client Bulk Write `_ for more information. +- Added support for the ``namespace`` parameter to the + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.DeleteOne`, and + :class:`~pymongo.operations.DeleteMany` operations, so + they can be used in the new :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. +- Added :func:`repr` support to :class:`bson.tz_util.FixedOffset`. +- Fixed a bug where PyMongo would raise ``InvalidBSON: unhashable type: 'tzfile'`` + when using :attr:`~bson.codec_options.DatetimeConversion.DATETIME_CLAMP` or + :attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` with a timezone from dateutil. +- Fixed a bug where PyMongo would raise ``InvalidBSON: date value out of range`` + when using :attr:`~bson.codec_options.DatetimeConversion.DATETIME_CLAMP` or + :attr:`~bson.codec_options.DatetimeConversion.DATETIME_AUTO` with a non-UTC timezone. +- Added a warning to unclosed MongoClient instances + telling users to explicitly close clients when finished with them to avoid leaking resources. + For example: + + .. code-block:: + + sys:1: ResourceWarning: Unclosed MongoClient opened at: + File "/Users//my_file.py", line 8, in `` + client = MongoClient() + Call MongoClient.close() to safely shut down your client and free up resources. +- The default value for ``connect`` in ``MongoClient`` is changed to ``False`` when running on + function-as-a-service (FaaS) platforms like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions. + On some FaaS systems, there is a ``fork()`` operation at function + startup. By delaying the connection to the first operation, we avoid a deadlock. See + `multiple forks `_ for more information. + + +Issues Resolved +............... 
+ +See the `PyMongo 4.9 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39940 + + +Changes in Version 4.8.0 (2024/06/26) +------------------------------------- + +.. warning:: PyMongo 4.8 drops support for Python 3.7 and PyPy 3.8: Python 3.8+ or PyPy 3.9+ is now required. + +PyMongo 4.8 brings a number of improvements including: + +- The handshake metadata for "os.name" on Windows has been simplified to "Windows" to improve import time. +- The repr of ``bson.binary.Binary`` is now redacted when the subtype is SENSITIVE_SUBTYPE(8). +- Secure Software Development Life Cycle automation for the release process. + GitHub Releases now include a Software Bill of Materials, and signature + files corresponding to the distribution files released on PyPI. +- Fixed a bug in change streams where both ``startAtOperationTime`` and ``resumeToken`` + could be added to a retry attempt, which caused the retry to fail. +- Fall back to the stdlib ``ssl`` module when the ``pyopenssl`` import fails with ``AttributeError``. +- Improved performance of MongoClient operations, especially when many operations are being run concurrently. + +Unavoidable breaking changes +............................ + +- Since we are now using ``hatch`` as our build backend, we no longer have a usable ``setup.py`` file + and require installation using ``pip``. Attempts to invoke the ``setup.py`` file will raise an exception. + Additionally, ``pip`` >= 21.3 is now required for editable installs. +- We no longer support the ``srv`` extra, since ``dnspython`` is included as a dependency in PyMongo 4.7+. + Instead of ``pip install pymongo[srv]``, use ``pip install pymongo``. +- We no longer support the ``tls`` extra, which was only valid for Python 2. + Instead of ``pip install pymongo[tls]``, use ``pip install pymongo``. + +Issues Resolved +............... + +See the `PyMongo 4.8 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37057 + +Changes in Version 4.7.3 (2024/06/04) +------------------------------------- + +Version 4.7.3 has further fixes for lazily loading modules. + +- Use deferred imports instead of importlib lazy module loading. +- Improve import time on Windows. +- Reduce verbosity of "Waiting for suitable server to become available" log message from info to debug. + +Issues Resolved +............... + +See the `PyMongo 4.7.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.7.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39865 + +Changes in Version 4.7.2 (2024/05/07) +------------------------------------- + +Version 4.7.2 fixes a bug introduced in 4.7.0: + +- Fixed a bug where PyMongo could not be used with the Nuitka compiler. + +Issues Resolved +............... + +See the `PyMongo 4.7.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39710 + + +Changes in Version 4.7.1 (2024/04/30) +------------------------------------- + +Version 4.7.1 fixes a bug introduced in 4.7.0: + +- Fixed a bug where PyMongo would cause an ``AttributeError`` if ``dns.resolver`` was imported and referenced + after PyMongo was imported. 
+- Clarified the behavior of the ``TOKEN_RESOURCE`` auth mechanism property for ``MONGODB-OIDC``. + +Issues Resolved +............... + +See the `PyMongo 4.7.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=39680 + +Changes in Version 4.7.0 (2024/04/24) +------------------------------------- + +PyMongo 4.7 brings a number of improvements including: + +- Added support for ``MONGODB-OIDC`` authentication. The MONGODB-OIDC mechanism authenticates + using an OpenID Connect (OIDC) access token. + The driver supports OIDC for workload identity, defined as an identity you assign to a software workload + (such as an application, service, script, or container) to authenticate and access other services and resources. + Please see `Authentication `_ for more information. +- Added support for Python's `native logging library `_, + enabling developers to customize the verbosity of log messages for their applications. + Please see `Logging `_ for more information. +- Significantly improved the performance of encoding BSON documents to JSON. +- Added support for named KMS providers for client side field level encryption. + Previously supported KMS providers were only: aws, azure, gcp, kmip, and local. + The KMS provider is now expanded to support name suffixes (e.g. local:myname). + Named KMS providers enable more than one of each KMS provider type to be configured. + See the docstring for :class:`~pymongo.encryption_options.AutoEncryptionOpts`. + Note that named KMS providers require pymongocrypt >=1.9 and libmongocrypt >=1.9. +- Added the :attr:`pymongo.hello.Hello.connection_id`, + :attr:`pymongo.monitoring.CommandStartedEvent.server_connection_id`, + :attr:`pymongo.monitoring.CommandSucceededEvent.server_connection_id`, and + :attr:`pymongo.monitoring.CommandFailedEvent.server_connection_id` properties. +- Fixed a bug where inflating a :class:`~bson.raw_bson.RawBSONDocument` containing a :class:`~bson.code.Code` would cause an error. +- :meth:`~pymongo.encryption.ClientEncryption.encrypt` and + :meth:`~pymongo.encryption.ClientEncryption.encrypt_expression` now allow ``key_id`` + to be passed in as a :class:`uuid.UUID`. +- Fixed a bug where :class:`~bson.int64.Int64` instances could not always be encoded by `orjson`_. The following now + works:: + + >>> import orjson + >>> from bson import json_util + >>> from bson.int64 import Int64 + >>> orjson.dumps({'a': Int64(1)}, default=json_util.default, option=orjson.OPT_PASSTHROUGH_SUBCLASS) + +.. _orjson: https://github.com/ijl/orjson + +- Fixed a bug appearing in Python 3.12 where "RuntimeError: can't create new thread at interpreter shutdown" + could be written to stderr when a MongoClient's thread starts as the Python interpreter is shutting down. +- Added a warning when connecting to DocumentDB and CosmosDB clusters. + For more information regarding feature compatibility and support please visit + `mongodb.com/supportability/documentdb `_ and + `mongodb.com/supportability/cosmosdb `_. +- Added the :attr:`pymongo.monitoring.ConnectionCheckedOutEvent.duration`, + :attr:`pymongo.monitoring.ConnectionCheckOutFailedEvent.duration`, and + :attr:`pymongo.monitoring.ConnectionReadyEvent.duration` properties. +- Added the ``type`` and ``kwargs`` arguments to :class:`~pymongo.operations.SearchIndexModel` to enable + creating vector search indexes in MongoDB Atlas. 
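+
+  A minimal sketch of creating an Atlas vector search index (the field name,
+  dimensionality, and index name are illustrative, and an Atlas cluster is
+  required)::
+
+      from pymongo import MongoClient
+      from pymongo.operations import SearchIndexModel
+
+      client = MongoClient()
+      model = SearchIndexModel(
+          definition={
+              "fields": [
+                  {
+                      "type": "vector",
+                      "path": "embedding",
+                      "numDimensions": 1536,
+                      "similarity": "cosine",
+                  }
+              ]
+          },
+          name="vector_index",
+          type="vectorSearch",
+      )
+      client.test.movies.create_search_index(model)
+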
+- Fixed a bug where ``read_concern`` and ``write_concern`` were improperly added to + :meth:`~pymongo.collection.Collection.list_search_indexes` queries. +- Deprecated :attr:`pymongo.write_concern.WriteConcern.wtimeout` and :attr:`pymongo.mongo_client.MongoClient.wTimeoutMS`. + Use :meth:`~pymongo.timeout` instead. + +.. warning:: PyMongo depends on ``dnspython``, which released version 2.6.1 with a fix for + `CVE-2023-29483 `_. We do not explicitly require + that version, but we strongly recommend that you install at least that version in your environment. + +Unavoidable breaking changes +............................ + +- Replaced usage of :class:`bson.son.SON` on all internal classes and commands with ``dict``; + :attr:`options.pool_options.metadata` is now of type ``dict`` as opposed to :class:`bson.son.SON`. + Here are some examples of how this changes expected output as well as how to convert from :class:`dict` to :class:`bson.son.SON`:: + + # Before + >>> from pymongo import MongoClient + >>> client = MongoClient() + >>> client.options.pool_options.metadata + SON([('driver', SON([('name', 'PyMongo'), ('version', '4.7.0.dev0')])), ('os', SON([('type', 'Darwin'), ('name', 'Darwin'), ('architecture', 'arm64'), ('version', '14.3')])), ('platform', 'CPython 3.11.6.final.0')]) + + # After + >>> client.options.pool_options.metadata + {'driver': {'name': 'PyMongo', 'version': '4.7.0.dev0'}, 'os': {'type': 'Darwin', 'name': 'Darwin', 'architecture': 'arm64', 'version': '14.3'}, 'platform': 'CPython 3.11.6.final.0'} + + # To convert from dict to SON + # This will only convert the first layer of the dictionary + >>> from bson.son import SON + >>> data_as_dict = client.options.pool_options.metadata + >>> SON(data_as_dict) + SON([('driver', {'name': 'PyMongo', 'version': '4.7.0.dev0'}), ('os', {'type': 'Darwin', 'name': 'Darwin', 'architecture': 'arm64', 'version': '14.3'}), ('platform', 'CPython 3.11.6.final.0')]) + + # To convert from dict to SON on a nested dictionary + >>> from typing import Any + >>> def dict_to_SON(data_as_dict: dict[Any, Any]): + ... data_as_SON = SON() + ... for key, value in data_as_dict.items(): + ... data_as_SON[key] = dict_to_SON(value) if isinstance(value, dict) else value + ... return data_as_SON + >>> + >>> dict_to_SON(data_as_dict) + SON([('driver', SON([('name', 'PyMongo'), ('version', '4.7.0.dev0')])), ('os', SON([('type', 'Darwin'), ('name', 'Darwin'), ('architecture', 'arm64'), ('version', '14.3')])), ('platform', 'CPython 3.11.6.final.0')]) + +- PyMongo now uses `lazy imports `_ for external dependencies. + If you are relying on any kind of monkey-patching of the standard library, you may need to explicitly import those external libraries in addition + to ``pymongo`` before applying the patch. Note that we test with ``gevent`` and ``eventlet`` patching, and those continue to work. + +- The "aws" extra now requires a minimum version of ``1.1.0`` for ``pymongo_auth_aws``. + +Changes in Version 4.6.3 (2024/03/27) +------------------------------------- + +PyMongo 4.6.3 fixes the following bug: + +- Fixed a potential memory access violation when decoding invalid BSON. + +Issues Resolved +............... + +See the `PyMongo 4.6.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. 
_PyMongo 4.6.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=38360 + +Changes in Version 4.6.2 (2024/02/21) +------------------------------------- + +PyMongo 4.6.2 fixes the following bug: + +- Fixed a bug appearing in Python 3.12 where "RuntimeError: can't create new thread at interpreter shutdown" + could be written to stderr when a MongoClient's thread starts as the Python interpreter is shutting down. + +Issues Resolved +............... + +See the `PyMongo 4.6.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.6.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37906 + +Changes in Version 4.6.1 (2023/11/29) +------------------------------------- + +PyMongo 4.6.1 fixes the following bug: + +- Ensure retryable read ``OperationFailure`` errors re-raise the exception when a ``0`` or ``None`` error code is provided. + +Issues Resolved +............... + +See the `PyMongo 4.6.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=37138 + +Changes in Version 4.6.0 (2023/11/01) +------------------------------------- + +PyMongo 4.6 brings a number of improvements including: + +- Added the ``serverMonitoringMode`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. +- Improved client performance and reduced connection requirements in Function-as-a-service (FaaS) + environments like AWS Lambda, Google Cloud Functions, and Microsoft Azure Functions. +- Added the :attr:`pymongo.monitoring.CommandSucceededEvent.database_name` property. +- Added the :attr:`pymongo.monitoring.CommandFailedEvent.database_name` property. +- Allow passing a ``dict`` to sort/create_index/hint. +- Added :func:`repr` support to the write result classes: + :class:`~pymongo.results.BulkWriteResult`, + :class:`~pymongo.results.DeleteResult`, + :class:`~pymongo.results.InsertManyResult`, + :class:`~pymongo.results.InsertOneResult`, + :class:`~pymongo.results.UpdateResult`, and + :class:`~pymongo.encryption.RewrapManyDataKeyResult`. For example: + + >>> client.t.t.insert_one({}) + InsertOneResult(ObjectId('65319acdd55bb3a27ab5502b'), acknowledged=True) + >>> client.t.t.insert_many([{} for _ in range(3)]) + InsertManyResult([ObjectId('6532f85e826f2b6125d6ce39'), ObjectId('6532f85e826f2b6125d6ce3a'), ObjectId('6532f85e826f2b6125d6ce3b')], acknowledged=True) + +- :meth:`~pymongo.uri_parser.parse_uri` now considers the delimiting slash (``/``) + between hosts and connection options optional. For example, + "mongodb://example.com?tls=true" is now a valid URI. +- Fixed a bug where PyMongo would incorrectly promote all cursors to exhaust cursors + when connected to load balanced MongoDB clusters or Serverless clusters. +- Added the `network compression `_ documentation page. +- Added more timeout information to network errors. + +Issues Resolved +............... + +See the `PyMongo 4.6 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. 
_PyMongo 4.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=36542 + +Changes in Version 4.5.0 (2023/08/22) +------------------------------------- + +PyMongo 4.5 brings a number of improvements including: + +- Added new helper methods for Atlas Search Index (requires MongoDB Server 7.0+): + :meth:`~pymongo.collection.Collection.list_search_indexes`, + :meth:`~pymongo.collection.Collection.create_search_index`, + :meth:`~pymongo.collection.Collection.create_search_indexes`, + :meth:`~pymongo.collection.Collection.drop_search_index`, + :meth:`~pymongo.collection.Collection.update_search_index` +- Added :meth:`~pymongo.database.Database.cursor_command` + and :meth:`~pymongo.command_cursor.CommandCursor.try_next` to support + executing an arbitrary command that returns a cursor. +- ``cryptography`` 2.5 or later is now required for `OCSP `_ support. +- Improved bson encoding and decoding performance by up to 134% (`PYTHON-3729`_, `PYTHON-3797`_, `PYTHON-3816`_, `PYTHON-3817`_, `PYTHON-3820`_, `PYTHON-3824`_, and `PYTHON-3846`_). + +.. warning:: PyMongo no longer supports PyPy3 versions older than 3.8. Users + must upgrade to PyPy3.8+. + +Issues Resolved +............... + +See the `PyMongo 4.5 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=35492 + +.. _PYTHON-3729: https://jira.mongodb.org/browse/PYTHON-3729 +.. _PYTHON-3797: https://jira.mongodb.org/browse/PYTHON-3797 +.. _PYTHON-3816: https://jira.mongodb.org/browse/PYTHON-3816 +.. _PYTHON-3817: https://jira.mongodb.org/browse/PYTHON-3817 +.. _PYTHON-3820: https://jira.mongodb.org/browse/PYTHON-3820 +.. _PYTHON-3824: https://jira.mongodb.org/browse/PYTHON-3824 +.. _PYTHON-3846: https://jira.mongodb.org/browse/PYTHON-3846 + +Changes in Version 4.4.1 (2023/07/13) +------------------------------------- + +Version 4.4.1 fixes the following bugs: + +- Fixed a bug where pymongo would raise a ``ConfigurationError: Invalid SRV host`` + error when connecting to a "mongodb+srv://" URI that included capital letters + in the SRV hosts returned from DNS (`PYTHON-3800`_). +- Fixed a minor reference counting bug in the C extension (`PYTHON-3798`_). + +Issues Resolved +............... + +See the `PyMongo 4.4.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-3798: https://jira.mongodb.org/browse/PYTHON-3798 +.. _PYTHON-3800: https://jira.mongodb.org/browse/PYTHON-3800 +.. _PyMongo 4.4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=36329 + +Changes in Version 4.4.0 (2023/06/21) +------------------------------------- + +PyMongo 4.4 brings a number of improvements including: + +- Added support for MongoDB 7.0. +- Added support for Python 3.11. +- Added support for passing a list containing (key, direction) pairs + or keys to :meth:`~pymongo.collection.Collection.create_index`. +- Improved bson encoding performance (`PYTHON-3717`_ and `PYTHON-3718`_). +- Improved support for Pyright, improving typing support for IDEs like Visual Studio Code + or Visual Studio. +- Improved support for type-checking with MyPy "strict" mode (``--strict``). 
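+
+  For example, a minimal sketch of the kind of annotation this improves
+  (the document shape and names are illustrative)::
+
+      from typing import Any, Optional
+
+      from pymongo import MongoClient
+
+      client: MongoClient[dict[str, Any]] = MongoClient()
+      doc: Optional[dict[str, Any]] = client.test.coll.find_one({})
+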
+- Added :meth:`~pymongo.encryption.ClientEncryption.create_encrypted_collection`, + :class:`~pymongo.errors.EncryptedCollectionError`, + :meth:`~pymongo.encryption.ClientEncryption.encrypt_expression`, + :class:`~pymongo.encryption_options.RangeOpts`, + and :attr:`~pymongo.encryption.Algorithm.RANGEPREVIEW` as part of the experimental + Queryable Encryption beta. +- pymongocrypt 1.6.0 or later is now required for `In-Use Encryption `_ support. MongoDB + Server 7.0 introduced a backwards breaking change to the QE protocol. Users taking + advantage of the Queryable Encryption beta must now upgrade to MongoDB 7.0+ and + PyMongo 4.4+. +- Previously, PyMongo's docs recommended using :meth:`datetime.datetime.utcnow` and + :meth:`datetime.datetime.utcfromtimestamp`. utcnow and utcfromtimestamp are deprecated + in Python 3.12, for reasons explained `in this GitHub issue`_. Users should + use :meth:`datetime.datetime.now(tz=timezone.utc)` and + :meth:`datetime.datetime.fromtimestamp(tz=timezone.utc)` instead. + +.. _in this GitHub issue: https://github.com/python/cpython/issues/103857 + +Issues Resolved +............... + +See the `PyMongo 4.4 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34354 + +.. _PYTHON-3717: https://jira.mongodb.org/browse/PYTHON-3717 +.. _PYTHON-3718: https://jira.mongodb.org/browse/PYTHON-3718 + +Changes in Version 4.3.3 (2022/11/17) +------------------------------------- + +Version 4.3.3 documents support for the following: + +- `CSFLE on-demand credentials `_ for cloud KMS providers. +- Authentication support for `EKS Clusters `_. +- Added the `timeout `_ example page to improve the documentation + for :func:`pymongo.timeout`. + +Bug Fixes +......... +- Fixed a performance regression in :meth:`~gridfs.GridFSBucket.download_to_stream` + and :meth:`~gridfs.GridFSBucket.download_to_stream_by_name` by reading in chunks + instead of line by line (`PYTHON-3502`_). +- Improved performance of :meth:`gridfs.grid_file.GridOut.read` and + :meth:`gridfs.grid_file.GridOut.readline` (`PYTHON-3508`_). + +Issues Resolved +............... + +See the `PyMongo 4.3.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-3502: https://jira.mongodb.org/browse/PYTHON-3502 +.. _PYTHON-3508: https://jira.mongodb.org/browse/PYTHON-3508 +.. _PyMongo 4.3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=34709 + +Changes in Version 4.3.2 (2022/10/18) +------------------------------------- + +Note: We withheld uploading tags 4.3.0 and 4.3.1 to PyPI due to a +version handling error and a necessary documentation update. + +`dnspython `_ is now a required +dependency. This change makes PyMongo easier to install for use with "mongodb+srv://" +connection strings and `MongoDB Atlas `_. + +PyMongo 4.3 brings a number of improvements including: + +- Added support for decoding BSON datetimes outside of the range supported + by Python's :class:`~datetime.datetime` builtin. See + `handling out of range datetimes `_ for examples, as well as + :class:`bson.datetime_ms.DatetimeMS`, + :class:`bson.codec_options.DatetimeConversion`, and + :class:`bson.codec_options.CodecOptions`'s ``datetime_conversion`` + parameter for more details (`PYTHON-1824`_). 
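+
+  A minimal sketch (the out-of-range value is illustrative)::
+
+      from bson import decode, encode
+      from bson.codec_options import CodecOptions, DatetimeConversion
+      from bson.datetime_ms import DatetimeMS
+
+      opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
+      # A BSON datetime far outside datetime.datetime's year range
+      # round-trips as DatetimeMS instead of raising an error.
+      raw = encode({"when": DatetimeMS(2**61)})
+      print(decode(raw, codec_options=opts)["when"])
+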
+- PyMongo now resets its locks and other shared state in the child process + after an :py:func:`os.fork` to reduce the frequency of deadlocks. Note that + deadlocks are still possible because libraries that PyMongo depends on, like + OpenSSL, cannot be made fork() safe in multithreaded applications. + (`PYTHON-2484`_). For more info see `multiple forks `_. +- When used with MongoDB 6.0+, :class:`~pymongo.change_stream.ChangeStream` s + now allow for new types of events (such as DDL and C2C replication events) + to be recorded with the new parameter ``show_expanded_events`` + that can be passed to methods such as :meth:`~pymongo.collection.Collection.watch`. +- PyMongo now internally caches AWS credentials that it fetches from AWS + endpoints, to avoid rate limiting. The cache is cleared when the + credentials expire or an error is encountered. +- When using the ``MONGODB-AWS`` authentication mechanism with the + ``aws`` extra, the behavior of credential fetching has changed with + ``pymongo_auth_aws>=1.1.0``. Please see `Authentication `_ for + more information. + +Bug fixes +......... + +- Fixed a bug where :class:`~pymongo.change_stream.ChangeStream` + would allow an app to retry calling ``next()`` or ``try_next()`` even + after non-resumable errors (`PYTHON-3389`_). +- Fixed a bug where the client could be unable to discover the new primary + after a simultaneous replica set election and reconfig (`PYTHON-2970`_). + +Issues Resolved +............... + +See the `PyMongo 4.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-1824: https://jira.mongodb.org/browse/PYTHON-1824 +.. _PYTHON-2484: https://jira.mongodb.org/browse/PYTHON-2484 +.. _PYTHON-2970: https://jira.mongodb.org/browse/PYTHON-2970 +.. _PYTHON-3389: https://jira.mongodb.org/browse/PYTHON-3389 +.. _PyMongo 4.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33425 + +Changes in Version 4.2.0 (2022/07/20) +------------------------------------- .. warning:: PyMongo 4.2 drops support for Python 3.6: Python 3.7+ is now required. @@ -10,14 +938,29 @@ PyMongo 4.2 brings a number of improvements including: - Support for MongoDB 6.0. - Support for the Queryable Encryption beta with MongoDB 6.0. Note that backwards-breaking - changes may be made before the final release. See :ref:`automatic-queryable-client-side-encryption` for example usage. + changes may be made before the final release. See `automatic queryable client-side encryption `_ for example usage. - Provisional (beta) support for :func:`pymongo.timeout` to apply a single timeout - to an entire block of pymongo operations. + to an entire block of pymongo operations. See `timeout `_ for examples. +- Added the ``timeoutMS`` URI and keyword argument to :class:`~pymongo.mongo_client.MongoClient`. - Added the :attr:`pymongo.errors.PyMongoError.timeout` property which is ``True`` when the error was caused by a timeout. -- Added ``check_exists`` option to :meth:`~pymongo.database.Database.create_collection` +- Added the ``check_exists`` argument to :meth:`~pymongo.database.Database.create_collection` that when True (the default) runs an additional ``listCollections`` command to verify that the collection does not exist already. 
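+
+  A minimal sketch (the database and collection names are illustrative)::
+
+      from pymongo import MongoClient
+
+      client = MongoClient()
+      # Skip the extra listCollections round trip when the collection is
+      # known not to exist yet.
+      client.test.create_collection("events", check_exists=False)
+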
+- Added the following key management APIs to :class:`~pymongo.encryption.ClientEncryption`: + + - :meth:`~pymongo.encryption.ClientEncryption.get_key` + - :meth:`~pymongo.encryption.ClientEncryption.get_keys` + - :meth:`~pymongo.encryption.ClientEncryption.delete_key` + - :meth:`~pymongo.encryption.ClientEncryption.add_key_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.get_key_by_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.remove_key_alt_name` + - :meth:`~pymongo.encryption.ClientEncryption.rewrap_many_data_key` + - :class:`~pymongo.encryption.RewrapManyDataKeyResult` + +- Support for the ``crypt_shared`` library to replace ``mongocryptd`` using the new + ``crypt_shared_lib_path`` and ``crypt_shared_lib_required`` arguments to + :class:`~pymongo.encryption_options.AutoEncryptionOpts`. Bug fixes ......... @@ -25,7 +968,18 @@ Bug fixes - Fixed a bug where :meth:`~pymongo.collection.Collection.estimated_document_count` would fail with a "CommandNotSupportedOnView" error on views (`PYTHON-2885`_). - Fixed a bug where invalid UTF-8 strings could be passed as patterns for :class:`~bson.regex.Regex` - objects (`PYTHON-3048`_). :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData`. + objects. :func:`bson.encode` now correctly raises :class:`bson.errors.InvalidStringData` (`PYTHON-3048`_). +- Fixed a bug that caused ``AutoReconnect("connection pool paused")`` errors in the child + process after fork (`PYTHON-3257`_). +- Fixed a bug where :meth:`~pymongo.collection.Collection.count_documents` and + :meth:`~pymongo.collection.Collection.distinct` would fail in a transaction with + ``directConnection=True`` (`PYTHON-3333`_). +- GridFS no longer uploads an incomplete files collection document after encountering an + error in the middle of an upload. This results in fewer + :class:`~gridfs.errors.CorruptGridFile` errors (`PYTHON-1552`_). +- Renamed PyMongo's internal C extension methods to avoid crashing due to name conflicts + with mpi4py and other shared libraries (`PYTHON-2110`_). +- Fixed tight CPU loop for network I/O when using PyOpenSSL (`PYTHON-3187`_). Unavoidable breaking changes ............................ @@ -34,10 +988,15 @@ Unavoidable breaking changes encryption support. - :meth:`~pymongo.collection.Collection.estimated_document_count` now always uses the `count`_ command. Due to an oversight in versions 5.0.0-5.0.8 of MongoDB, - the count command was not included in V1 of the :ref:`versioned-api-ref`. + the count command was not included in V1 of the `Stable API `_. Users of the Stable API with estimated_document_count are recommended to upgrade their server version to 5.0.9+ or set :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors (`PYTHON-3167`_). +- Removed generic typing from :class:`~pymongo.client_session.ClientSession` to improve + support for Pyright (`PYTHON-3283`_). +- Added ``__all__`` to the bson, pymongo, and gridfs packages. This could be a breaking + change for apps that relied on ``from bson import *`` to import APIs not present in + ``__all__`` (`PYTHON-3311`_). .. _count: https://mongodb.com/docs/manual/reference/command/count/ @@ -50,11 +1009,17 @@ in this release. .. _PYTHON-3048: https://jira.mongodb.org/browse/PYTHON-3048 .. _PYTHON-2885: https://jira.mongodb.org/browse/PYTHON-2885 .. _PYTHON-3167: https://jira.mongodb.org/browse/PYTHON-3167 +.. _PYTHON-3257: https://jira.mongodb.org/browse/PYTHON-3257 +.. _PYTHON-3333: https://jira.mongodb.org/browse/PYTHON-3333 +.. 
_PYTHON-1552: https://jira.mongodb.org/browse/PYTHON-1552 +.. _PYTHON-2110: https://jira.mongodb.org/browse/PYTHON-2110 +.. _PYTHON-3283: https://jira.mongodb.org/browse/PYTHON-3283 +.. _PYTHON-3311: https://jira.mongodb.org/browse/PYTHON-3311 +.. _PYTHON-3187: https://jira.mongodb.org/browse/PYTHON-3187 .. _PyMongo 4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33196 -.. _Queryable Encryption: automatic-queryable-client-side-encryption -Changes in Version 4.1.1 -------------------------- +Changes in Version 4.1.1 (2022/04/13) +------------------------------------- Version 4.1.1 fixes a number of bugs: @@ -79,14 +1044,14 @@ in this release. .. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222 .. _PyMongo 4.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=33290 -Changes in Version 4.1 ----------------------- +Changes in Version 4.1 (2021/12/07) +----------------------------------- .. warning:: PyMongo 4.1 drops support for Python 3.6.0 and 3.6.1, Python 3.6.2+ is now required. PyMongo 4.1 brings a number of improvements including: -- Type Hinting support (formerly provided by `pymongo-stubs`_). See :doc:`examples/type_hints` for more information. +- Type Hinting support (formerly provided by `pymongo-stubs`_). See `Type Hints `_ for more information. - Added support for the ``comment`` parameter to all helpers. For example see :meth:`~pymongo.collection.Collection.insert_one`. - Added support for the ``let`` parameter to @@ -134,8 +1099,18 @@ in this release. .. _PYTHON-3186: https://jira.mongodb.org/browse/PYTHON-3186 .. _pymongo-stubs: https://github.com/mongodb-labs/pymongo-stubs -Changes in Version 4.0 ----------------------- +Changes in Version 4.0.2 (2022/03/03) +------------------------------------- + +- No changes + +Changes in Version 4.0.1 (2021/12/07) +------------------------------------- + +- No changes + +Changes in Version 4.0 (2021/11/29) +----------------------------------- .. warning:: PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. @@ -165,7 +1140,7 @@ Breaking Changes in 4.0 :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a :class:`uuid.UUID` instance to BSON or JSON now produces an error by default. - See :ref:`handling-uuid-data-example` for details. + See `UUID representations `_ for details. - Removed the ``waitQueueMultiple`` keyword argument to :class:`~pymongo.mongo_client.MongoClient` and removed :exc:`pymongo.errors.ExceededMaxWaiters`. @@ -322,7 +1297,7 @@ Breaking Changes in 4.0 - Comparing two :class:`~pymongo.mongo_client.MongoClient` instances now uses a set of immutable properties rather than :attr:`~pymongo.mongo_client.MongoClient.address` which can change. -- Removed the `disable_md5` parameter for :class:`~gridfs.GridFSBucket` and +- Removed the ``disable_md5`` parameter for :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS`. See :ref:`removed-gridfs-checksum` for details. - pymongocrypt 1.2.0 or later is now required for client side field level encryption support. @@ -335,10 +1310,10 @@ Notable improvements - Added the ``maxConnecting`` URI and :class:`~pymongo.mongo_client.MongoClient` keyword argument. 
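+
+  For example, a minimal sketch (the value is illustrative)::
+
+      from pymongo import MongoClient
+
+      # Allow up to 4 connections per pool to be established concurrently
+      # (the default is 2).
+      client = MongoClient(maxConnecting=4)
+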
- :class:`~pymongo.mongo_client.MongoClient` now accepts a URI and keyword - argument `srvMaxHosts` that limits the number of mongos-like hosts a client + argument ``srvMaxHosts`` that limits the number of mongos-like hosts a client will connect to. More specifically, when a mongodb+srv:// connection string - resolves to more than `srvMaxHosts` number of hosts, the client will randomly - choose a `srvMaxHosts` sized subset of hosts. + resolves to more than ``srvMaxHosts`` number of hosts, the client will randomly + choose a ``srvMaxHosts`` sized subset of hosts. - Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access to a client's configuration options. - Support for the "kmip" KMS provider for client side field level encryption. @@ -352,10 +1327,139 @@ See the `PyMongo 4.0 release notes in JIRA`_ for the list of resolved issues in this release. .. _PyMongo 4.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18463 -.. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst +.. _DBRef specification: https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md + +Changes in Version 3.13.0 (2022/11/01) +-------------------------------------- + +Version 3.13 provides an upgrade path to PyMongo 4.x. Most of the API changes +from PyMongo 4.0 have been backported in a backward compatible way, allowing +applications to be written against PyMongo >= 3.13, rather than PyMongo 3.x or +PyMongo 4.x. See the `PyMongo 4 Migration Guide`_ for detailed examples. -Changes in Version 3.12.0 ------------------------- +Notable improvements +.................... +- Added :attr:`pymongo.mongo_client.MongoClient.options` for read-only access + to a client's configuration options. + + +PyMongo 3.13 drops support for Python 3.4. + +Bug fixes +......... + +- Fixed a memory leak bug when calling :func:`~bson.decode_all` without a + ``codec_options`` argument (`PYTHON-3222`_). +- Fixed a bug where :func:`~bson.decode_all` did not accept ``codec_options`` + as a keyword argument (`PYTHON-3222`_). + +Deprecations +............ +- Deprecated :meth:`~pymongo.collection.Collection.map_reduce` and + :meth:`~pymongo.collection.Collection.inline_map_reduce`. + Use :meth:`~pymongo.collection.Collection.aggregate` instead. +- Deprecated :attr:`pymongo.mongo_client.MongoClient.event_listeners`. + Use :attr:`~pymongo.mongo_client.options.event_listeners` instead. +- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_pool_size`. + Use :attr:`~pymongo.mongo_client.options.pool_options.max_pool_size` instead. +- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_idle_time_ms`. + Use :attr:`~pymongo.mongo_client.options.pool_options.max_idle_time_seconds` instead. +- Deprecated :attr:`pymongo.mongo_client.MongoClient.local_threshold_ms`. + Use :attr:`~pymongo.mongo_client.options.local_threshold_ms` instead. +- Deprecated :attr:`pymongo.mongo_client.MongoClient.server_selection_timeout`. + Use :attr:`~pymongo.mongo_client.options.server_selection_timeout` instead. +- Deprecated :attr:`pymongo.mongo_client.MongoClient.retry_writes`. + Use :attr:`~pymongo.mongo_client.options.retry_writes` instead. +- Deprecated :attr:`pymongo.mongo_client.MongoClient.retry_reads`. + Use :attr:`~pymongo.mongo_client.options.retry_reads` instead. 
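+
+  For example, a minimal sketch of reading settings through
+  :attr:`pymongo.mongo_client.MongoClient.options` (the URI is illustrative)::
+
+      from pymongo import MongoClient
+
+      client = MongoClient("mongodb://localhost:27017/?retryReads=true")
+      print(client.options.retry_reads)   # True
+      print(client.options.retry_writes)  # True (the default)
+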
+- Deprecated :attr:`pymongo.mongo_client.MongoClient.max_bson_size`, + :attr:`pymongo.mongo_client.MongoClient.max_message_size`, and + :attr:`pymongo.mongo_client.MongoClient.max_write_batch_size`. These helpers + were incorrect when in ``loadBalanced=true`` mode and ambiguous in clusters + with mixed versions. Use the `hello command`_ to get the authoritative + value from the remote server instead. Code like this:: + + max_bson_size = client.max_bson_size + max_message_size = client.max_message_size + max_write_batch_size = client.max_write_batch_size + +can be changed to this:: + + doc = client.admin.command('hello') + max_bson_size = doc['maxBsonObjectSize'] + max_message_size = doc['maxMessageSizeBytes'] + max_write_batch_size = doc['maxWriteBatchSize'] + +.. _hello command: https://docs.mongodb.com/manual/reference/command/hello/ + +See the `PyMongo 3.13.0 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PyMongo 4 Migration Guide: https://pymongo.readthedocs.io/en/stable/migrate-to-pymongo4.html +.. _PYTHON-3222: https://jira.mongodb.org/browse/PYTHON-3222 +.. _PyMongo 3.13.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=31570 + +Changes in Version 3.12.3 (2021/12/07) +-------------------------------------- + +Issues Resolved +............... + +Version 3.12.3 fixes a bug that prevented :meth:`bson.json_util.loads` from +decoding a document with a non-string "$regex" field (`PYTHON-3028`_). + +See the `PyMongo 3.12.3 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-3028: https://jira.mongodb.org/browse/PYTHON-3028 +.. _PyMongo 3.12.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=32505 + +Changes in Version 3.12.2 (2021/11/29) +-------------------------------------- + +Issues Resolved +............... + +Version 3.12.2 fixes a number of bugs: + +- Fixed a bug that prevented PyMongo from retrying bulk writes + after a ``writeConcernError`` on MongoDB 4.4+ (`PYTHON-2984`_). +- Fixed a bug that could cause the driver to hang during automatic + client side field level encryption (`PYTHON-3017`_). + +See the `PyMongo 3.12.2 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-2984: https://jira.mongodb.org/browse/PYTHON-2984 +.. _PYTHON-3017: https://jira.mongodb.org/browse/PYTHON-3017 +.. _PyMongo 3.12.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=32310 + +Changes in Version 3.12.1 (2021/10/19) +-------------------------------------- + +Issues Resolved +............... + +Version 3.12.1 fixes a number of bugs: + +- Fixed a bug that caused a multi-document transaction to fail when the first + operation was a large bulk write (>48MB) that required splitting a batched + write command (`PYTHON-2915`_). +- Fixed a bug that caused the ``tlsDisableOCSPEndpointCheck`` URI option to + be applied incorrectly (`PYTHON-2866`_). + +See the `PyMongo 3.12.1 release notes in JIRA`_ for the list of resolved issues +in this release. + +.. _PYTHON-2915: https://jira.mongodb.org/browse/PYTHON-2915 +.. _PYTHON-2866: https://jira.mongodb.org/browse/PYTHON-2866 +.. _PyMongo 3.12.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=31527 + +Changes in Version 3.12.0 (2021/07/13) +-------------------------------------- .. warning:: PyMongo 3.12.0 deprecates support for Python 2.7, 3.4 and 3.5. 
These Python versions will not be supported by PyMongo 4. @@ -368,14 +1472,14 @@ Changes in Version 3.12.0 - Iterating over :class:`gridfs.grid_file.GridOut` now moves through the file line by line instead of chunk by chunk, and does not restart at the top for subsequent iterations on the same object. - Call `seek(0)` to reset the iterator. + Call ``seek(0)`` to reset the iterator. Notable improvements .................... - Added support for MongoDB 5.0. - Support for MongoDB Stable API, see :class:`~pymongo.server_api.ServerApi`. -- Support for snapshot reads on secondaries (see :ref:`snapshot-reads-ref`). +- Support for snapshot reads on secondaries (see `snapshot reads `_). - Support for Azure and GCP KMS providers for client side field level encryption. See the docstring for :class:`~pymongo.mongo_client.MongoClient`, :class:`~pymongo.encryption_options.AutoEncryptionOpts`, @@ -432,7 +1536,7 @@ Deprecations same API. - Deprecated the :mod:`pymongo.messeage` module. - Deprecated the ``ssl_keyfile`` and ``ssl_certfile`` URI options in favor - of ``tlsCertificateKeyFile`` (see :doc:`examples/tls`). + of ``tlsCertificateKeyFile`` (see `TLS `_). .. _PYTHON-2466: https://jira.mongodb.org/browse/PYTHON-2466 .. _PYTHON-1690: https://jira.mongodb.org/browse/PYTHON-1690 @@ -447,8 +1551,8 @@ in this release. .. _PyMongo 3.12.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29594 -Changes in Version 3.11.3 -------------------------- +Changes in Version 3.11.3 (2021/02/02) +-------------------------------------- Issues Resolved ............... @@ -462,8 +1566,8 @@ in this release. .. _PYTHON-2452: https://jira.mongodb.org/browse/PYTHON-2452 .. _PyMongo 3.11.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30355 -Changes in Version 3.11.2 -------------------------- +Changes in Version 3.11.2 (2020/12/02) +-------------------------------------- Issues Resolved ............... @@ -490,8 +1594,8 @@ in this release. .. _PYTHON-2443: https://jira.mongodb.org/browse/PYTHON-2443 .. _PyMongo 3.11.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=30315 -Changes in Version 3.11.1 -------------------------- +Changes in Version 3.11.1 (2020/11/17) +-------------------------------------- Version 3.11.1 adds support for Python 3.9 and includes a number of bugfixes. Highlights include: @@ -524,18 +1628,18 @@ in this release. .. _PyMongo 3.11.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=29997 -Changes in Version 3.11.0 -------------------------- +Changes in Version 3.11.0 (2020/07/30) +-------------------------------------- Version 3.11 adds support for MongoDB 4.4 and includes a number of bug fixes. Highlights include: -- Support for :ref:`OCSP` (Online Certificate Status Protocol). +- Support for `OCSP `_ (Online Certificate Status Protocol). - Support for `PyOpenSSL `_ as an - alternative TLS implementation. PyOpenSSL is required for :ref:`OCSP` + alternative TLS implementation. PyOpenSSL is required for `OCSP `_ support. It will also be installed when using the "tls" extra if the version of Python in use is older than 2.7.9. -- Support for the :ref:`MONGODB-AWS` authentication mechanism. +- Support for the `MONGODB-AWS `_ authentication mechanism. - Support for the ``directConnection`` URI option and kwarg to :class:`~pymongo.mongo_client.MongoClient`. 
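  A minimal sketch of both spellings (``localhost:27017`` stands in for a
  real deployment address)::

      from pymongo import MongoClient

      # URI option form: connect only to the named host instead of
      # discovering and monitoring the full replica set topology.
      client = MongoClient("mongodb://localhost:27017/?directConnection=true")

      # Equivalent keyword-argument form.
      client = MongoClient("localhost", 27017, directConnection=True)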
- Support for speculative authentication attempts in connection handshakes @@ -561,7 +1665,7 @@ Highlights include: - Added support for :data:`bson.binary.UuidRepresentation.UNSPECIFIED` and ``MongoClient(uuidRepresentation='unspecified')`` which will become the default UUID representation starting in PyMongo 4.0. See - :ref:`handling-uuid-data-example` for details. + `UUID representations `_ for details. - New methods :meth:`bson.binary.Binary.from_uuid` and :meth:`bson.binary.Binary.as_uuid`. - Added the ``background`` parameter to @@ -619,8 +1723,8 @@ in this release. .. _PyMongo 3.11.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=24799 -Changes in Version 3.10.1 -------------------------- +Changes in Version 3.10.1 (2020/01/07) +-------------------------------------- Version 3.10.1 fixes the following issues discovered since the release of 3.10.0: @@ -638,14 +1742,14 @@ in this release. .. _PyMongo 3.10.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=25039 -Changes in Version 3.10.0 -------------------------- +Changes in Version 3.10.0 (2019/12/10) +-------------------------------------- Version 3.10 includes a number of improvements and bug fixes. Highlights include: - Support for Client-Side Field Level Encryption with MongoDB 4.2. See - :doc:`examples/encryption` for examples. + `Client-Side Field Level Encryption `_ for examples. - Support for Python 3.8. - Added :attr:`pymongo.client_session.ClientSession.in_transaction`. - Do not hold the Topology lock while creating connections in a MongoClient's @@ -665,13 +1769,13 @@ in this release. .. _PyMongo 3.10 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=23944 -Changes in Version 3.9.0 ------------------------- +Changes in Version 3.9.0 (2019/08/13) +------------------------------------- Version 3.9 adds support for MongoDB 4.2. Highlights include: - Support for MongoDB 4.2 sharded transactions. Sharded transactions have - the same API as replica set transactions. See :ref:`transactions-ref`. + the same API as replica set transactions. See `Transactions `_. - New method :meth:`pymongo.client_session.ClientSession.with_transaction` to support conveniently running a transaction in a session with automatic retries and at-most-once semantics. @@ -758,7 +1862,7 @@ Unavoidable breaking changes: bumped to 1.16.0. This is a breaking change for applications that use PyMongo's SRV support with a version of ``dnspython`` older than 1.16.0. -.. _URI options specification: https://github.com/mongodb/specifications/blob/master/source/uri-options/uri-options.rst +.. _URI options specification: https://github.com/mongodb/specifications/blob/master/source/uri-options/uri-options.md Issues Resolved @@ -769,12 +1873,11 @@ in this release. .. _PyMongo 3.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21787 -Changes in Version 3.8.0 ------------------------- +Changes in Version 3.8.0 (2019/04/22) +------------------------------------- .. warning:: PyMongo no longer supports Python 2.6. RHEL 6 users should install - Python 2.7 or newer from `Red Hat Software Collections - `_. + Python 2.7 or newer from Red Hat Software Collections. CentOS 6 users should install Python 2.7 or newer from `SCL `_ @@ -782,7 +1885,7 @@ Changes in Version 3.8.0 must upgrade to PyPy3.5+. 
- :class:`~bson.objectid.ObjectId` now implements the `ObjectID specification - version 0.2 `_. + version 0.2 `_. - For better performance and to better follow the GridFS spec, :class:`~gridfs.grid_file.GridOut` now uses a single cursor to read all the chunks in the file. Previously, each chunk in the file was queried @@ -800,7 +1903,7 @@ Changes in Version 3.8.0 - Custom types can now be directly encoded to, and decoded from MongoDB using the :class:`~bson.codec_options.TypeCodec` and :class:`~bson.codec_options.TypeRegistry` APIs. For more information, see - the :doc:`custom type example `. + `Custom Types `_. - Attempting a multi-document transaction on a sharded cluster now raises a :exc:`~pymongo.errors.ConfigurationError`. - :meth:`pymongo.cursor.Cursor.distinct` and @@ -821,16 +1924,16 @@ Changes in Version 3.8.0 is expected to require a :meth:`~pymongo.cursor.Cursor.hint` when using min/max starting in MongoDB 4.2. - Documented support for the uuidRepresentation URI option, which has been - supported since PyMongo 2.7. Valid values are `pythonLegacy` (the default), - `javaLegacy`, `csharpLegacy` and `standard`. New applications should consider - setting this to `standard` for cross language compatibility. + supported since PyMongo 2.7. Valid values are ``pythonLegacy`` (the default), + ``javaLegacy``, ``csharpLegacy`` and ``standard``. New applications should consider + setting this to ``standard`` for cross language compatibility. - :class:`~bson.raw_bson.RawBSONDocument` now validates that the ``bson_bytes`` passed in represent a single bson document. Earlier versions would mistakenly accept multiple bson documents. - Iterating over a :class:`~bson.raw_bson.RawBSONDocument` now maintains the same field order of the underlying raw BSON document. - Applications can now register a custom server selector. For more information - see the :doc:`server selector example `. + see `Customize Server Selection `_. - The connection pool now implements a LIFO policy. Unavoidable breaking changes: @@ -848,8 +1951,8 @@ in this release. .. _PyMongo 3.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19904 -Changes in Version 3.7.2 ------------------------- +Changes in Version 3.7.2 (2018/10/10) +------------------------------------- Version 3.7.2 fixes a few issues discovered since the release of 3.7.1. @@ -872,8 +1975,8 @@ in this release. .. _PyMongo 3.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21519 -Changes in Version 3.7.1 ------------------------- +Changes in Version 3.7.1 (2018/07/16) +------------------------------------- Version 3.7.1 fixes a few issues discovered since the release of 3.7.0. @@ -892,15 +1995,15 @@ in this release. .. _PyMongo 3.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=21096 -Changes in Version 3.7.0 ------------------------- +Changes in Version 3.7.0 (2018/06/26) +------------------------------------- Version 3.7 adds support for MongoDB 4.0. Highlights include: - Support for single replica set multi-document ACID transactions. - See :ref:`transactions-ref`. -- Support for wire protocol compression. See the - :meth:`~pymongo.mongo_client.MongoClient` documentation for details. + See `transactions `_. +- Support for wire protocol compression via the new ``compressors`` URI option and keyword argument to + :meth:`~pymongo.mongo_client.MongoClient`, as sketched below. See `network compression `_ for details. - Support for Python 3.7.
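For example, the wire protocol compression described above can be requested
with either spelling (a sketch; zlib ships with the Python standard library,
while snappy requires the optional ``python-snappy`` package)::

    from pymongo import MongoClient

    # URI option form.
    client = MongoClient("mongodb://localhost:27017/?compressors=zlib")

    # Equivalent keyword-argument form. The compressor is negotiated
    # during the handshake and skipped if the server does not support it.
    client = MongoClient(compressors="zlib")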
- New count methods, :meth:`~pymongo.collection.Collection.count_documents` and :meth:`~pymongo.collection.Collection.estimated_document_count`. @@ -921,12 +2024,12 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: the following features and changes allow PyMongo to function when MD5 support is disabled in OpenSSL by the FIPS Object Module: - - Support for the :ref:`SCRAM-SHA-256 ` - authentication mechanism. The :ref:`GSSAPI `, - :ref:`PLAIN `, and :ref:`MONGODB-X509 ` + - Support for the `SCRAM-SHA-256 `_ + authentication mechanism. The `GSSAPI `_, + `PLAIN `_, and `MONGODB-X509 `_ mechanisms can also be used to avoid issues with OpenSSL in FIPS environments. - - MD5 checksums are now optional in GridFS. See the `disable_md5` option + - MD5 checksums are now optional in GridFS. See the ``disable_md5`` option of :class:`~gridfs.GridFS` and :class:`~gridfs.GridFSBucket`. - :class:`~bson.objectid.ObjectId` machine bytes are now hashed using `FNV-1a @@ -941,11 +2044,11 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: class which is a subclass of :class:`~pymongo.change_stream.ChangeStream`. - SCRAM client and server keys are cached for improved performance, following `RFC 5802 `_. -- If not specified, the authSource for the :ref:`PLAIN ` +- If not specified, the authSource for the `PLAIN `_ authentication mechanism defaults to $external. - wtimeoutMS is once again supported as a URI option. - When using unacknowledged write concern and connected to MongoDB server - version 3.6 or greater, the `bypass_document_validation` option is now + version 3.6 or greater, the ``bypass_document_validation`` option is now supported in the following write helpers: :meth:`~pymongo.collection.Collection.insert_one`, :meth:`~pymongo.collection.Collection.replace_one`, @@ -955,7 +2058,7 @@ Version 3.7 adds support for MongoDB 4.0. Highlights include: Deprecations: - Deprecated :meth:`pymongo.collection.Collection.count` and - :meth:`pymongo.cursor.Cursor.count`. These two methods use the `count` + :meth:`pymongo.cursor.Cursor.count`. These two methods use the ``count`` command and `may or may not be accurate `_, depending on the options used and connected MongoDB topology. Use @@ -965,7 +2068,7 @@ Deprecations: deprecated in MongoDB 3.6 and removed in MongoDB 4.0. - Deprecated the max_scan option of :meth:`~pymongo.collection.Collection.find` and :meth:`~pymongo.collection.Collection.find_one`. The option was - deprecated in MongoDB 4.0. Use `maxTimeMS` instead. + deprecated in MongoDB 4.0. Use ``maxTimeMS`` instead. - Deprecated :meth:`~pymongo.mongo_client.MongoClient.close_cursor`. Use :meth:`~pymongo.cursor.Cursor.close` instead. - Deprecated :meth:`~pymongo.mongo_client.MongoClient.database_names`. Use @@ -1002,8 +2105,8 @@ in this release. .. _PyMongo 3.7 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19287 -Changes in Version 3.6.1 ------------------------- +Changes in Version 3.6.1 (2018/03/01) +------------------------------------- Version 3.6.1 fixes bugs reported since the release of 3.6.0: @@ -1028,8 +2131,8 @@ in this release. .. 
_PyMongo 3.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=19438 -Changes in Version 3.6.0 ------------------------- +Changes in Version 3.6.0 (2017/08/23) +------------------------------------- Version 3.6 adds support for MongoDB 3.6, drops support for CPython 3.3 (PyPy3 is still supported), and drops support for MongoDB versions older than 2.6. If @@ -1067,7 +2170,7 @@ Highlights include: Deprecations: -- The `useCursor` option for :meth:`~pymongo.collection.Collection.aggregate` +- The ``useCursor`` option for :meth:`~pymongo.collection.Collection.aggregate` is deprecated. The option was only necessary when upgrading from MongoDB 2.4 to MongoDB 2.6. MongoDB 2.4 is no longer supported. - The :meth:`~pymongo.database.Database.add_user` and @@ -1100,8 +2203,8 @@ in this release. .. _PyMongo 3.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18043 -Changes in Version 3.5.1 ------------------------- +Changes in Version 3.5.1 (2017/08/23) +------------------------------------- Version 3.5.1 fixes bugs reported since the release of 3.5.0: @@ -1119,8 +2222,8 @@ in this release. .. _PyMongo 3.5.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=18721 -Changes in Version 3.5 ----------------------- +Changes in Version 3.5.0 (2017/08/08) +------------------------------------- Version 3.5 implements a number of improvements and bug fixes: @@ -1132,19 +2235,19 @@ Highlights include: - Increased the performance of using :class:`~bson.raw_bson.RawBSONDocument`. - Increased the performance of :meth:`~pymongo.mongo_client.MongoClient.database_names` by using the - `nameOnly` option for listDatabases when available. + ``nameOnly`` option for listDatabases when available. - Increased the performance of :meth:`~pymongo.collection.Collection.bulk_write` by reducing the memory overhead of :class:`~pymongo.operations.InsertOne`, :class:`~pymongo.operations.DeleteOne`, and :class:`~pymongo.operations.DeleteMany`. -- Added the `collation` option to :class:`~pymongo.operations.DeleteOne`, +- Added the ``collation`` option to :class:`~pymongo.operations.DeleteOne`, :class:`~pymongo.operations.DeleteMany`, :class:`~pymongo.operations.ReplaceOne`, :class:`~pymongo.operations.UpdateOne`, and :class:`~pymongo.operations.UpdateMany`. - Implemented the `MongoDB Extended JSON - `_ + `_ specification. - :class:`~bson.decimal128.Decimal128` now works when cdecimal is installed. - PyMongo is now tested against a wider array of operating systems and CPU @@ -1152,13 +2255,13 @@ Highlights include: Changes and Deprecations: -- :meth:`~pymongo.collection.Collection.find` has new options `return_key`, - `show_record_id`, `snapshot`, `hint`, `max_time_ms`, `max_scan`, `min`, `max`, - and `comment`. Deprecated the option `modifiers`. +- :meth:`~pymongo.collection.Collection.find` has new options ``return_key``, + ``show_record_id``, ``snapshot``, ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, + and ``comment``. Deprecated the option ``modifiers``. - Deprecated :meth:`~pymongo.collection.Collection.group`. The group command was deprecated in MongoDB 3.4 and is expected to be removed in MongoDB 3.6. Applications should use :meth:`~pymongo.collection.Collection.aggregate` - with the `$group` pipeline stage instead. + with the ``$group`` pipeline stage instead. - Deprecated :meth:`~pymongo.database.Database.authenticate`. 
Authenticating multiple users conflicts with support for logical sessions in MongoDB 3.6. To authenticate as multiple users, create multiple instances of @@ -1168,9 +2271,9 @@ Changes and Deprecations: - Deprecated :class:`~pymongo.database.SystemJS`. - Deprecated :meth:`~pymongo.mongo_client.MongoClient.get_default_database`. Applications should use - :meth:`~pymongo.mongo_client.MongoClient.get_database` without the `name` + :meth:`~pymongo.mongo_client.MongoClient.get_database` without the ``name`` parameter instead. -- Deprecated the MongoClient option `socketKeepAlive`. It now defaults to true +- Deprecated the MongoClient option ``socketKeepAlive``. It now defaults to true and disabling it is not recommended, see `does TCP keepalive time affect MongoDB Deployments? `_ @@ -1182,7 +2285,7 @@ Changes and Deprecations: :const:`~bson.json_util.RELAXED_JSON_OPTIONS` or :const:`~bson.json_util.CANONICAL_JSON_OPTIONS` instead. - If a custom :class:`~bson.codec_options.CodecOptions` is passed to - :class:`RawBSONDocument`, its `document_class` must be + :class:`RawBSONDocument`, its ``document_class`` must be :class:`RawBSONDocument`. - :meth:`~pymongo.collection.Collection.list_indexes` no longer raises OperationFailure when the collection (or database) does not exist on @@ -1191,7 +2294,7 @@ Changes and Deprecations: consistent across all MongoDB versions. - In Python 3, :meth:`~bson.json_util.loads` now automatically decodes JSON $binary with a subtype of 0 into :class:`bytes` instead of - :class:`~bson.binary.Binary`. See the :doc:`/python3` for more details. + :class:`~bson.binary.Binary`. - :meth:`~bson.json_util.loads` now raises ``TypeError`` or ``ValueError`` when parsing JSON type wrappers with values of the wrong type or any extra keys. @@ -1210,8 +2313,8 @@ in this release. .. _PyMongo 3.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17590 -Changes in Version 3.4 ---------------------- +Changes in Version 3.4.0 (2016/11/29) +------------------------------------- Version 3.4 implements the new server features introduced in MongoDB 3.4 and a whole lot more: Highlights include: - Complete support for MongoDB 3.4: - - Unicode aware string comparison using :doc:`examples/collations`. + - Unicode aware string comparison using `Collation `_. - Support for the new :class:`~bson.decimal128.Decimal128` BSON type. - A new maxStalenessSeconds read preference option. - A username is no longer required for the MONGODB-X509 authentication @@ -1282,8 +2385,8 @@ in this release. .. _PyMongo 3.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16594 -Changes in Version 3.3.1 ------------------------- +Changes in Version 3.3.1 (2016/10/27) +------------------------------------- Version 3.3.1 fixes a memory leak when decoding elements inside of a :class:`~bson.raw_bson.RawBSONDocument`. @@ -1296,8 +2399,8 @@ in this release. .. _PyMongo 3.3.1 release notes in Jira: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17636 -Changes in Version 3.3 ---------------------- +Changes in Version 3.3.0 (2016/07/12) +------------------------------------- Version 3.3 adds the following major new features: @@ -1323,11 +2426,11 @@ in this release. ..
_PyMongo 3.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16005 -Changes in Version 3.2.2 ------------------------- +Changes in Version 3.2.2 (2016/03/15) +------------------------------------- Version 3.2.2 fixes a few issues reported since the release of 3.2.1, including -a fix for using the `connect` option in the MongoDB URI and support for setting +a fix for using the ``connect`` option in the MongoDB URI and support for setting the batch size for a query to 1 when using MongoDB 3.2+. Issues Resolved @@ -1339,8 +2442,8 @@ in this release. .. _PyMongo 3.2.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16538 -Changes in Version 3.2.1 ------------------------- +Changes in Version 3.2.1 (2016/02/02) +------------------------------------- Version 3.2.1 fixes a few issues reported since the release of 3.2, including running the mapreduce command twice when calling the @@ -1357,8 +2460,8 @@ in this release. .. _PyMongo 3.2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16312 -Changes in Version 3.2 ----------------------- +Changes in Version 3.2 (2015/12/07) +----------------------------------- Version 3.2 implements the new server features introduced in MongoDB 3.2. @@ -1371,7 +2474,7 @@ Highlights include: :meth:`~pymongo.collection.Collection.find_one_and_replace`, :meth:`~pymongo.collection.Collection.find_one_and_update`, and :meth:`~pymongo.collection.Collection.find_one_and_delete`. - - Support for the new `bypassDocumentValidation` option in write + - Support for the new ``bypassDocumentValidation`` option in write helpers. - Support for reading and writing raw BSON with @@ -1390,8 +2493,8 @@ in this release. .. _PyMongo 3.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15612 -Changes in Version 3.1.1 ------------------------- +Changes in Version 3.1.1 (2015/11/17) +------------------------------------- Version 3.1.1 fixes a few issues reported since the release of 3.1, including a regression in error handling for oversize command documents and interrupt @@ -1405,8 +2508,8 @@ in this release. .. _PyMongo 3.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16211 -Changes in Version 3.1 ----------------------- +Changes in Version 3.1 (2015/11/02) +----------------------------------- Version 3.1 implements a few new features and fixes bugs reported since the release of 3.0.3. @@ -1415,10 +2518,10 @@ Highlights include: - Command monitoring support. See :mod:`~pymongo.monitoring` for details. - Configurable error handling for :exc:`UnicodeDecodeError`. See the - `unicode_decode_error_handler` option of + ``unicode_decode_error_handler`` option of :class:`~bson.codec_options.CodecOptions`. - Optional automatic timezone conversion when decoding BSON datetime. See the - `tzinfo` option of :class:`~bson.codec_options.CodecOptions`. + ``tzinfo`` option of :class:`~bson.codec_options.CodecOptions`. - An implementation of :class:`~gridfs.GridFSBucket` from the new GridFS spec. - Compliance with the new Connection String spec. - Reduced idle CPU usage in Python 2. @@ -1437,8 +2540,8 @@ in this release. .. 
_PyMongo 3.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14796 -Changes in Version 3.0.3 ------------------------- +Changes in Version 3.0.3 (2015/06/30) +------------------------------------- Version 3.0.3 fixes issues reported since the release of 3.0.2, including a feature breaking bug in the GSSAPI implementation. @@ -1451,8 +2554,8 @@ in this release. .. _PyMongo 3.0.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15528 -Changes in Version 3.0.2 ------------------------- +Changes in Version 3.0.2 (2015/05/12) +------------------------------------- Version 3.0.2 fixes issues reported since the release of 3.0.1, most importantly a bug that could route operations to replica set members @@ -1469,8 +2572,8 @@ in this release. .. _PyMongo 3.0.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15430 -Changes in Version 3.0.1 ------------------------- +Changes in Version 3.0.1 (2015/04/21) +------------------------------------- Version 3.0.1 fixes issues reported since the release of 3.0, most importantly a bug in GridFS.delete that could prevent file chunks from @@ -1484,8 +2587,8 @@ in this release. .. _PyMongo 3.0.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15322 -Changes in Version 3.0 ----------------------- +Changes in Version 3.0 (2015/04/07) +----------------------------------- PyMongo 3.0 is a partial rewrite of PyMongo bringing a large number of improvements: @@ -1541,7 +2644,7 @@ applied to documents returned by the new methods SSL/TLS changes ............... -When `ssl` is ``True`` the `ssl_cert_reqs` option now defaults to +When ``ssl`` is ``True`` the ``ssl_cert_reqs`` option now defaults to :attr:`ssl.CERT_REQUIRED` if not provided. PyMongo will attempt to load OS provided CA certificates to verify the server, raising :exc:`~pymongo.errors.ConfigurationError` if it cannot. @@ -1558,7 +2661,7 @@ In PyMongo 3.0, the ``use_greenlets`` option is gone. To use PyMongo with Gevent simply call ``gevent.monkey.patch_all()``. For more information, -see :doc:`PyMongo's Gevent documentation `. +see `Gevent `_. :class:`~pymongo.mongo_client.MongoClient` changes .................................................. @@ -1602,7 +2705,7 @@ the list, and used it until a network error prompted it to re-evaluate all mongoses' latencies and reconnect to one of them. In PyMongo 3, the client monitors its network latency to all the mongoses continuously, and distributes operations evenly among those with the lowest latency. -See :ref:`mongos-load-balancing` for more information. +See `load balancing `_ for more information. The client methods ``start_request``, ``in_request``, and ``end_request`` are removed, and so is the ``auto_start_request`` option. Requests were @@ -1610,7 +2713,7 @@ designed to make read-your-writes consistency more likely with the ``w=0`` write concern. Additionally, a thread in a request used the same member for all secondary reads in a replica set. To ensure read-your-writes consistency in PyMongo 3.0, do not override the default write concern with ``w=0``, and -do not override the default :ref:`read preference ` of +do not override the default `read preference `_ of PRIMARY. Support for the ``slaveOk`` (or ``slave_okay``), ``safe``, and @@ -1624,8 +2727,7 @@ The ``max_pool_size`` option has been removed. It is replaced by the ``maxPoolSize`` MongoDB URI option. 
``maxPoolSize`` is now a supported URI option in PyMongo and can be passed as a keyword argument. -The ``copy_database`` method is removed, see the -:doc:`copy_database examples ` for alternatives. +The ``copy_database`` method is removed, see `Copy and Clone Databases `_ for alternatives. The ``disconnect`` method is removed. Use :meth:`~pymongo.mongo_client.MongoClient.close` instead. @@ -1688,12 +2790,12 @@ Cursor management changes :meth:`~pymongo.mongo_client.MongoClient.set_cursor_manager` are no longer deprecated. If you subclass :class:`~pymongo.cursor_manager.CursorManager` your implementation of :meth:`~pymongo.cursor_manager.CursorManager.close` -must now take a second parameter, `address`. The ``BatchCursorManager`` class +must now take a second parameter, ``address``. The ``BatchCursorManager`` class is removed. The second parameter to :meth:`~pymongo.mongo_client.MongoClient.close_cursor` is renamed from ``_conn_id`` to ``address``. -:meth:`~pymongo.mongo_client.MongoClient.kill_cursors` now accepts an `address` +:meth:`~pymongo.mongo_client.MongoClient.kill_cursors` now accepts an ``address`` parameter. :class:`~pymongo.database.Database` changes .......................................... @@ -1728,13 +2830,13 @@ The following methods have been added: The following methods have been changed: -- :meth:`~pymongo.database.Database.command`. Support for `as_class`, - `uuid_subtype`, `tag_sets`, and `secondary_acceptable_latency_ms` have been +- :meth:`~pymongo.database.Database.command`. Support for ``as_class``, + ``uuid_subtype``, ``tag_sets``, and ``secondary_acceptable_latency_ms`` has been removed. You can instead pass an instance of - :class:`~bson.codec_options.CodecOptions` as `codec_options` and an instance + :class:`~bson.codec_options.CodecOptions` as ``codec_options`` and an instance of a read preference class from :mod:`~pymongo.read_preferences` as - `read_preference`. The `fields` and `compile_re` options are also removed. - The `fields` options was undocumented and never really worked. Regular + ``read_preference``. The ``fields`` and ``compile_re`` options are also removed. + The ``fields`` option was undocumented and never really worked. Regular expressions are always decoded to :class:`~bson.regex.Regex`. The following methods have been deprecated: @@ -1798,9 +2900,9 @@ The following methods have changed: - :meth:`~pymongo.collection.Collection.distinct` now optionally takes a filter argument. - :meth:`~pymongo.collection.Collection.create_index` no longer caches - indexes, therefore the `cache_for` parameter has been removed. It also - no longer supports the `bucket_size` and `drop_dups` aliases for `bucketSize` - and `dropDups`. + indexes, therefore the ``cache_for`` parameter has been removed. It also + no longer supports the ``bucket_size`` and ``drop_dups`` aliases for ``bucketSize`` + and ``dropDups``.
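As a rough sketch of the PyMongo 3 idiom that replaces the removed per-call
options (the database and collection names are placeholders)::

    from bson.codec_options import CodecOptions
    from pymongo import MongoClient
    from pymongo.read_preferences import SecondaryPreferred

    # Codec options and read preference now travel with a configured
    # clone of the collection instead of with each method call.
    coll = MongoClient().db.coll.with_options(
        codec_options=CodecOptions(tz_aware=True),
        read_preference=SecondaryPreferred(),
    )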
The following methods are deprecated: @@ -1848,13 +2950,13 @@ The following find/find_one options have been removed: - tag_sets (use one of the read preference classes from :mod:`~pymongo.read_preferences` and :meth:`~pymongo.collection.Collection.with_options` instead) -- secondary_acceptable_latency_ms (use the `localThresholdMS` URI option +- secondary_acceptable_latency_ms (use the ``localThresholdMS`` URI option instead) -- max_scan (use the new `modifiers` option instead) -- snapshot (use the new `modifiers` option instead) -- tailable (use the new `cursor_type` option instead) -- await_data (use the new `cursor_type` option instead) -- exhaust (use the new `cursor_type` option instead) +- max_scan (use the new ``modifiers`` option instead) +- snapshot (use the new ``modifiers`` option instead) +- tailable (use the new ``cursor_type`` option instead) +- await_data (use the new ``cursor_type`` option instead) +- exhaust (use the new ``cursor_type`` option instead) - as_class (use :meth:`~pymongo.collection.Collection.with_options` with :class:`~bson.codec_options.CodecOptions` instead) - compile_re (BSON regular expressions are always decoded to @@ -1867,9 +2969,9 @@ The following find/find_one options are deprecated: The following renames need special handling. - timeout -> no_cursor_timeout - - The default for `timeout` was True. The default for `no_cursor_timeout` is - False. If you were previously passing False for `timeout` you must pass - **True** for `no_cursor_timeout` to keep the previous behavior. + The default for ``timeout`` was True. The default for ``no_cursor_timeout`` is + False. If you were previously passing False for ``timeout`` you must pass + **True** for ``no_cursor_timeout`` to keep the previous behavior. :mod:`~pymongo.errors` changes .............................. @@ -1886,7 +2988,7 @@ The unsupported methods, the class, and the exception are all deleted. :mod:`~bson` changes .................... -The `compile_re` option is removed from all methods +The ``compile_re`` option is removed from all methods that accepted it in :mod:`~bson` and :mod:`~bson.json_util`. Additionally, it is removed from :meth:`~pymongo.collection.Collection.find`, :meth:`~pymongo.collection.Collection.find_one`, @@ -1904,7 +3006,7 @@ allows BSON int64 to be round tripped without losing type information in python 3. Note that if you store a python long (or a python int larger than 4 bytes) it will be returned from PyMongo as :class:`~bson.int64.Int64`. -The `as_class`, `tz_aware`, and `uuid_subtype` options are removed from all +The ``as_class``, ``tz_aware``, and ``uuid_subtype`` options are removed from all BSON encoding and decoding methods. Use :class:`~bson.codec_options.CodecOptions` to configure these options. The APIs affected are: @@ -1930,8 +3032,8 @@ in this release. .. _PyMongo 3.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12501 -Changes in Version 2.9.5 ------------------------- +Changes in Version 2.9.5 (2017/06/30) +------------------------------------- Version 2.9.5 works around ssl module deprecations in Python 3.6, and expected future ssl module deprecations. It also fixes bugs found since the release of @@ -1953,8 +3055,8 @@ in this release. ..
_PyMongo 2.9.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=17605 -Changes in Version 2.9.4 ------------------------- +Changes in Version 2.9.4 (2016/09/30) +------------------------------------- Version 2.9.4 fixes issues reported since the release of 2.9.3. @@ -1962,7 +3064,7 @@ Version 2.9.4 fixes issues reported since the release of 2.9.3. - Fixed :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` handling of uuidRepresentation. - Fixed building and testing the documentation with python 3.x. -- New documentation for :doc:`examples/tls` and :doc:`atlas`. +- New documentation for `TLS `_ and `Atlas `_. Issues Resolved ............... @@ -1972,8 +3074,8 @@ in this release. .. _PyMongo 2.9.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16885 -Changes in Version 2.9.3 ------------------------- +Changes in Version 2.9.3 (2016/03/15) +------------------------------------- Version 2.9.3 fixes a few issues reported since the release of 2.9.2 including thread safety issues in :meth:`~pymongo.collection.Collection.ensure_index`, @@ -1988,15 +3090,15 @@ in this release. .. _PyMongo 2.9.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16539 -Changes in Version 2.9.2 ------------------------- +Changes in Version 2.9.2 (2016/02/16) +------------------------------------- Version 2.9.2 restores Python 3.1 support, which was broken in PyMongo 2.8. It improves an error message when decoding BSON as well as fixes a couple other issues including :meth:`~pymongo.collection.Collection.aggregate` ignoring :attr:`~pymongo.collection.Collection.codec_options` and :meth:`~pymongo.database.Database.command` raising a superfluous -`DeprecationWarning`. +``DeprecationWarning``. Issues Resolved ............... @@ -2006,8 +3108,8 @@ in this release. .. _PyMongo 2.9.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16303 -Changes in Version 2.9.1 ------------------------- +Changes in Version 2.9.1 (2015/11/17) +------------------------------------- Version 2.9.1 fixes two interrupt handling issues in the C extensions and adapts a test case for a behavior change in MongoDB 3.2. @@ -2020,8 +3122,8 @@ in this release. .. _PyMongo 2.9.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=16208 -Changes in Version 2.9 ----------------------- +Changes in Version 2.9 (2015/09/30) +----------------------------------- Version 2.9 provides an upgrade path to PyMongo 3.x. Most of the API changes from PyMongo 3.0 have been backported in a backward compatible way, allowing @@ -2098,8 +3200,8 @@ in this release. .. _PyMongo 2.9 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14795 -Changes in Version 2.8.1 ------------------------- +Changes in Version 2.8.1 (2015/05/11) +------------------------------------- Version 2.8.1 fixes a number of issues reported since the release of PyMongo 2.8. It is a recommended upgrade for all users of PyMongo 2.x. @@ -2112,8 +3214,8 @@ in this release. .. _PyMongo 2.8.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=15324 -Changes in Version 2.8 ----------------------- +Changes in Version 2.8 (2015/01/28) +----------------------------------- Version 2.8 is a major release that provides full support for MongoDB 3.0 and fixes a number of bugs. 
@@ -2162,8 +3264,8 @@ in this release. .. _PyMongo 2.8 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14223 -Changes in Version 2.7.2 ------------------------- +Changes in Version 2.7.2 (2014/07/29) +------------------------------------- Version 2.7.2 includes fixes for upsert reporting in the bulk API for MongoDB versions previous to 2.6, a regression in how son manipulators are applied in @@ -2179,8 +3281,8 @@ in this release. .. _PyMongo 2.7.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=14005 -Changes in Version 2.7.1 ------------------------- +Changes in Version 2.7.1 (2014/05/23) +------------------------------------- Version 2.7.1 fixes a number of issues reported since the release of 2.7, most importantly a fix for creating indexes and manipulating users through @@ -2194,14 +3296,14 @@ in this release. .. _PyMongo 2.7.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=13823 -Changes in Version 2.7 ----------------------- +Changes in Version 2.7 (2014/04/03) +----------------------------------- PyMongo 2.7 is a major release with a large number of new features and bug fixes. Highlights include: - Full support for MongoDB 2.6. -- A new :doc:`bulk write operations API `. +- A new `bulk write operations API `_. - Support for server side query timeouts using :meth:`~pymongo.cursor.Cursor.max_time_ms`. - Support for writing :meth:`~pymongo.collection.Collection.aggregate` @@ -2212,9 +3314,9 @@ fixes. Highlights include: error details from the server. - A new GridFS :meth:`~gridfs.GridFS.find` method that returns a :class:`~gridfs.grid_file.GridOutCursor`. -- Greatly improved :doc:`support for mod_wsgi ` when using +- Greatly improved `support for mod_wsgi `_ when using PyMongo's C extensions. Read `Jesse's blog post - `_ for details. + `_ for details. - Improved C extension support for ARM little endian. Breaking changes @@ -2231,8 +3333,8 @@ in this release. .. _PyMongo 2.7 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12892 -Changes in Version 2.6.3 ------------------------- +Changes in Version 2.6.3 (2013/10/11) +------------------------------------- Version 2.6.3 fixes issues reported since the release of 2.6.2, most importantly a semaphore leak when a connection to the server fails. @@ -2245,8 +3347,8 @@ in this release. .. _PyMongo 2.6.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=13098 -Changes in Version 2.6.2 ------------------------- +Changes in Version 2.6.2 (2013/09/06) +------------------------------------- Version 2.6.2 fixes a :exc:`TypeError` problem when max_pool_size=None is used in Python 3. @@ -2259,8 +3361,8 @@ in this release. .. _PyMongo 2.6.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12910 -Changes in Version 2.6.1 ------------------------- +Changes in Version 2.6.1 (2013/09/03) +------------------------------------- Version 2.6.1 fixes a reference leak in the :meth:`~pymongo.collection.Collection.insert` method. @@ -2273,8 +3375,8 @@ in this release. .. 
_PyMongo 2.6.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12905 -Changes in Version 2.6 ----------------------- +Changes in Version 2.6 (2013/08/19) +----------------------------------- Version 2.6 includes some frequently requested improvements and adds support for some early MongoDB 2.6 features. @@ -2292,19 +3394,19 @@ Important new features: ``waitQueueTimeoutMS`` is set, an operation that blocks waiting for a socket will raise :exc:`~pymongo.errors.ConnectionFailure` after the timeout. By default ``waitQueueTimeoutMS`` is not set. - See :ref:`connection-pooling` for more information. + See `connection pooling `_ for more information. - The :meth:`~pymongo.collection.Collection.insert` method automatically splits large batches of documents into multiple insert messages based on :attr:`~pymongo.mongo_client.MongoClient.max_message_size` - Support for the exhaust cursor flag. See :meth:`~pymongo.collection.Collection.find` for details and caveats. - Support for the PLAIN and MONGODB-X509 authentication mechanisms. - See :doc:`the authentication docs ` for more + See `the authentication docs `_ for more information. - Support aggregation output as a :class:`~pymongo.cursor.Cursor`. See :meth:`~pymongo.collection.Collection.aggregate` for details. -.. warning:: SIGNIFICANT BEHAVIOR CHANGE in 2.6. Previously, `max_pool_size` +.. warning:: SIGNIFICANT BEHAVIOR CHANGE in 2.6. Previously, ``max_pool_size`` would limit only the idle sockets the pool would hold onto, not the number of open sockets. The default has also changed, from 10 to 100. If you pass a value for ``max_pool_size`` make sure it is large enough for @@ -2312,7 +3414,7 @@ Important new features: to having a ``max_pool_size`` larger than necessary. Err towards a larger value.) If your application accepts the default, continue to do so. - See :ref:`connection-pooling` for more information. + See `connection pooling `_ for more information. Issues Resolved ............... @@ -2322,8 +3424,8 @@ in this release. .. _PyMongo 2.6 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12380 -Changes in Version 2.5.2 ------------------------- +Changes in Version 2.5.2 (2013/06/01) +------------------------------------- Version 2.5.2 fixes a NULL pointer dereference issue when decoding an invalid :class:`~bson.dbref.DBRef`. @@ -2336,8 +3438,8 @@ in this release. .. _PyMongo 2.5.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12581 -Changes in Version 2.5.1 ------------------------- +Changes in Version 2.5.1 (2013/05/13) +------------------------------------- Version 2.5.1 is a minor release that fixes issues discovered after the release of 2.5. Most importantly, this release addresses some race @@ -2351,14 +3453,14 @@ in this release. .. _PyMongo 2.5.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12484 -Changes in Version 2.5 ----------------------- +Changes in Version 2.5 (2013/03/22) +----------------------------------- Version 2.5 includes changes to support new features in MongoDB 2.4. Important new features: -- Support for :ref:`GSSAPI (Kerberos) authentication `. +- Support for `GSSAPI (Kerberos) `_. - Support for SSL certificate validation with hostname matching. - Support for delegated and role based authentication. - New GEOSPHERE (2dsphere) and HASHED index constants. @@ -2375,8 +3477,8 @@ in this release. .. 
_PyMongo 2.5 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11981 -Changes in Version 2.4.2 ------------------------- +Changes in Version 2.4.2 (2013/01/23) +------------------------------------- Version 2.4.2 is a minor release that fixes issues discovered after the release of 2.4.1. Most importantly, PyMongo will no longer select a replica @@ -2390,8 +3492,8 @@ in this release. .. _PyMongo 2.4.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12299 -Changes in Version 2.4.1 ------------------------- +Changes in Version 2.4.1 (2012/12/06) +------------------------------------- Version 2.4.1 is a minor release that fixes issues discovered after the release of 2.4. Most importantly, this release fixes a regression using @@ -2406,8 +3508,8 @@ in this release. .. _PyMongo 2.4.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=12286 -Changes in Version 2.4 ----------------------- +Changes in Version 2.4 (2012/11/27) +----------------------------------- Version 2.4 includes a few important new features and a large number of bug fixes. @@ -2429,7 +3531,7 @@ Important new features: - :class:`~pymongo.cursor.Cursor` can be copied with functions from the :mod:`copy` module. - The :meth:`~pymongo.database.Database.set_profiling_level` method now supports - a `slow_ms` option. + a ``slow_ms`` option. - The replica set monitor task (used by :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient` and :class:`~pymongo.replica_set_connection.ReplicaSetConnection`) is a daemon thread @@ -2456,8 +3558,8 @@ in this release. .. _PyMongo 2.4 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11485 -Changes in Version 2.3 ----------------------- +Changes in Version 2.3 (2012/08/29) +----------------------------------- Version 2.3 adds support for new features and behavior changes in MongoDB 2.2. @@ -2465,11 +3567,11 @@ Version 2.3 adds support for new features and behavior changes in MongoDB Important New Features: - Support for expanded read preferences including directing reads to tagged - servers - See :ref:`secondary-reads` for more information. + servers - See `secondary reads `_ for more information. - Support for mongos failover. - A new :meth:`~pymongo.collection.Collection.aggregate` method to support MongoDB's new `aggregation framework - `_. + `_. - Support for legacy Java and C# byte order when encoding and decoding UUIDs. - Support for connecting directly to an arbiter. @@ -2490,8 +3592,8 @@ in this release. .. _PyMongo 2.3 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11146 -Changes in Version 2.2.1 ------------------------- +Changes in Version 2.2.1 (2012/07/06) +------------------------------------- Version 2.2.1 is a minor release that fixes issues discovered after the release of 2.2. Most importantly, this release fixes an incompatibility @@ -2506,8 +3608,8 @@ in this release. .. _PyMongo 2.2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=11185 -Changes in Version 2.2 ----------------------- +Changes in Version 2.2 (2012/04/30) +----------------------------------- Version 2.2 adds a few more frequently requested features and fixes a number of bugs. @@ -2519,10 +3621,10 @@ to this release. Important New Features: -- Support for Python 3 - - See the :doc:`python3` for more information. 
+- Support for Python 3. + See `Python 3 `_ for more information. - Support for Gevent - - See :doc:`examples/gevent` for more information. + See `Gevent `_ for more information. - Improved connection pooling. See `PYTHON-287 `_. @@ -2551,8 +3653,8 @@ in this release. .. _PyMongo 2.2 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10584 -Changes in Version 2.1.1 ------------------------- +Changes in Version 2.1.1 (2012/01/04) +------------------------------------- Version 2.1.1 is a minor release that fixes a few issues discovered after the release of 2.1. You can now use @@ -2571,8 +3673,8 @@ in this release. .. _PyMongo 2.1.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?version=11081&styleName=Html&projectId=10004 -Changes in Version 2.1 ----------------------- +Changes in Version 2.1 (2011/12/07) +----------------------------------- Version 2.1 adds a few frequently requested features and includes the usual round of bug fixes and improvements. @@ -2614,8 +3716,8 @@ See the `PyMongo 2.1 release notes in JIRA`_ for the list of resolved issues in .. _PyMongo 2.1 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10583 -Changes in Version 2.0.1 ------------------------- +Changes in Version 2.0.1 (2011/08/15) +------------------------------------- Version 2.0.1 fixes a regression in :class:`~gridfs.grid_file.GridIn` when writing pre-chunked strings. Thanks go to Alexey Borzenkov for reporting the @@ -2627,8 +3729,8 @@ Issues Resolved - `PYTHON-271 `_: Regression in GridFS leads to serious loss of data. -Changes in Version 2.0 ----------------------- +Changes in Version 2.0 (2011/08/05) +----------------------------------- Version 2.0 adds a large number of features and fixes a number of issues. @@ -2649,16 +3751,16 @@ Important New Features: independently at the connection, database, collection or query level. Each level will inherit settings from the previous level and each level can override the previous level's setting. -- PyMongo now supports the `await_data` and `partial` cursor flags. If the - `await_data` flag is set on a `tailable` cursor the server will block for - some extra time waiting for more data to return. The `partial` flag tells +- PyMongo now supports the ``await_data`` and ``partial`` cursor flags. If the + ``await_data`` flag is set on a ``tailable`` cursor the server will block for + some extra time waiting for more data to return. The ``partial`` flag tells a mongos to return partial data for a query if not all shards are available. -- :meth:`~pymongo.collection.Collection.map_reduce` will accept a `dict` or - instance of :class:`~bson.son.SON` as the `out` parameter. +- :meth:`~pymongo.collection.Collection.map_reduce` will accept a ``dict`` or + instance of :class:`~bson.son.SON` as the ``out`` parameter. - The URI parser has been moved into its own module and can be used directly by application code. - AutoReconnect exception now provides information about the error that - actually occured instead of a generic failure message. + actually occurred instead of a generic failure message. - A number of new helper methods have been added with options for setting and unsetting cursor flags, re-indexing a collection, fsync and locking a server, and getting the server's current operations. 
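A sketch of the new cursor flags against the 2.0-era API (``capped_events``
is a hypothetical capped collection; this predates ``MongoClient``)::

    from pymongo import Connection

    db = Connection().test
    # On a tailable cursor, await_data asks the server to block briefly
    # for new documents instead of returning an empty batch immediately.
    cursor = db.capped_events.find(tailable=True, await_data=True)
    for doc in cursor:
        print(doc)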
@@ -2666,9 +3768,9 @@ Important New Features: API changes: - If only one host:port pair is specified :class:`~pymongo.connection.Connection` - will make a direct connection to only that host. Please note that `slave_okay` - must be `True` in order to query from a secondary. -- If more than one host:port pair is specified or the `replicaset` option is + will make a direct connection to only that host. Please note that ``slave_okay`` + must be ``True`` in order to query from a secondary. +- If more than one host:port pair is specified or the ``replicaset`` option is used PyMongo will treat the specified host:port pair(s) as a seed list and connect using replica set behavior. @@ -2685,15 +3787,15 @@ See the `PyMongo 2.0 release notes in JIRA`_ for the list of resolved issues in .. _PyMongo 2.0 release notes in JIRA: https://jira.mongodb.org/secure/ReleaseNote.jspa?projectId=10004&version=10274 -Changes in Version 1.11 ------------------------ +Changes in Version 1.11 (2011/05/05) +------------------------------------ Version 1.11 adds a few new features and fixes a few more bugs. New Features: - Basic IPv6 support: pymongo prefers IPv4 but will try IPv6. You can - also specify an IPv6 address literal in the `host` parameter or a + also specify an IPv6 address literal in the ``host`` parameter or a MongoDB URI provided it is enclosed in '[' and ']'. - max_pool_size option: previously pymongo had a hard coded pool size of 10 connections. With this change you can specify a different pool @@ -2711,10 +3813,10 @@ API changes: - :meth:`~pymongo.database.Database.validate_collection` now returns a dict instead of a string. This change was required to deal with an API change on the server. This method also now takes the optional - `scandata` and `full` parameters. See the documentation for more + ``scandata`` and ``full`` parameters. See the documentation for more details. -.. warning:: The `pool_size`, `auto_start_request`, and `timeout` parameters +.. warning:: The ``pool_size``, ``auto_start_request```, and ``timeout`` parameters for :class:`~pymongo.connection.Connection` have been completely removed in this release. They were deprecated in pymongo-1.4 and have had no effect since then. Please make sure that your code @@ -2739,8 +3841,8 @@ Issues resolved - `PYTHON-138 `_: Find method for GridFS -Changes in Version 1.10.1 -------------------------- +Changes in Version 1.10.1 (2011/04/07) +-------------------------------------- Version 1.10.1 is primarily a bugfix release. It fixes a regression in version 1.10 that broke pickling of ObjectIds. A number of other bugs @@ -2756,9 +3858,9 @@ There are two behavior changes to be aware of: Previously the read would be sent to one randomly chosen slave and :class:`~pymongo.errors.AutoReconnect` was immediately raised in case of a connection failure. -- A Python `long` is now always BSON encoded as an int64. Previously the - encoding was based only on the value of the field and a `long` with a - value less than `2147483648` or greater than `-2147483649` would always +- A Python ``long`` is now always BSON encoded as an int64. Previously the + encoding was based only on the value of the field and a ``long`` with a + value less than ``2147483648`` or greater than ``-2147483649`` would always be BSON encoded as an int32. 
Issues resolved @@ -2783,12 +3885,12 @@ Issues resolved - `PYTHON-186 `_: When storing integers, type is selected according to value instead of type - `PYTHON-173 `_: - as_class option is not propogated by Cursor.clone + as_class option is not propagated by Cursor.clone - `PYTHON-113 `_: Redunducy in MasterSlaveConnection -Changes in Version 1.10 ------------------------ +Changes in Version 1.10 (2011/03/30) +------------------------------------ Version 1.10 includes changes to support new features in MongoDB 1.8.x. Highlights include a modified map/reduce API including an inline map/reduce @@ -2803,7 +3905,7 @@ server for the maximum BSON document size it supports. collections for map/reduce results. An output collection name must be provided and the output will replace any existing output collection with the same name. :meth:`~pymongo.collection.Collection.map_reduce` now - requires the `out` parameter. + requires the ``out`` parameter. Issues resolved ............... @@ -2826,14 +3928,14 @@ Issues resolved - PYTHON-169: Support deepcopy of DBRef. - PYTHON-167: Duplicate of PYTHON-166. - PYTHON-166: Fixes a concurrency issue. -- PYTHON-158: Add code and err string to `db assertion` messages. +- PYTHON-158: Add code and err string to ``db assertion`` messages. -Changes in Version 1.9 ----------------------- +Changes in Version 1.9 (2010/09/28) +----------------------------------- Version 1.9 adds a new package to the PyMongo distribution, :mod:`bson`. :mod:`bson` contains all of the `BSON -`_ encoding and decoding logic, and the BSON +`_ encoding and decoding logic, and the BSON types that were formerly in the :mod:`pymongo` package. The following modules have been renamed: @@ -2903,15 +4005,15 @@ rather than :class:`pymongo.errors.PyMongoError`. :class:`~pymongo.connection.Connection` has been idle for a while. - added :meth:`~pymongo.database.SystemJS.list` to :class:`~pymongo.database.SystemJS`. -- added `file_document` argument to :meth:`~gridfs.grid_file.GridOut` +- added ``file_document`` argument to :meth:`~gridfs.grid_file.GridOut` to allow initializing from an existing file document. - raise :class:`~pymongo.errors.TimeoutError` even if the ``getLastError`` command was run manually and not through "safe" mode. - added :class:`uuid` support to :mod:`~bson.json_util`. -Changes in Version 1.8.1 ------------------------- +Changes in Version 1.8.1 (2010/08/13) +------------------------------------- - fixed a typo in the C extension that could cause safe-mode operations to report a failure (:class:`SystemError`) even when none @@ -2919,17 +4021,17 @@ Changes in Version 1.8.1 - added a :meth:`__ne__` implementation to any class where we define :meth:`__eq__`. -Changes in Version 1.8 ----------------------- +Changes in Version 1.8 (2010/08/05) +----------------------------------- Version 1.8 adds support for connecting to replica sets, specifying -per-operation values for `w` and `wtimeout`, and decoding to +per-operation values for ``w`` and ``wtimeout``, and decoding to timezone-aware datetimes. - fixed a reference leak in the C extension when decoding a :class:`~bson.dbref.DBRef`. -- added support for `w`, `wtimeout`, and `fsync` (and any other - options for `getLastError`) to "safe mode" operations. +- added support for ``w``, ``wtimeout``, and ``fsync`` (and any other + options for ``getLastError``) to "safe mode" operations. - added :attr:`~pymongo.connection.Connection.nodes` property. - added a maximum pool size of 10 sockets. - added support for replica sets. 
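A sketch of the per-operation write options mentioned above, against the
1.8-era API (the two-member acknowledgement is illustrative)::

    from pymongo import Connection

    db = Connection().test
    # With safe=True, extra getLastError options such as w, wtimeout,
    # and fsync ride along with the write.
    db.events.insert({"x": 1}, safe=True, w=2, wtimeout=1000)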
@@ -2947,9 +4049,9 @@ timezone-aware datetimes. :class:`~bson.max_key.MaxKey` and :class:`~bson.timestamp.Timestamp` to :mod:`~bson.json_util`. - added support for decoding datetimes as aware (UTC) - it is highly - recommended to enable this by setting the `tz_aware` parameter to + recommended to enable this by setting the ``tz_aware`` parameter to :meth:`~pymongo.connection.Connection` to ``True``. -- added `network_timeout` option for individual calls to +- added ``network_timeout`` option for individual calls to :meth:`~pymongo.collection.Collection.find` and :meth:`~pymongo.collection.Collection.find_one`. - added :meth:`~gridfs.GridFS.exists` to check if a file exists in @@ -2960,13 +4062,13 @@ timezone-aware datetimes. :class:`~pymongo.errors.OperationFailure` exceptions. - fixed serialization of int and float subclasses in the C extension. -Changes in Version 1.7 ----------------------- +Changes in Version 1.7 (2010/06/17) +----------------------------------- Version 1.7 is a recommended upgrade for all PyMongo users. The full release notes are below, and some more in depth discussion of the highlights is `here -`_. +`_. - no longer attempt to build the C extension on big-endian systems. - added :class:`~bson.min_key.MinKey` and @@ -2980,11 +4082,11 @@ highlights is `here support for querying unique status and other index information. - added :attr:`~pymongo.connection.Connection.document_class`, to specify class for returned documents. -- added `as_class` argument for +- added ``as_class`` argument for :meth:`~pymongo.collection.Collection.find`, and in the BSON decoder. - added support for creating :class:`~bson.timestamp.Timestamp` instances using a :class:`~datetime.datetime`. -- allow `dropTarget` argument for +- allow ``dropTarget`` argument for :class:`~pymongo.collection.Collection.rename`. - handle aware :class:`~datetime.datetime` instances, by converting to UTC. @@ -2994,13 +4096,13 @@ highlights is `here - use `y2038 `_ for time handling in the C extension - eliminates 2038 problems when extension is installed. -- added `sort` parameter to +- added ``sort`` parameter to :meth:`~pymongo.collection.Collection.find` - finalized deprecation of changes from versions **<= 1.4** - take any non-:class:`dict` as an ``"_id"`` query for :meth:`~pymongo.collection.Collection.find_one` or :meth:`~pymongo.collection.Collection.remove` -- added ability to pass a :class:`dict` for `fields` argument to +- added ability to pass a :class:`dict` for ``fields`` argument to :meth:`~pymongo.collection.Collection.find` (supports ``"$slice"`` and field negation) - simplified code to find master, since paired setups don't always have @@ -3010,14 +4112,14 @@ highlights is `here - don't transparently map ``"filename"`` key to :attr:`name` attribute for GridFS. -Changes in Version 1.6 ----------------------- +Changes in Version 1.6 (2010/04/14) +----------------------------------- The biggest change in version 1.6 is a complete re-implementation of :mod:`gridfs` with a lot of improvements over the old implementation. There are many details and examples of using the new API in `this blog post -`_. The +`_. The old API has been removed in this version, so existing code will need to be modified before upgrading to 1.6. @@ -3032,17 +4134,17 @@ to be modified before upgrading to 1.6. on non-existent collections. - disallow empty bulk inserts. 
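The :mod:`gridfs` API introduced by the 1.6 rewrite above survives essentially unchanged in modern releases. A minimal round trip, assuming a ``mongod`` on the default port (the ``gridfs_example`` database name is a placeholder)::

    import gridfs
    from pymongo import MongoClient

    db = MongoClient().gridfs_example
    fs = gridfs.GridFS(db)

    # Write a file, then read it back by its _id.
    file_id = fs.put(b"hello world", filename="hello.txt")
    assert fs.get(file_id).read() == b"hello world"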
-Changes in Version 1.5.2 ------------------------- +Changes in Version 1.5.2 (2010/03/31) +------------------------------------- - fixed response handling to ignore unknown response flags in queries. - handle server versions containing '-pre-'. -Changes in Version 1.5.1 ------------------------- +Changes in Version 1.5.1 (2010/03/17) +------------------------------------- - added :data:`~gridfs.grid_file.GridFile._id` property for :class:`~gridfs.grid_file.GridFile` instances. - fix for making a :class:`~pymongo.connection.Connection` (with - `slave_okay` set) directly to a slave in a replica pair. + ``slave_okay`` set) directly to a slave in a replica pair. - accept kwargs for :meth:`~pymongo.collection.Collection.create_index` and :meth:`~pymongo.collection.Collection.ensure_index` to support all @@ -3051,10 +4153,10 @@ Changes in Version 1.5.1 - improvements to Python code caching in C extension - should improve behavior on mod_wsgi. -Changes in Version 1.5 ----------------------- +Changes in Version 1.5 (2010/03/10) +----------------------------------- - added subtype constants to :mod:`~bson.binary` module. -- DEPRECATED `options` argument to +- DEPRECATED ``options`` argument to :meth:`~pymongo.collection.Collection` and :meth:`~pymongo.database.Database.create_collection` in favor of kwargs. @@ -3064,7 +4166,7 @@ Changes in Version 1.5 might have more data to return (useful for tailable cursors). - added :class:`~bson.timestamp.Timestamp` to better support dealing with internal MongoDB timestamps. -- added `name` argument for +- added ``name`` argument for :meth:`~pymongo.collection.Collection.create_index` and :meth:`~pymongo.collection.Collection.ensure_index`. - fixed connection pooling w/ fork @@ -3086,8 +4188,8 @@ Changes in Version 1.5 - added :class:`~gridfs.errors.GridFSError` as base class for :mod:`gridfs` exceptions. -Changes in Version 1.4 ----------------------- +Changes in Version 1.4 (2010/01/17) +----------------------------------- Perhaps the most important change in version 1.4 is that we have decided to **no longer support Python 2.3**. The most immediate reason @@ -3116,7 +4218,7 @@ Other changes: for example. - added :class:`~pymongo.errors.DuplicateKeyError` for calls to :meth:`~pymongo.collection.Collection.insert` or - :meth:`~pymongo.collection.Collection.update` with `safe` set to + :meth:`~pymongo.collection.Collection.update` with ``safe`` set to ``True``. - removed :mod:`~pymongo.thread_util`. - added :meth:`~pymongo.database.Database.add_user` and @@ -3128,8 +4230,8 @@ Other changes: - clean up all cases where :class:`~pymongo.errors.ConnectionFailure` is raised. - simplification of connection pooling - makes driver ~2x faster for - simple benchmarks. see :ref:`connection-pooling` for more information. -- DEPRECATED `pool_size`, `auto_start_request` and `timeout` + simple benchmarks. see `connection pooling `_ for more information. +- DEPRECATED ``pool_size``, ``auto_start_request`` and ``timeout`` parameters to :class:`~pymongo.connection.Connection`. DEPRECATED :meth:`~pymongo.connection.Connection.start_request`. - use :meth:`socket.sendall`. @@ -3140,7 +4242,7 @@ Other changes: - deprecate :meth:`~pymongo.database.Database._command` in favor of :meth:`~pymongo.database.Database.command`. - send all commands without wrapping as ``{"query": ...}``. -- support string as `key` argument to +- support string as ``key`` argument to :meth:`~pymongo.collection.Collection.group` (keyf) and run all groups as commands. 
- support for equality testing for :class:`~bson.code.Code` @@ -3148,8 +4250,8 @@ Other changes: - allow the NULL byte in strings and disallow it in key names or regex patterns -Changes in Version 1.3 ----------------------- +Changes in Version 1.3 (2009/12/16) +----------------------------------- - DEPRECATED running :meth:`~pymongo.collection.Collection.group` as :meth:`~pymongo.database.Database.eval`, also changed default for :meth:`~pymongo.collection.Collection.group` to running as a command @@ -3174,8 +4276,8 @@ Changes in Version 1.3 usual, as it carries some performance implications. - added :meth:`~pymongo.connection.Connection.disconnect` -Changes in Version 1.2.1 ------------------------- +Changes in Version 1.2.1 (2009/12/10) +------------------------------------- - added :doc:`changelog` to docs - added ``setup.py doc --test`` to run doctests for tutorial, examples - moved most examples to Sphinx docs (and remove from *examples/* @@ -3186,16 +4288,16 @@ Changes in Version 1.2.1 characters - allow :class:`unicode` instances for :class:`~bson.objectid.ObjectId` init -Changes in Version 1.2 ----------------------- -- `spec` parameter for :meth:`~pymongo.collection.Collection.remove` is +Changes in Version 1.2 (2009/12/09) +----------------------------------- +- ``spec`` parameter for :meth:`~pymongo.collection.Collection.remove` is now optional to allow for deleting all documents in a :class:`~pymongo.collection.Collection` - always wrap queries with ``{query: ...}`` even when no special options - get around some issues with queries on fields named ``query`` - enforce 4MB document limit on the client side - added :meth:`~pymongo.collection.Collection.map_reduce` helper - see - :doc:`example ` + `Aggregation `_ - added :meth:`~pymongo.cursor.Cursor.distinct` method on :class:`~pymongo.cursor.Cursor` instances to allow distinct with queries @@ -3213,29 +4315,29 @@ Changes in Version 1.2 - some minor fixes for installation process - added support for datetime and regex in :mod:`~bson.json_util` -Changes in Version 1.1.2 ------------------------- +Changes in Version 1.1.2 (2009/11/23) +------------------------------------- - improvements to :meth:`~pymongo.collection.Collection.insert` speed (using C for insert message creation) - use random number for request_id - fix some race conditions with :class:`~pymongo.errors.AutoReconnect` -Changes in Version 1.1.1 ------------------------- -- added `multi` parameter for +Changes in Version 1.1.1 (2009/11/14) +------------------------------------- +- added ``multi`` parameter for :meth:`~pymongo.collection.Collection.update` - fix unicode regex patterns with C extension - added :meth:`~pymongo.collection.Collection.distinct` -- added `database` support for :class:`~bson.dbref.DBRef` +- added ``database`` support for :class:`~bson.dbref.DBRef` - added :mod:`~bson.json_util` with helpers for encoding / decoding special types to JSON - DEPRECATED :meth:`pymongo.cursor.Cursor.__len__` in favor of - :meth:`~pymongo.cursor.Cursor.count` with `with_limit_and_skip` set + :meth:`~pymongo.cursor.Cursor.count` with ``with_limit_and_skip`` set to ``True`` due to performance regression - switch documentation to Sphinx -Changes in Version 1.1 ----------------------- +Changes in Version 1.1 (2009/10/21) +----------------------------------- - added :meth:`__hash__` for :class:`~bson.dbref.DBRef` and :class:`~bson.objectid.ObjectId` - bulk :meth:`~pymongo.collection.Collection.insert` works with any @@ -3243,35 +4345,35 @@ Changes in Version 1.1 - fix 
:class:`~bson.objectid.ObjectId` generation when using :mod:`multiprocessing` - added :attr:`~pymongo.cursor.Cursor.collection` -- added `network_timeout` parameter for +- added ``network_timeout`` parameter for :meth:`~pymongo.connection.Connection` -- DEPRECATED `slave_okay` parameter for individual queries -- fix for `safe` mode when multi-threaded -- added `safe` parameter for :meth:`~pymongo.collection.Collection.remove` -- added `tailable` parameter for :meth:`~pymongo.collection.Collection.find` +- DEPRECATED ``slave_okay`` parameter for individual queries +- fix for ``safe`` mode when multi-threaded +- added ``safe`` parameter for :meth:`~pymongo.collection.Collection.remove` +- added ``tailable`` parameter for :meth:`~pymongo.collection.Collection.find` -Changes in Version 1.0 ----------------------- +Changes in Version 1.0 (2009/09/30) +----------------------------------- - fixes for :class:`~pymongo.master_slave_connection.MasterSlaveConnection` -- added `finalize` parameter for :meth:`~pymongo.collection.Collection.group` +- added ``finalize`` parameter for :meth:`~pymongo.collection.Collection.group` - improvements to :meth:`~pymongo.collection.Collection.insert` speed - improvements to :mod:`gridfs` speed - added :meth:`~pymongo.cursor.Cursor.__getitem__` and :meth:`~pymongo.cursor.Cursor.__len__` for :class:`~pymongo.cursor.Cursor` instances -Changes in Version 0.16 ------------------------ +Changes in Version 0.16 (2009/09/16) +------------------------------------ - support for encoding/decoding :class:`uuid.UUID` instances - fix for :meth:`~pymongo.cursor.Cursor.explain` with limits -Changes in Version 0.15.2 -------------------------- +Changes in Version 0.15.2 (2009/09/09) +-------------------------------------- - documentation changes only -Changes in Version 0.15.1 -------------------------- +Changes in Version 0.15.1 (2009/09/02) +-------------------------------------- - various performance improvements - API CHANGE no longer need to specify direction for :meth:`~pymongo.collection.Collection.create_index` and @@ -3280,34 +4382,34 @@ Changes in Version 0.15.1 - support for encoding :class:`tuple` instances as :class:`list` instances -Changes in Version 0.15 ------------------------ +Changes in Version 0.15 (2009/08/26) +------------------------------------ - fix string representation of :class:`~bson.objectid.ObjectId` instances -- added `timeout` parameter for +- added ``timeout`` parameter for :meth:`~pymongo.collection.Collection.find` -- allow scope for `reduce` function in +- allow scope for ``reduce`` function in :meth:`~pymongo.collection.Collection.group` -Changes in Version 0.14.2 -------------------------- +Changes in Version 0.14.2 (2009/08/24) +-------------------------------------- - minor bugfixes -Changes in Version 0.14.1 -------------------------- +Changes in Version 0.14.1 (2009/08/21) +-------------------------------------- - :meth:`~gridfs.grid_file.GridFile.seek` and :meth:`~gridfs.grid_file.GridFile.tell` for (read mode) :class:`~gridfs.grid_file.GridFile` instances -Changes in Version 0.14 ------------------------ +Changes in Version 0.14 (2009/08/19) +------------------------------------ - support for long in :class:`~bson.BSON` - added :meth:`~pymongo.collection.Collection.rename` -- added `snapshot` parameter for +- added ``snapshot`` parameter for :meth:`~pymongo.collection.Collection.find` -Changes in Version 0.13 ------------------------ +Changes in Version 0.13 (2009/07/29) +------------------------------------ - better 
:class:`~pymongo.master_slave_connection.MasterSlaveConnection` support @@ -3317,38 +4419,38 @@ Changes in Version 0.13 - DEPRECATED passing an index name to :meth:`~pymongo.cursor.Cursor.hint` -Changes in Version 0.12 ------------------------ +Changes in Version 0.12 (2009/07/08) +------------------------------------ - improved :class:`~bson.objectid.ObjectId` generation - added :class:`~pymongo.errors.AutoReconnect` exception for when reconnection is possible - make :mod:`gridfs` thread-safe - fix for :mod:`gridfs` with non :class:`~bson.objectid.ObjectId` ``_id`` -Changes in Version 0.11.3 -------------------------- +Changes in Version 0.11.3 (2009/06/18) +-------------------------------------- - don't allow NULL bytes in string encoder - fixes for Python 2.3 -Changes in Version 0.11.2 -------------------------- +Changes in Version 0.11.2 (2009/06/08) +-------------------------------------- - PEP 8 - updates for :meth:`~pymongo.collection.Collection.group` - VS build -Changes in Version 0.11.1 -------------------------- +Changes in Version 0.11.1 (2009/06/04) +-------------------------------------- - fix for connection pooling under Python 2.5 -Changes in Version 0.11 ------------------------ +Changes in Version 0.11 (2009/06/03) +------------------------------------ - better build failure detection - driver support for selecting fields in sub-documents - disallow insertion of invalid key names -- added `timeout` parameter for :meth:`~pymongo.connection.Connection` +- added ``timeout`` parameter for :meth:`~pymongo.connection.Connection` -Changes in Version 0.10.3 -------------------------- +Changes in Version 0.10.3 (2009/05/27) +-------------------------------------- - fix bug with large :meth:`~pymongo.cursor.Cursor.limit` - better exception when modules get reloaded out from underneath the C extension @@ -3356,29 +4458,23 @@ Changes in Version 0.10.3 :class:`~pymongo.collection.Collection` or :class:`~pymongo.database.Database` instance -Changes in Version 0.10.2 -------------------------- +Changes in Version 0.10.2 (2009/05/22) +-------------------------------------- - support subclasses of :class:`dict` in C encoder -Changes in Version 0.10.1 -------------------------- +Changes in Version 0.10.1 (2009/05/18) +-------------------------------------- - alias :class:`~pymongo.connection.Connection` as :attr:`pymongo.Connection` - raise an exception rather than silently overflowing in encoder -Changes in Version 0.10 ------------------------ +Changes in Version 0.10 (2009/05/14) +------------------------------------ - added :meth:`~pymongo.collection.Collection.ensure_index` -Changes in Version 0.9.7 ------------------------- +Changes in Version 0.9.7 (2009/05/13) +------------------------------------- - allow sub-collections of *$cmd* as valid :class:`~pymongo.collection.Collection` names - add version as :attr:`pymongo.version` - add ``--no_ext`` command line option to *setup.py* - -.. toctree:: - :hidden: - - python3 - examples/gevent diff --git a/doc/common-issues.rst b/doc/common-issues.rst deleted file mode 100644 index 1571b985e0..0000000000 --- a/doc/common-issues.rst +++ /dev/null @@ -1,98 +0,0 @@ -Frequently Encountered Issues -============================= - -Also see the :ref:`TLSErrors` section. - -.. contents:: - -Server reports wire version X, PyMongo requires Y -------------------------------------------------- - -When one attempts to connect to a <=3.4 version server, PyMongo will throw the following error:: - - >>> client.admin.command('ping') - ... 
- pymongo.errors.ConfigurationError: Server at localhost:27017 reports wire version 5, but this version of PyMongo requires at least 6 (MongoDB 3.6). - -This is caused by the driver being too new for the server it is being run against. -To resolve this issue either upgrade your database to version >= 3.6 or downgrade to PyMongo 3.x which supports MongoDB >= 2.6. - - -'Cursor' object has no attribute '_Cursor__killed' --------------------------------------------------- - -On versions of PyMongo <3.9, when supplying invalid arguments the constructor of Cursor, -there will be a TypeError raised, and an AttributeError printed to ``stderr``. The AttributeError is not relevant, -instead look at the TypeError for debugging information:: - - >>> coll.find(wrong=1) - Exception ignored in: - ... - AttributeError: 'Cursor' object has no attribute '_Cursor__killed' - ... - TypeError: __init__() got an unexpected keyword argument 'wrong' - -To fix this, make sure that you are supplying the correct keyword arguments. -In addition, you can also upgrade to PyMongo >=3.9, which will remove the spurious error. - - -MongoClient fails ConfigurationError ------------------------------------- - -This is a common issue stemming from using incorrect keyword argument names. - - >>> client = MongoClient(wrong=1) - ... - pymongo.errors.ConfigurationError: Unknown option wrong - -To fix this, check your spelling and make sure that the keyword argument you are specifying exists. - - -DeprecationWarning: count is deprecated ---------------------------------------- - -PyMongo no longer supports :meth:`pymongo.cursor.count`. -Instead, use :meth:`pymongo.collection.count_documents`:: - - >>> client = MongoClient() - >>> d = datetime.datetime(2009, 11, 12, 12) - >>> list(client.db.coll.find({"date": {"$lt": d}}, limit=2)) - [{'_id': ObjectId('6247b058cebb8b179b7039f8'), 'date': datetime.datetime(1, 1, 1, 0, 0)}, {'_id': ObjectId('6247b059cebb8b179b7039f9'), 'date': datetime.datetime(1, 1, 1, 0, 0)}] - >>> client.db.coll.count_documents({"date": {"$lt": d}}, limit=2) - 2 - -Note that this is NOT the same as ``Cursor.count_documents`` (which does not exist), -this is a method of the Collection class, so you must call it on a collection object -or you will receive the following error:: - - >>> Cursor(MongoClient().db.coll).count() - Traceback (most recent call last): - File "", line 1, in - AttributeError: 'Cursor' object has no attribute 'count' - >>> - -Timeout when accessing MongoDB from PyMongo with tunneling ----------------------------------------------------------- - -When attempting to connect to a replica set MongoDB instance over an SSH tunnel you -will receive the following error:: - - File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1560, in count - return self._count(cmd, collation, session) - File "/Library/Python/2.7/site-packages/pymongo/collection.py", line 1504, in _count - with self._socket_for_reads() as (sock_info, slave_ok): - File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/contextlib.py", line 17, in __enter__ - return self.gen.next() - File "/Library/Python/2.7/site-packages/pymongo/mongo_client.py", line 982, in _socket_for_reads - server = topology.select_server(read_preference) - File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 224, in select_server - address)) - File "/Library/Python/2.7/site-packages/pymongo/topology.py", line 183, in select_servers - selector, server_timeout, address) - File 
"/Library/Python/2.7/site-packages/pymongo/topology.py", line 199, in _select_servers_loop - self._error_message(selector)) - pymongo.errors.ServerSelectionTimeoutError: localhost:27017: timed out - -This is due to the fact that PyMongo discovers replica set members using the response from the isMaster command which -then contains the address and ports of the other members. However, these addresses and ports will not be accessible through the SSH tunnel. Thus, this behavior is unsupported. -You can, however, connect directly to a single MongoDB node using the directConnection=True option with SSH tunneling. diff --git a/doc/compatibility-policy.rst b/doc/compatibility-policy.rst deleted file mode 100644 index a20b9681eb..0000000000 --- a/doc/compatibility-policy.rst +++ /dev/null @@ -1,62 +0,0 @@ -Compatibility Policy -==================== - -Semantic Versioning -------------------- - -PyMongo's version numbers follow `semantic versioning`_: each version number -is structured "major.minor.patch". Patch releases fix bugs, minor releases -add features (and may fix bugs), and major releases include API changes that -break backwards compatibility (and may add features and fix bugs). - -Deprecation ------------ - -Before we remove a feature in a major release, PyMongo's maintainers make an -effort to release at least one minor version that *deprecates* it. We add -"**DEPRECATED**" to the feature's documentation, and update the code to raise a -`DeprecationWarning`_. You can ensure your code is future-proof by running -your code with the latest PyMongo release and looking for DeprecationWarnings. - -The interpreter silences DeprecationWarnings by default. For example, the -following code uses the deprecated ``insert`` method but does not raise any -warning: - -.. code-block:: python - - # "insert.py" (with PyMongo 3.X) - from pymongo import MongoClient - - client = MongoClient() - client.test.test.insert({}) - -To print deprecation warnings to stderr, run python with "-Wd":: - - $ python3 -Wd insert.py - insert.py:4: DeprecationWarning: insert is deprecated. Use insert_one or insert_many instead. - client.test.test.insert({}) - -You can turn warnings into exceptions with "python -We":: - - $ python3 -We insert.py - Traceback (most recent call last): - File "insert.py", line 4, in - client.test.test.insert({}) - File "/home/durin/work/mongo-python-driver/pymongo/collection.py", line 2906, in insert - "instead.", DeprecationWarning, stacklevel=2) - DeprecationWarning: insert is deprecated. Use insert_one or insert_many instead. - -If your own code's test suite passes with "python -We" then it uses no -deprecated PyMongo features. - -.. seealso:: The Python documentation on `the warnings module`_, - and `the -W command line option`_. - -.. _semantic versioning: http://semver.org/ - -.. _DeprecationWarning: - https://docs.python.org/3/library/exceptions.html#DeprecationWarning - -.. _the warnings module: https://docs.python.org/3/library/warnings.html - -.. _the -W command line option: https://docs.python.org/3/using/cmdline.html#cmdoption-w diff --git a/doc/conf.py b/doc/conf.py index 1e18eb29bf..063429cd98 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,15 +1,15 @@ -# -*- coding: utf-8 -*- # # PyMongo documentation build configuration file # # This file is execfile()d with the current directory set to its containing dir. 
+from __future__ import annotations -import os import sys +from pathlib import Path -sys.path[0:0] = [os.path.abspath("..")] +sys.path[0:0] = [Path("..").resolve()] -import pymongo # noqa +import pymongo # noqa: E402 # -- General configuration ----------------------------------------------------- @@ -26,13 +26,12 @@ # Add optional extensions try: - import sphinxcontrib.shellcheck # noqa + import sphinxcontrib.shellcheck # noqa: F401 extensions += ["sphinxcontrib.shellcheck"] except ImportError: pass - # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] @@ -83,19 +82,27 @@ # Options for link checking # The anchors on the rendered markdown page are created after the fact, # so those link results in a 404. -# wiki.centos.org has been flakey. +# wiki.centos.org has been flaky. # sourceforge.net is giving a 403 error, but is still accessible from the browser. +# Links to release notes in jira give 401 error: unauthorized. PYTHON-5585 linkcheck_ignore = [ - "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check", + "https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#requesting-an-immediate-check", + "https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback", "https://github.com/mongodb/libmongocrypt/blob/master/bindings/python/README.rst#installing-from-source", r"https://wiki.centos.org/[\w/]*", - r"http://sourceforge.net/", + r"https://sourceforge.net/", + r"https://jira\.mongodb\.org/secure/ReleaseNote\.jspa.*", ] +# Allow for flaky links. +linkcheck_retries = 3 + # -- Options for extensions ---------------------------------------------------- autoclass_content = "init" -doctest_path = [os.path.abspath("..")] +autodoc_typehints = "description" + +doctest_path = [Path("..").resolve()] doctest_test_doctest_blocks = "" @@ -104,17 +111,23 @@ client = MongoClient() client.drop_database("doctest_test") db = client.doctest_test +server_major_version = int(client.server_info()['version'].split()[-1][0]) """ # -- Options for HTML output --------------------------------------------------- -# Theme gratefully vendored from CPython source. -html_theme = "pydoctheme" -html_theme_path = ["."] -html_theme_options = {"collapsiblesidebar": True, "googletag": False} +try: + import furo # noqa: F401 + + html_theme = "furo" +except ImportError: + # Theme gratefully vendored from CPython source. + html_theme = "pydoctheme" + html_theme_path = ["."] + html_theme_options = {"collapsiblesidebar": True, "googletag": False} -# Additional static files. -html_static_path = ["static"] + # Additional static files. + html_static_path = ["static"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". 
@@ -192,6 +205,6 @@ intersphinx_mapping = { - "gevent": ("http://www.gevent.org/", None), + "gevent": ("https://www.gevent.org/", None), "py": ("https://docs.python.org/3/", None), } diff --git a/doc/contributors.rst b/doc/contributors.rst index 7ab87f7790..08296e9595 100644 --- a/doc/contributors.rst +++ b/doc/contributors.rst @@ -93,3 +93,17 @@ The following is a list of people who have contributed to - Ishmum Jawad Khan (ishmum123) - Arie Bovenberg (ariebovenberg) - Ben Warner (bcwarner) +- Jean-Christophe Fillion-Robin (jcfr) +- Sean Cheah (thalassemia) +- Dainis Gorbunovs (DainisGorbunovs) +- Iris Ho (sleepyStick) +- Stephan Hof (stephan-hof) +- Casey Clements (caseyclements) +- Ivan Lukyanchikov (ilukyanchikov) +- Terry Patterson +- Romain Morotti +- Navjot Singh (navjots18) +- Jib Adegunloye (Jibola) +- Jeffrey A. Clark (aclark4life) +- Steven Silvester (blink1073) +- Noah Stapp (NoahStapp) diff --git a/doc/developer/index.rst b/doc/developer/index.rst deleted file mode 100644 index 2ce1e0536c..0000000000 --- a/doc/developer/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Developer Guide -=============== - -Technical guide for contributors to PyMongo. - -.. toctree:: - :maxdepth: 1 - - periodic_executor diff --git a/doc/developer/periodic_executor.rst b/doc/developer/periodic_executor.rst deleted file mode 100644 index 9cb0ce0eb9..0000000000 --- a/doc/developer/periodic_executor.rst +++ /dev/null @@ -1,113 +0,0 @@ -Periodic Executors -================== - -.. currentmodule:: pymongo - -PyMongo implements a :class:`~periodic_executor.PeriodicExecutor` for two -purposes: as the background thread for :class:`~monitor.Monitor`, and to -regularly check if there are `OP_KILL_CURSORS` messages that must be sent to the server. - -Killing Cursors ---------------- - -An incompletely iterated :class:`~cursor.Cursor` on the client represents an -open cursor object on the server. In code like this, we lose a reference to -the cursor before finishing iteration:: - - for doc in collection.find(): - raise Exception() - -We try to send an `OP_KILL_CURSORS` to the server to tell it to clean up the -server-side cursor. But we must not take any locks directly from the cursor's -destructor (see `PYTHON-799`_), so we cannot safely use the PyMongo data -structures required to send a message. The solution is to add the cursor's id -to an array on the :class:`~mongo_client.MongoClient` without taking any locks. - -Each client has a :class:`~periodic_executor.PeriodicExecutor` devoted to -checking the array for cursor ids. Any it sees are the result of cursors that -were freed while the server-side cursor was still open. The executor can safely -take the locks it needs in order to send the `OP_KILL_CURSORS` message. - -.. _PYTHON-799: https://jira.mongodb.org/browse/PYTHON-799 - -Stopping Executors ------------------- - -Just as :class:`~cursor.Cursor` must not take any locks from its destructor, -neither can :class:`~mongo_client.MongoClient` and :class:`~topology.Topology`. -Thus, although the client calls :meth:`close` on its kill-cursors thread, and -the topology calls :meth:`close` on all its monitor threads, the :meth:`close` -method cannot actually call :meth:`wake` on the executor, since :meth:`wake` -takes a lock. - -Instead, executors wake periodically to check if ``self.close`` is set, -and if so they exit. - -A thread can log spurious errors if it wakes late in the Python interpreter's -shutdown sequence, so we try to join threads before then. 
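A condensed sketch of this stop-flag pattern (illustrative only, not PyMongo's actual implementation)::

    import threading
    import time

    class PeriodicExecutor:
        """Run ``target`` every ``interval`` seconds until closed."""

        def __init__(self, interval, target):
            self._interval = interval
            self._target = target
            self._stopped = False

        def open(self):
            thread = threading.Thread(target=self._run, daemon=True)
            thread.start()

        def close(self):
            # Sets a flag without taking any locks, so it is safe to
            # call from a destructor or a weakref callback.
            self._stopped = True

        def _run(self):
            while not self._stopped:
                self._target()
                # Sleep in short increments so close() is noticed promptly.
                deadline = time.monotonic() + self._interval
                while not self._stopped and time.monotonic() < deadline:
                    time.sleep(0.1)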
Each periodic -executor (either a monitor or a kill-cursors thread) adds a weakref to itself -to a set called ``_EXECUTORS``, in the ``periodic_executor`` module. - -An `exit handler`_ runs on shutdown and tells all executors to stop, then -tries (with a short timeout) to join all executor threads. - -.. _exit handler: https://docs.python.org/2/library/atexit.html - -Monitoring ----------- - -For each server in the topology, :class:`~topology.Topology` uses a periodic -executor to launch a monitor thread. This thread must not prevent the topology -from being freed, so it weakrefs the topology. Furthermore, it uses a weakref -callback to terminate itself soon after the topology is freed. - -Solid lines represent strong references, dashed lines weak ones: - -.. generated with graphviz: "dot -Tpng periodic-executor-refs.dot > periodic-executor-refs.png" - -.. image:: ../static/periodic-executor-refs.png - -See `Stopping Executors`_ above for an explanation of the ``_EXECUTORS`` set. - -It is a requirement of the `Server Discovery And Monitoring Spec`_ that a -sleeping monitor can be awakened early. Aside from infrequent wakeups to do -their appointed chores, and occasional interruptions, periodic executors also -wake periodically to check if they should terminate. - -Our first implementation of this idea was the obvious one: use the Python -standard library's threading.Condition.wait with a timeout. Another thread -wakes the executor early by signaling the condition variable. - -A topology cannot signal the condition variable to tell the executor to -terminate, because it would risk a deadlock in the garbage collector: no -destructor or weakref callback can take a lock to signal the condition variable -(see `PYTHON-863`_); thus the only way for a dying object to terminate a -periodic executor is to set its "stopped" flag and let the executor see the -flag next time it wakes. - -We erred on the side of prompt cleanup, and set the check interval at 100ms. We -assumed that checking a flag and going back to sleep 10 times a second was -cheap on modern machines. - -Starting in Python 3.2, the builtin C implementation of lock.acquire takes a -timeout parameter, so Python 3.2+ Condition variables sleep simply by calling -lock.acquire; they are implemented as efficiently as expected. - -But in Python 2, lock.acquire has no timeout. To wait with a timeout, a Python -2 condition variable sleeps a millisecond, tries to acquire the lock, sleeps -twice as long, and tries again. This exponential backoff reaches a maximum -sleep time of 50ms. - -If PyMongo calls the condition variable's "wait" method with a short timeout, -the exponential backoff is restarted frequently. Overall, the condition variable -is not waking a few times a second, but hundreds of times. (See `PYTHON-983`_.) - -Thus the current design of periodic executors is surprisingly simple: they -do a simple `time.sleep` for a half-second, check if it is time to wake or -terminate, and sleep again. - -.. _Server Discovery And Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.rst#requesting-an-immediate-check - -.. _PYTHON-863: https://jira.mongodb.org/browse/PYTHON-863 - -.. 
_PYTHON-983: https://jira.mongodb.org/browse/PYTHON-983 diff --git a/doc/docs-requirements.txt b/doc/docs-requirements.txt deleted file mode 100644 index 455a47d217..0000000000 --- a/doc/docs-requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -Sphinx~=4.2 -sphinx_rtd_theme~=0.5 -readthedocs-sphinx-search~=0.1 -sphinxcontrib-shellcheck~=1.1 diff --git a/doc/examples/aggregation.rst b/doc/examples/aggregation.rst deleted file mode 100644 index cdd82ff6fb..0000000000 --- a/doc/examples/aggregation.rst +++ /dev/null @@ -1,77 +0,0 @@ -Aggregation Examples -==================== - -There are several methods of performing aggregations in MongoDB. These -examples cover the new aggregation framework, using map reduce and using the -group method. - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('aggregation_example') - -Setup ------ -To start, we'll insert some example data which we can perform -aggregations on: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> db = MongoClient().aggregation_example - >>> result = db.things.insert_many([{"x": 1, "tags": ["dog", "cat"]}, - ... {"x": 2, "tags": ["cat"]}, - ... {"x": 2, "tags": ["mouse", "cat", "dog"]}, - ... {"x": 3, "tags": []}]) - >>> result.inserted_ids - [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] - -.. _aggregate-examples: - -Aggregation Framework ---------------------- - -This example shows how to use the -:meth:`~pymongo.collection.Collection.aggregate` method to use the aggregation -framework. We'll perform a simple aggregation to count the number of -occurrences for each tag in the ``tags`` array, across the entire collection. -To achieve this we need to pass in three operations to the pipeline. -First, we need to unwind the ``tags`` array, then group by the tags and -sum them up, finally we sort by count. - -As python dictionaries don't maintain order you should use :class:`~bson.son.SON` -or :class:`collections.OrderedDict` where explicit ordering is required -eg "$sort": - -.. note:: - - aggregate requires server version **>= 2.1.0**. - -.. doctest:: - - >>> from bson.son import SON - >>> pipeline = [ - ... {"$unwind": "$tags"}, - ... {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, - ... {"$sort": SON([("count", -1), ("_id", -1)])} - ... ] - >>> import pprint - >>> pprint.pprint(list(db.things.aggregate(pipeline))) - [{'_id': 'cat', 'count': 3}, - {'_id': 'dog', 'count': 2}, - {'_id': 'mouse', 'count': 1}] - -To run an explain plan for this aggregation use the -:meth:`~pymongo.database.Database.command` method:: - - >>> db.command('aggregate', 'things', pipeline=pipeline, explain=True) - {'ok': 1.0, 'stages': [...]} - -As well as simple aggregations the aggregation framework provides projection -capabilities to reshape the returned data. Using projections and aggregation, -you can add computed fields, create new virtual sub-objects, and extract -sub-fields into the top-level of results. - -.. seealso:: The full documentation for MongoDB's `aggregation framework - `_ diff --git a/doc/examples/authentication.rst b/doc/examples/authentication.rst deleted file mode 100644 index db2dbd3d1f..0000000000 --- a/doc/examples/authentication.rst +++ /dev/null @@ -1,360 +0,0 @@ -Authentication Examples -======================= - -MongoDB supports several different authentication mechanisms. These examples -cover all authentication methods currently supported by PyMongo, documenting -Python module and MongoDB version dependencies. - -.. 
_percent escaped: - -Percent-Escaping Username and Password --------------------------------------- - -Username and password must be percent-escaped with -:py:func:`urllib.parse.quote`, to be used in a MongoDB URI. For example:: - - >>> from pymongo import MongoClient - >>> import urllib.parse - >>> username = urllib.parse.quote_plus('user') - >>> username - 'user' - >>> password = urllib.parse.quote_plus('pass/word') - >>> password - 'pass%2Fword' - >>> MongoClient('mongodb://%s:%s@127.0.0.1' % (username, password)) - ... - -.. _scram_sha_256: - -SCRAM-SHA-256 (RFC 7677) ------------------------- -.. versionadded:: 3.7 - -SCRAM-SHA-256 is the default authentication mechanism supported by a cluster -configured for authentication with MongoDB 4.0 or later. Authentication -requires a username, a password, and a database name. The default database -name is "admin", this can be overridden with the ``authSource`` option. -Credentials can be specified as arguments to -:class:`~pymongo.mongo_client.MongoClient`:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... username='user', - ... password='password', - ... authSource='the_database', - ... authMechanism='SCRAM-SHA-256') - -Or through the MongoDB URI:: - - >>> uri = "mongodb://user:password@example.com/?authSource=the_database&authMechanism=SCRAM-SHA-256" - >>> client = MongoClient(uri) - -SCRAM-SHA-1 (RFC 5802) ----------------------- -.. versionadded:: 2.8 - -SCRAM-SHA-1 is the default authentication mechanism supported by a cluster -configured for authentication with MongoDB 3.0 or later. Authentication -requires a username, a password, and a database name. The default database -name is "admin", this can be overridden with the ``authSource`` option. -Credentials can be specified as arguments to -:class:`~pymongo.mongo_client.MongoClient`:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... username='user', - ... password='password', - ... authSource='the_database', - ... authMechanism='SCRAM-SHA-1') - -Or through the MongoDB URI:: - - >>> uri = "mongodb://user:password@example.com/?authSource=the_database&authMechanism=SCRAM-SHA-1" - >>> client = MongoClient(uri) - -For best performance on Python versions older than 2.7.8 install `backports.pbkdf2`_. - -.. _backports.pbkdf2: https://pypi.python.org/pypi/backports.pbkdf2/ - -MONGODB-CR ----------- - -.. warning:: MONGODB-CR was deprecated with the release of MongoDB 3.6 and - is no longer supported by MongoDB 4.0. - -Before MongoDB 3.0 the default authentication mechanism was MONGODB-CR, -the "MongoDB Challenge-Response" protocol:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... username='user', - ... password='password', - ... authMechanism='MONGODB-CR') - >>> - >>> uri = "mongodb://user:password@example.com/?authSource=the_database&authMechanism=MONGODB-CR" - >>> client = MongoClient(uri) - -Default Authentication Mechanism --------------------------------- - -If no mechanism is specified, PyMongo automatically SCRAM-SHA-1 when connected -to MongoDB 3.6 and negotiates the mechanism to use (SCRAM-SHA-1 -or SCRAM-SHA-256) when connected to MongoDB 4.0+. 
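In practice this means that when no ``authMechanism`` is given, the driver simply picks the strongest mechanism both sides support. For example (the hostname and credentials are placeholders)::

    from pymongo import MongoClient

    # No authMechanism specified: PyMongo negotiates SCRAM-SHA-256 or
    # SCRAM-SHA-1 with the server during the initial handshake.
    uri = "mongodb://user:password@example.com/?authSource=the_database"
    client = MongoClient(uri)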
- -Default Database and "authSource" ---------------------------------- - -You can specify both a default database and the authentication database in the -URI:: - - >>> uri = "mongodb://user:password@example.com/default_db?authSource=admin" - >>> client = MongoClient(uri) - -PyMongo will authenticate on the "admin" database, but the default database -will be "default_db":: - - >>> # get_database with no "name" argument chooses the DB from the URI - >>> db = MongoClient(uri).get_database() - >>> print(db.name) - 'default_db' - -.. _mongodb_x509: - -MONGODB-X509 ------------- -.. versionadded:: 2.6 - -The MONGODB-X509 mechanism authenticates via the X.509 certificate presented -by the driver during TLS/SSL negotiation. This authentication method requires -the use of TLS/SSL connections with certificate validation:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('example.com', - ... authMechanism="MONGODB-X509", - ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem', - ... tlsCAFile='/path/to/ca.pem') - -MONGODB-X509 authenticates against the $external virtual database, so you -do not have to specify a database in the URI:: - - >>> uri = "mongodb://example.com/?authMechanism=MONGODB-X509" - >>> client = MongoClient(uri, - ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem', - ... tlsCAFile='/path/to/ca.pem') - >>> - -.. _gssapi: - -GSSAPI (Kerberos) ------------------ -.. versionadded:: 2.5 - -GSSAPI (Kerberos) authentication is available in the Enterprise Edition of -MongoDB. - -Unix -~~~~ - -To authenticate using GSSAPI you must first install the python `kerberos`_ or -`pykerberos`_ module using easy_install or pip. Make sure you run kinit before -using the following authentication methods:: - - $ kinit mongodbuser@EXAMPLE.COM - mongodbuser@EXAMPLE.COM's Password: - $ klist - Credentials cache: FILE:/tmp/krb5cc_1000 - Principal: mongodbuser@EXAMPLE.COM - - Issued Expires Principal - Feb 9 13:48:51 2013 Feb 9 23:48:51 2013 krbtgt/EXAMPLE.COM@EXAMPLE.COM - -Now authenticate using the MongoDB URI. GSSAPI authenticates against the -$external virtual database so you do not have to specify a database in the -URI:: - - >>> # Note: the kerberos principal must be url encoded. - >>> from pymongo import MongoClient - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@mongo-server.example.com/?authMechanism=GSSAPI" - >>> client = MongoClient(uri) - >>> - -The default service name used by MongoDB and PyMongo is `mongodb`. You can -specify a custom service name with the ``authMechanismProperties`` option:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@mongo-server.example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:myservicename" - >>> client = MongoClient(uri) - -Windows (SSPI) -~~~~~~~~~~~~~~ -.. versionadded:: 3.3 - -First install the `winkerberos`_ module. Unlike authentication on Unix kinit is -not used. 
If the user to authenticate is different from the user that owns the -application process provide a password to authenticate:: - - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM:mongodbuserpassword@example.com/?authMechanism=GSSAPI" - -Two extra ``authMechanismProperties`` are supported on Windows platforms: - -- CANONICALIZE_HOST_NAME - Uses the fully qualified domain name (FQDN) of the - MongoDB host for the server principal (GSSAPI libraries on Unix do this by - default):: - - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@example.com/?authMechanism=GSSAPI&authMechanismProperties=CANONICALIZE_HOST_NAME:true" - -- SERVICE_REALM - This is used when the user's realm is different from the service's realm:: - - >>> uri = "mongodb://mongodbuser%40EXAMPLE.COM@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_REALM:otherrealm" - - -.. _kerberos: http://pypi.python.org/pypi/kerberos -.. _pykerberos: https://pypi.python.org/pypi/pykerberos -.. _winkerberos: https://pypi.python.org/pypi/winkerberos/ - -.. _sasl_plain: - -SASL PLAIN (RFC 4616) ---------------------- -.. versionadded:: 2.6 - -MongoDB Enterprise Edition version 2.6 and newer support the SASL PLAIN -authentication mechanism, initially intended for delegating authentication -to an LDAP server. Using the PLAIN mechanism is very similar to MONGODB-CR. -These examples use the $external virtual database for LDAP support:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN" - >>> client = MongoClient(uri) - >>> - -SASL PLAIN is a clear-text authentication mechanism. We **strongly** recommend -that you connect to MongoDB using TLS/SSL with certificate validation when -using the SASL PLAIN mechanism:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://user:password@example.com/?authMechanism=PLAIN" - >>> client = MongoClient(uri, - ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem', - ... tlsCAFile='/path/to/ca.pem') - >>> - -.. _MONGODB-AWS: - -MONGODB-AWS ------------ -.. versionadded:: 3.11 - -The MONGODB-AWS authentication mechanism is available in MongoDB 4.4+ and -requires extra pymongo dependencies. To use it, install pymongo with the -``aws`` extra:: - - $ python -m pip install 'pymongo[aws]' - -The MONGODB-AWS mechanism authenticates using AWS IAM credentials (an access -key ID and a secret access key), `temporary AWS IAM credentials`_ obtained -from an `AWS Security Token Service (STS)`_ `Assume Role`_ request, -AWS Lambda `environment variables`_, or temporary AWS IAM credentials assigned -to an `EC2 instance`_ or ECS task. The use of temporary credentials, in -addition to an access key ID and a secret access key, also requires a -security (or session) token. - -Credentials can be configured through the MongoDB URI, environment variables, -or the local EC2 or ECS endpoint. The order in which the client searches for -credentials is: - -#. Credentials passed through the URI -#. Environment variables -#. ECS endpoint if and only if ``AWS_CONTAINER_CREDENTIALS_RELATIVE_URI`` is set. -#. EC2 endpoint - -MONGODB-AWS authenticates against the "$external" virtual database, so none of -the URIs in this section need to include the ``authSource`` URI option. - -AWS IAM credentials -~~~~~~~~~~~~~~~~~~~ - -Applications can authenticate using AWS IAM credentials by providing a valid -access key id and secret access key pair as the username and password, -respectively, in the MongoDB URI. 
A sample URI would be:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://:@localhost/?authMechanism=MONGODB-AWS" - >>> client = MongoClient(uri) - -.. note:: The access_key_id and secret_access_key passed into the URI MUST - be `percent escaped`_. - -AssumeRole -~~~~~~~~~~ - -Applications can authenticate using temporary credentials returned from an -assume role request. These temporary credentials consist of an access key -ID, a secret access key, and a security token passed into the URI. -A sample URI would be:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://:@example.com/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:" - >>> client = MongoClient(uri) - -.. note:: The access_key_id, secret_access_key, and session_token passed into - the URI MUST be `percent escaped`_. - -AWS Lambda (Environment Variables) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When the username and password are not provided and the MONGODB-AWS mechanism -is set, the client will fallback to using the `environment variables`_ -``AWS_ACCESS_KEY_ID``, ``AWS_SECRET_ACCESS_KEY``, and ``AWS_SESSION_TOKEN`` -for the access key ID, secret access key, and session token, respectively:: - - $ export AWS_ACCESS_KEY_ID= - $ export AWS_SECRET_ACCESS_KEY= - $ export AWS_SESSION_TOKEN= - $ python - >>> from pymongo import MongoClient - >>> uri = "mongodb://example.com/?authMechanism=MONGODB-AWS" - >>> client = MongoClient(uri) - -.. note:: No username, password, or session token is passed into the URI. - PyMongo will use credentials set via the environment variables. - These environment variables MUST NOT be `percent escaped`_. - -ECS Container -~~~~~~~~~~~~~ - -Applications can authenticate from an ECS container via temporary -credentials assigned to the machine. A sample URI on an ECS container -would be:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://localhost/?authMechanism=MONGODB-AWS" - >>> client = MongoClient(uri) - -.. note:: No username, password, or session token is passed into the URI. - PyMongo will query the ECS container endpoint to obtain these - credentials. - -EC2 Instance -~~~~~~~~~~~~ - -Applications can authenticate from an EC2 instance via temporary -credentials assigned to the machine. A sample URI on an EC2 machine -would be:: - - >>> from pymongo import MongoClient - >>> uri = "mongodb://localhost/?authMechanism=MONGODB-AWS" - >>> client = MongoClient(uri) - -.. note:: No username, password, or session token is passed into the URI. - PyMongo will query the EC2 instance endpoint to obtain these - credentials. - -.. _temporary AWS IAM credentials: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html -.. _AWS Security Token Service (STS): https://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html -.. _Assume Role: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html -.. _EC2 instance: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html -.. _environment variables: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime diff --git a/doc/examples/bulk.rst b/doc/examples/bulk.rst deleted file mode 100644 index 23367dd2c5..0000000000 --- a/doc/examples/bulk.rst +++ /dev/null @@ -1,178 +0,0 @@ -Bulk Write Operations -===================== - -.. 
testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('bulk_example') - -This tutorial explains how to take advantage of PyMongo's bulk -write operation features. Executing write operations in batches -reduces the number of network round trips, increasing write -throughput. - -Bulk Insert ------------ - -.. versionadded:: 2.6 - -A batch of documents can be inserted by passing a list to the -:meth:`~pymongo.collection.Collection.insert_many` method. PyMongo -will automatically split the batch into smaller sub-batches based on -the maximum message size accepted by MongoDB, supporting very large -bulk insert operations. - -.. doctest:: - - >>> import pymongo - >>> db = pymongo.MongoClient().bulk_example - >>> db.test.insert_many([{'i': i} for i in range(10000)]).inserted_ids - [...] - >>> db.test.count_documents({}) - 10000 - -Mixed Bulk Write Operations ---------------------------- - -.. versionadded:: 2.7 - -PyMongo also supports executing mixed bulk write operations. A batch -of insert, update, and remove operations can be executed together using -the bulk write operations API. - -.. _ordered_bulk: - -Ordered Bulk Write Operations -............................. - -Ordered bulk write operations are batched and sent to the server in the -order provided for serial execution. The return value is an instance of -:class:`~pymongo.results.BulkWriteResult` describing the type and count -of operations performed. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from pprint import pprint - >>> from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne - >>> result = db.test.bulk_write([ - ... DeleteMany({}), # Remove all documents from the previous example. - ... InsertOne({'_id': 1}), - ... InsertOne({'_id': 2}), - ... InsertOne({'_id': 3}), - ... UpdateOne({'_id': 1}, {'$set': {'foo': 'bar'}}), - ... UpdateOne({'_id': 4}, {'$inc': {'j': 1}}, upsert=True), - ... ReplaceOne({'j': 1}, {'j': 2})]) - >>> pprint(result.bulk_api_result) - {'nInserted': 3, - 'nMatched': 2, - 'nModified': 2, - 'nRemoved': 10000, - 'nUpserted': 1, - 'upserted': [{'_id': 4, 'index': 5}], - 'writeConcernErrors': [], - 'writeErrors': []} - -The first write failure that occurs (e.g. duplicate key error) aborts the -remaining operations, and PyMongo raises -:class:`~pymongo.errors.BulkWriteError`. The :attr:`details` attibute of -the exception instance provides the execution results up until the failure -occurred and details about the failure - including the operation that caused -the failure. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from pymongo import InsertOne, DeleteOne, ReplaceOne - >>> from pymongo.errors import BulkWriteError - >>> requests = [ - ... ReplaceOne({'j': 2}, {'i': 5}), - ... InsertOne({'_id': 4}), # Violates the unique key constraint on _id. - ... DeleteOne({'i': 5})] - >>> try: - ... db.test.bulk_write(requests) - ... except BulkWriteError as bwe: - ... pprint(bwe.details) - ... - {'nInserted': 0, - 'nMatched': 1, - 'nModified': 1, - 'nRemoved': 0, - 'nUpserted': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [{'code': 11000, - 'errmsg': '...E11000...duplicate key error...', - 'index': 1,... - 'op': {'_id': 4}}]} - -.. _unordered_bulk: - -Unordered Bulk Write Operations -............................... - -Unordered bulk write operations are batched and sent to the server in -**arbitrary order** where they may be executed in parallel. Any errors -that occur are reported after all operations are attempted. 
- -In the next example the first and third operations fail due to the unique -constraint on _id. Since we are doing unordered execution the second -and fourth operations succeed. - -.. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> requests = [ - ... InsertOne({'_id': 1}), - ... DeleteOne({'_id': 2}), - ... InsertOne({'_id': 3}), - ... ReplaceOne({'_id': 4}, {'i': 1})] - >>> try: - ... db.test.bulk_write(requests, ordered=False) - ... except BulkWriteError as bwe: - ... pprint(bwe.details) - ... - {'nInserted': 0, - 'nMatched': 1, - 'nModified': 1, - 'nRemoved': 1, - 'nUpserted': 0, - 'upserted': [], - 'writeConcernErrors': [], - 'writeErrors': [{'code': 11000, - 'errmsg': '...E11000...duplicate key error...', - 'index': 0,... - 'op': {'_id': 1}}, - {'code': 11000, - 'errmsg': '...', - 'index': 2,... - 'op': {'_id': 3}}]} - -Write Concern -............. - -Bulk operations are executed with the -:attr:`~pymongo.collection.Collection.write_concern` of the collection they -are executed against. Write concern errors (e.g. wtimeout) will be reported -after all operations are attempted, regardless of execution order. - -:: - >>> from pymongo import WriteConcern - >>> coll = db.get_collection( - ... 'test', write_concern=WriteConcern(w=3, wtimeout=1)) - >>> try: - ... coll.bulk_write([InsertOne({'a': i}) for i in range(4)]) - ... except BulkWriteError as bwe: - ... pprint(bwe.details) - ... - {'nInserted': 4, - 'nMatched': 0, - 'nModified': 0, - 'nRemoved': 0, - 'nUpserted': 0, - 'upserted': [], - 'writeConcernErrors': [{'code': 64... - 'errInfo': {'wtimeout': True}, - 'errmsg': 'waiting for replication timed out'}], - 'writeErrors': []} diff --git a/doc/examples/collations.rst b/doc/examples/collations.rst deleted file mode 100644 index 1a5106039c..0000000000 --- a/doc/examples/collations.rst +++ /dev/null @@ -1,134 +0,0 @@ -Collations -========== - -.. seealso:: The API docs for :mod:`~pymongo.collation`. - -Collations are a new feature in MongoDB version 3.4. They provide a set of rules -to use when comparing strings that comply with the conventions of a particular -language, such as Spanish or German. If no collation is specified, the server -sorts strings based on a binary comparison. Many languages have specific -ordering rules, and collations allow users to build applications that adhere to -language-specific comparison rules. - -In French, for example, the last accent in a given word determines the sorting -order. The correct sorting order for the following four words in French is:: - - cote < côte < coté < côté - -Specifying a French collation allows users to sort string fields using the -French sort order. - -Usage ------ - -Users can specify a collation for a -:ref:`collection`, an -:ref:`index`, or a -:ref:`CRUD command `. - -Collation Parameters: -~~~~~~~~~~~~~~~~~~~~~ - -Collations can be specified with the :class:`~pymongo.collation.Collation` model -or with plain Python dictionaries. The structure is the same:: - - Collation(locale=, - caseLevel=, - caseFirst=, - strength=, - numericOrdering=, - alternate=, - maxVariable=, - backwards=) - -The only required parameter is ``locale``, which the server parses as -an `ICU format locale ID `_. -For example, set ``locale`` to ``en_US`` to represent US English -or ``fr_CA`` to represent Canadian French. - -For a complete description of the available parameters, see the MongoDB `manual -`_. - -.. COMMENT add link for manual entry. - -.. 
_collation-on-collection: - -Assign a Default Collation to a Collection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The following example demonstrates how to create a new collection called -``contacts`` and assign a default collation with the ``fr_CA`` locale. This -operation ensures that all queries that are run against the ``contacts`` -collection use the ``fr_CA`` collation unless another collation is explicitly -specified:: - - from pymongo import MongoClient - from pymongo.collation import Collation - - db = MongoClient().test - collection = db.create_collection('contacts', - collation=Collation(locale='fr_CA')) - -.. _collation-on-index: - -Assign a Default Collation to an Index -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -When creating a new index, you can specify a default collation. - -The following example shows how to create an index on the ``name`` -field of the ``contacts`` collection, with the ``unique`` parameter -enabled and a default collation with ``locale`` set to ``fr_CA``:: - - from pymongo import MongoClient - from pymongo.collation import Collation - - contacts = MongoClient().test.contacts - contacts.create_index('name', - unique=True, - collation=Collation(locale='fr_CA')) - -.. _collation-on-operation: - -Specify a Collation for a Query -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Individual queries can specify a collation to use when sorting -results. The following example demonstrates a query that runs on the -``contacts`` collection in database ``test``. It matches on -documents that contain ``New York`` in the ``city`` field, -and sorts on the ``name`` field with the ``fr_CA`` collation:: - - from pymongo import MongoClient - from pymongo.collation import Collation - - collection = MongoClient().test.contacts - docs = collection.find({'city': 'New York'}).sort('name').collation( - Collation(locale='fr_CA')) - -Other Query Types -~~~~~~~~~~~~~~~~~ - -You can use collations to control document matching rules for several different -types of queries. All the various update and delete methods -(:meth:`~pymongo.collection.Collection.update_one`, -:meth:`~pymongo.collection.Collection.update_many`, -:meth:`~pymongo.collection.Collection.delete_one`, etc.) support collation, and -you can create query filters which employ collations to comply with any of the -languages and variants available to the ``locale`` parameter. - -The following example uses a collation with ``strength`` set to -:const:`~pymongo.collation.CollationStrength.SECONDARY`, which considers only -the base character and character accents in string comparisons, but not case -sensitivity, for example. All documents in the ``contacts`` collection with -``jürgen`` (case-insensitive) in the ``first_name`` field are updated:: - - from pymongo import MongoClient - from pymongo.collation import Collation, CollationStrength - - contacts = MongoClient().test.contacts - result = contacts.update_many( - {'first_name': 'jürgen'}, - {'$set': {'verified': 1}}, - collation=Collation(locale='de', - strength=CollationStrength.SECONDARY)) diff --git a/doc/examples/copydb.rst b/doc/examples/copydb.rst deleted file mode 100644 index 76d0c97a36..0000000000 --- a/doc/examples/copydb.rst +++ /dev/null @@ -1,73 +0,0 @@ -Copying a Database -================== - -MongoDB >= 4.2 --------------- - -Starting in MongoDB version 4.2, the server removes the deprecated ``copydb`` command. -As an alternative, users can use ``mongodump`` and ``mongorestore`` (with the ``mongorestore`` -options ``--nsFrom`` and ``--nsTo``). 
- -For example, to copy the ``test`` database from a local instance running on the -default port 27017 to the ``examples`` database on the same instance, you can: - -#. Use ``mongodump`` to dump the test database to an archive ``mongodump-test-db``:: - - mongodump --archive="mongodump-test-db" --db=test - -#. Use ``mongorestore`` with ``--nsFrom`` and ``--nsTo`` to restore (with database name change) - from the archive:: - - mongorestore --archive="mongodump-test-db" --nsFrom='test.*' --nsTo='examples.*' - -Include additional options as necessary, such as to specify the uri or host, username, -password and authentication database. - -For more info about using ``mongodump`` and ``mongorestore`` see the `Copy a Database`_ example -in the official ``mongodump`` documentation. - -MongoDB <= 4.0 --------------- - -When using MongoDB <= 4.0, it is possible to use the deprecated ``copydb`` command -to copy a database. To copy a database within a single ``mongod`` process, or -between ``mongod`` servers, connect to the target ``mongod`` and use the -:meth:`~pymongo.database.Database.command` method:: - - >>> from pymongo import MongoClient - >>> client = MongoClient('target.example.com') - >>> client.admin.command('copydb', - fromdb='source_db_name', - todb='target_db_name') - -To copy from a different mongod server that is not password-protected:: - - >>> client.admin.command('copydb', - fromdb='source_db_name', - todb='target_db_name', - fromhost='source.example.com') - -If the target server is password-protected, authenticate to the "admin" -database:: - - >>> client = MongoClient('target.example.com', - ... username='administrator', - ... password='pwd') - >>> client.admin.command('copydb', - fromdb='source_db_name', - todb='target_db_name', - fromhost='source.example.com') - -See the :doc:`authentication examples `. - -If the **source** server is password-protected, use the `copyDatabase -function in the mongo shell`_. - -Versions of PyMongo before 3.0 included a ``copy_database`` helper method, -but it has been removed. - -.. _copyDatabase function in the mongo shell: - http://mongodb.com/docs/manual/reference/method/db.copyDatabase/ - -.. _Copy a Database: - https://www.mongodb.com/docs/database-tools/mongodump/#std-label-mongodump-example-copy-clone-database diff --git a/doc/examples/custom_type.rst b/doc/examples/custom_type.rst deleted file mode 100644 index 404a6c8b55..0000000000 --- a/doc/examples/custom_type.rst +++ /dev/null @@ -1,421 +0,0 @@ -Custom Type Example -=================== - -This is an example of using a custom type with PyMongo. The example here shows -how to subclass :class:`~bson.codec_options.TypeCodec` to write a type -codec, which is used to populate a :class:`~bson.codec_options.TypeRegistry`. -The type registry can then be used to create a custom-type-aware -:class:`~pymongo.collection.Collection`. Read and write operations -issued against the resulting collection object transparently manipulate -documents as they are saved to or retrieved from MongoDB. - - -Setting Up ----------- - -We'll start by getting a clean database to use for the example: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> client = MongoClient() - >>> client.drop_database('custom_type_example') - >>> db = client.custom_type_example - - -Since the purpose of the example is to demonstrate working with custom types, -we'll need a custom data type to use. For this example, we will be working with -the :py:class:`~decimal.Decimal` type from Python's standard library. 
Since the -BSON library's :class:`~bson.decimal128.Decimal128` type (that implements -the IEEE 754 decimal128 decimal-based floating-point numbering format) is -distinct from Python's built-in :py:class:`~decimal.Decimal` type, attempting -to save an instance of ``Decimal`` with PyMongo results in an -:exc:`~bson.errors.InvalidDocument` exception. - -.. doctest:: - - >>> from decimal import Decimal - >>> num = Decimal("45.321") - >>> db.test.insert_one({'num': num}) - Traceback (most recent call last): - ... - bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: - - -.. _custom-type-type-codec: - -The :class:`~bson.codec_options.TypeCodec` Class -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. versionadded:: 3.8 - -In order to encode a custom type, we must first define a **type codec** for -that type. A type codec describes how an instance of a custom type can be -*transformed* to and/or from one of the types :mod:`~bson` already understands. -Depending on the desired functionality, users must choose from the following -base classes when defining type codecs: - -* :class:`~bson.codec_options.TypeEncoder`: subclass this to define a codec that - encodes a custom Python type to a known BSON type. Users must implement the - ``python_type`` property/attribute and the ``transform_python`` method. -* :class:`~bson.codec_options.TypeDecoder`: subclass this to define a codec that - decodes a specified BSON type into a custom Python type. Users must implement - the ``bson_type`` property/attribute and the ``transform_bson`` method. -* :class:`~bson.codec_options.TypeCodec`: subclass this to define a codec that - can both encode and decode a custom type. Users must implement the - ``python_type`` and ``bson_type`` properties/attributes, as well as the - ``transform_python`` and ``transform_bson`` methods. - - -The type codec for our custom type simply needs to define how a -:py:class:`~decimal.Decimal` instance can be converted into a -:class:`~bson.decimal128.Decimal128` instance and vice-versa. Since we are -interested in both encoding and decoding our custom type, we use the -``TypeCodec`` base class to define our codec: - -.. doctest:: - - >>> from bson.decimal128 import Decimal128 - >>> from bson.codec_options import TypeCodec - >>> class DecimalCodec(TypeCodec): - ... python_type = Decimal # the Python type acted upon by this type codec - ... bson_type = Decimal128 # the BSON type acted upon by this type codec - ... def transform_python(self, value): - ... """Function that transforms a custom type value into a type - ... that BSON can encode.""" - ... return Decimal128(value) - ... def transform_bson(self, value): - ... """Function that transforms a vanilla BSON type value into our - ... custom type.""" - ... return value.to_decimal() - >>> decimal_codec = DecimalCodec() - - -.. _custom-type-type-registry: - -The :class:`~bson.codec_options.TypeRegistry` Class -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. versionadded:: 3.8 - -Before we can begin encoding and decoding our custom type objects, we must -first inform PyMongo about the corresponding codec. This is done by creating -a :class:`~bson.codec_options.TypeRegistry` instance: - -.. doctest:: - - >>> from bson.codec_options import TypeRegistry - >>> type_registry = TypeRegistry([decimal_codec]) - - -Note that type registries can be instantiated with any number of type codecs. -Once instantiated, registries are immutable and the only way to add codecs -to a registry is to create a new one.
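In practice that means combining codecs is done by constructing a fresh registry. A minimal sketch (``another_codec`` is a hypothetical second codec instance)::

    from bson.codec_options import TypeRegistry

    type_registry = TypeRegistry([decimal_codec])
    # Registries are immutable: to handle one more codec, build a new
    # registry containing the old codecs plus the new one.
    type_registry = TypeRegistry([decimal_codec, another_codec])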
- - -Putting It Together -------------------- - -Finally, we can define a :class:`~bson.codec_options.CodecOptions` instance -with our ``type_registry`` and use it to get a -:class:`~pymongo.collection.Collection` object that understands the -:py:class:`~decimal.Decimal` data type: - -.. doctest:: - - >>> from bson.codec_options import CodecOptions - >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) - - -Now, we can seamlessly encode and decode instances of -:py:class:`~decimal.Decimal`: - -.. doctest:: - - >>> collection.insert_one({'num': Decimal("45.321")}) - - >>> mydoc = collection.find_one() - >>> import pprint - >>> pprint.pprint(mydoc) - {'_id': ObjectId('...'), 'num': Decimal('45.321')} - - -We can see what's actually being saved to the database by creating a fresh -collection object without the customized codec options and using that to query -MongoDB: - -.. doctest:: - - >>> vanilla_collection = db.get_collection('test') - >>> pprint.pprint(vanilla_collection.find_one()) - {'_id': ObjectId('...'), 'num': Decimal128('45.321')} - - -Encoding Subtypes -^^^^^^^^^^^^^^^^^ - -Consider the situation where, in addition to encoding -:py:class:`~decimal.Decimal`, we also need to encode a type that subclasses -``Decimal``. PyMongo does this automatically for types that inherit from -Python types that are BSON-encodable by default, but the type codec system -described above does not offer the same flexibility. - -Consider this subtype of ``Decimal`` that has a method to return its value as -an integer: - -.. doctest:: - - >>> class DecimalInt(Decimal): - ... def my_method(self): - ... """Method implementing some custom logic.""" - ... return int(self) - -If we try to save an instance of this type without first registering a type -codec for it, we get an error: - -.. doctest:: - - >>> collection.insert_one({'num': DecimalInt("45.321")}) - Traceback (most recent call last): - ... - bson.errors.InvalidDocument: cannot encode object: Decimal('45.321'), of type: - -In order to proceed further, we must define a type codec for ``DecimalInt``. -This is trivial to do since the same transformation as the one used for -``Decimal`` is adequate for encoding ``DecimalInt`` as well: - -.. doctest:: - - >>> class DecimalIntCodec(DecimalCodec): - ... @property - ... def python_type(self): - ... """The Python type acted upon by this type codec.""" - ... return DecimalInt - >>> decimalint_codec = DecimalIntCodec() - - -.. note:: - - No attempt is made to modify decoding behavior because without additional - information, it is impossible to discern which incoming - :class:`~bson.decimal128.Decimal128` value needs to be decoded as ``Decimal`` - and which needs to be decoded as ``DecimalInt``. This example only considers - the situation where a user wants to *encode* documents containing either - of these types. - -After creating a new codec options object and using it to get a collection -object, we can seamlessly encode instances of ``DecimalInt``: - -.. 
doctest:: - - >>> type_registry = TypeRegistry([decimal_codec, decimalint_codec]) - >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) - >>> collection.drop() - >>> collection.insert_one({'num': DecimalInt("45.321")}) - - >>> mydoc = collection.find_one() - >>> pprint.pprint(mydoc) - {'_id': ObjectId('...'), 'num': Decimal('45.321')} - -Note that the ``transform_bson`` method of the base codec class results in -these values being decoded as ``Decimal`` (and not ``DecimalInt``). - - -.. _decoding-binary-types: - -Decoding :class:`~bson.binary.Binary` Types -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The decoding treatment of :class:`~bson.binary.Binary` types having -``subtype = 0`` by the :mod:`bson` module varies slightly depending on the -version of the Python runtime in use. This must be taken into account while -writing a ``TypeDecoder`` that modifies how this datatype is decoded. - -On Python 3.x, :class:`~bson.binary.Binary` data (``subtype = 0``) is decoded -as a ``bytes`` instance: - -.. code-block:: python - - >>> # On Python 3.x. - >>> from bson.binary import Binary - >>> newcoll = db.get_collection('new') - >>> newcoll.insert_one({'_id': 1, 'data': Binary(b"123", subtype=0)}) - >>> doc = newcoll.find_one() - >>> type(doc['data']) - bytes - - -On Python 2.7.x, the same data is decoded as a :class:`~bson.binary.Binary` -instance: - -.. code-block:: python - - >>> # On Python 2.7.x - >>> newcoll = db.get_collection('new') - >>> doc = newcoll.find_one() - >>> type(doc['data']) - bson.binary.Binary - - -As a consequence of this disparity, users must set the ``bson_type`` attribute -on their :class:`~bson.codec_options.TypeDecoder` classes differently, -depending on the python version in use. - - -.. note:: - - For codebases requiring compatibility with both Python 2 and 3, type - decoders will have to be registered for both possible ``bson_type`` values. - - -.. _fallback-encoder-callable: - -The ``fallback_encoder`` Callable ---------------------------------- - -.. versionadded:: 3.8 - - -In addition to type codecs, users can also register a callable to encode types -that BSON doesn't recognize and for which no type codec has been registered. -This callable is the **fallback encoder** and like the ``transform_python`` -method, it accepts an unencodable value as a parameter and returns a -BSON-encodable value. The following fallback encoder encodes python's -:py:class:`~decimal.Decimal` type to a :class:`~bson.decimal128.Decimal128`: - -.. doctest:: - - >>> def fallback_encoder(value): - ... if isinstance(value, Decimal): - ... return Decimal128(value) - ... return value - -After declaring the callback, we must create a type registry and codec options -with this fallback encoder before it can be used for initializing a collection: - -.. doctest:: - - >>> type_registry = TypeRegistry(fallback_encoder=fallback_encoder) - >>> codec_options = CodecOptions(type_registry=type_registry) - >>> collection = db.get_collection('test', codec_options=codec_options) - >>> collection.drop() - -We can now seamlessly encode instances of :py:class:`~decimal.Decimal`: - -.. doctest:: - - >>> collection.insert_one({'num': Decimal("45.321")}) - - >>> mydoc = collection.find_one() - >>> pprint.pprint(mydoc) - {'_id': ObjectId('...'), 'num': Decimal128('45.321')} - - -.. 
note:: - - Fallback encoders are invoked *after* attempts to encode the given value - with standard BSON encoders and any configured type encoders have failed. - Therefore, in a type registry configured with a type encoder and fallback - encoder that both target the same custom type, the behavior specified in - the type encoder will prevail. - - -Because fallback encoders don't need to declare the types that they encode -beforehand, they can be used to support interesting use-cases that cannot be -serviced by ``TypeEncoder``. One such use-case is described in the next -section. - - -Encoding Unknown Types -^^^^^^^^^^^^^^^^^^^^^^ - -In this example, we demonstrate how a fallback encoder can be used to save -arbitrary objects to the database. We will use the standard library's -:py:mod:`pickle` module to serialize the unknown types and so naturally, this -approach only works for types that are picklable. - -We start by defining some arbitrary custom types: - -.. code-block:: python - - class MyStringType(object): - def __init__(self, value): - self.__value = value - def __repr__(self): - return "MyStringType('%s')" % (self.__value,) - - class MyNumberType(object): - def __init__(self, value): - self.__value = value - def __repr__(self): - return "MyNumberType(%s)" % (self.__value,) - -We also define a fallback encoder that pickles whatever objects it receives -and returns them as :class:`~bson.binary.Binary` instances with a custom -subtype. The custom subtype, in turn, allows us to write a TypeDecoder that -identifies pickled artifacts upon retrieval and transparently decodes them -back into Python objects: - -.. code-block:: python - - import pickle - from bson.binary import Binary, USER_DEFINED_SUBTYPE - from bson.codec_options import TypeDecoder - def fallback_pickle_encoder(value): - return Binary(pickle.dumps(value), USER_DEFINED_SUBTYPE) - - class PickledBinaryDecoder(TypeDecoder): - bson_type = Binary - def transform_bson(self, value): - if value.subtype == USER_DEFINED_SUBTYPE: - return pickle.loads(value) - return value - - -.. note:: - - The above example is written assuming the use of Python 3. If you are using - Python 2, ``bson_type`` must be set to ``Binary``. See the - :ref:`decoding-binary-types` section for a detailed explanation. - - -Finally, we create a ``CodecOptions`` instance: - -.. code-block:: python - - codec_options = CodecOptions(type_registry=TypeRegistry( - [PickledBinaryDecoder()], fallback_encoder=fallback_pickle_encoder)) - -We can now round trip our custom objects to MongoDB: - -.. code-block:: python - - collection = db.get_collection('test_fe', codec_options=codec_options) - collection.insert_one({'_id': 1, 'str': MyStringType("hello world"), - 'num': MyNumberType(2)}) - mydoc = collection.find_one() - assert isinstance(mydoc['str'], MyStringType) - assert isinstance(mydoc['num'], MyNumberType) - - -Limitations ------------ - -PyMongo's type codec and fallback encoder features have the following -limitations: - -#. Users cannot customize the encoding behavior of Python types that PyMongo - already understands like ``int`` and ``str`` (the 'built-in types'). - Attempting to instantiate a type registry with one or more codecs that act - upon a built-in type results in a ``TypeError``. This limitation extends - to all subtypes of the standard types. -#. Chaining type encoders is not supported.
A custom type value, once - transformed by a codec's ``transform_python`` method, *must* result in a - type that is either BSON-encodable by default, or can be - transformed by the fallback encoder into something BSON-encodable--it - *cannot* be transformed a second time by a different type codec. -#. The :meth:`~pymongo.database.Database.command` method does not apply the - user's TypeDecoders while decoding the command response document. -#. :mod:`gridfs` does not apply custom type encoding or decoding to any - documents received from or returned to the user. diff --git a/doc/examples/datetimes.rst b/doc/examples/datetimes.rst deleted file mode 100644 index d712ce6138..0000000000 --- a/doc/examples/datetimes.rst +++ /dev/null @@ -1,104 +0,0 @@ -Datetimes and Timezones -======================= - -.. testsetup:: - - import datetime - from pymongo import MongoClient - from bson.codec_options import CodecOptions - client = MongoClient() - client.drop_database('dt_example') - db = client.dt_example - -These examples show how to handle Python :class:`datetime.datetime` objects -correctly in PyMongo. - -Basic Usage ----------- - -PyMongo uses :class:`datetime.datetime` objects for representing dates and times -in MongoDB documents. Because MongoDB assumes that dates and times are in UTC, -care should be taken to ensure that dates and times written to the database -reflect UTC. For example, the following code stores the current UTC date and -time into MongoDB: - -.. doctest:: - - >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.utcnow()}) - -Always use :meth:`datetime.datetime.utcnow`, which returns the current time in -UTC, instead of :meth:`datetime.datetime.now`, which returns the current local -time. Avoid doing this: - -.. doctest:: - - >>> result = db.objects.insert_one( - ... {"last_modified": datetime.datetime.now()}) - -The value for `last_modified` is very different between these two examples, even -though both documents were stored at around the same local time. This will be -confusing to the application that reads them: - -.. doctest:: - - >>> [doc['last_modified'] for doc in db.objects.find()] # doctest: +SKIP - [datetime.datetime(2015, 7, 8, 18, 17, 28, 324000), - datetime.datetime(2015, 7, 8, 11, 17, 42, 911000)] - -:class:`bson.codec_options.CodecOptions` has a `tz_aware` option that enables -"aware" :class:`datetime.datetime` objects, i.e., datetimes that know what -timezone they're in. By default, PyMongo retrieves naive datetimes: - -.. doctest:: - - >>> result = db.tzdemo.insert_one( - ... {'date': datetime.datetime(2002, 10, 27, 6, 0, 0)}) - >>> db.tzdemo.find_one()['date'] - datetime.datetime(2002, 10, 27, 6, 0) - >>> options = CodecOptions(tz_aware=True) - >>> db.get_collection('tzdemo', codec_options=options).find_one()['date'] # doctest: +SKIP - datetime.datetime(2002, 10, 27, 6, 0, - tzinfo=) - -Saving Datetimes with Timezones -------------------------------- - -When storing :class:`datetime.datetime` objects that specify a timezone -(i.e. they have a `tzinfo` property that isn't ``None``), PyMongo will convert -those datetimes to UTC automatically: - -.. doctest:: - - >>> import pytz - >>> pacific = pytz.timezone('US/Pacific') - >>> aware_datetime = pacific.localize( - ...
datetime.datetime(2002, 10, 27, 6, 0, 0)) - >>> result = db.times.insert_one({"date": aware_datetime}) - >>> db.times.find_one()['date'] - datetime.datetime(2002, 10, 27, 14, 0) - -Reading Time ------------- - -As previously mentioned, by default all :class:`datetime.datetime` objects -returned by PyMongo will be naive but reflect UTC (i.e. the time as stored in -MongoDB). By setting the `tz_aware` option on -:class:`~bson.codec_options.CodecOptions`, :class:`datetime.datetime` objects -will be timezone-aware and have a `tzinfo` property that reflects the UTC -timezone. - -PyMongo 3.1 introduced a `tzinfo` property that can be set on -:class:`~bson.codec_options.CodecOptions` to convert :class:`datetime.datetime` -objects to local time automatically. For example, if we wanted to read all times -out of MongoDB in US/Pacific time: - - >>> from bson.codec_options import CodecOptions - >>> db.times.find_one()['date'] - datetime.datetime(2002, 10, 27, 14, 0) - >>> aware_times = db.times.with_options(codec_options=CodecOptions( - ... tz_aware=True, - ... tzinfo=pytz.timezone('US/Pacific'))) - >>> result = aware_times.find_one() - datetime.datetime(2002, 10, 27, 6, 0, # doctest: +NORMALIZE_WHITESPACE - tzinfo=) diff --git a/doc/examples/encryption.rst b/doc/examples/encryption.rst deleted file mode 100644 index 5c3dc0864b..0000000000 --- a/doc/examples/encryption.rst +++ /dev/null @@ -1,709 +0,0 @@ -.. _Client-Side Field Level Encryption: - -Client-Side Field Level Encryption -================================== - -New in MongoDB 4.2, client-side field level encryption allows an application -to encrypt specific data fields in addition to pre-existing MongoDB -encryption features such as `Encryption at Rest -`_ and -`TLS/SSL (Transport Encryption) -`_. - -With field level encryption, applications can encrypt fields in documents -*prior* to transmitting data over the wire to the server. Client-side field -level encryption supports workloads where applications must guarantee that -unauthorized parties, including server administrators, cannot read the -encrypted data. - -.. seealso:: The MongoDB documentation on `Client Side Field Level Encryption `_. - -Dependencies ------------- - -To get started using client-side field level encryption in your project, -you will need to install the -`pymongocrypt `_ library -as well as the driver itself. Install both the driver and a compatible -version of pymongocrypt like this:: - - $ python -m pip install 'pymongo[encryption]' - -Note that installing on Linux requires pip 19 or later for manylinux2010 wheel -support. For more information about installing pymongocrypt see -`the installation instructions on the project's PyPI page -`_. - -mongocryptd ------------ - -The ``mongocryptd`` binary is required for automatic client-side encryption -and is included as a component in the `MongoDB Enterprise Server package -`_. -For detailed installation instructions see -`the MongoDB documentation on mongocryptd -`_. - -``mongocryptd`` performs the following: - -- Parses the automatic encryption rules specified to the database connection. - If the JSON schema contains invalid automatic encryption syntax or any - document validation syntax, ``mongocryptd`` returns an error. -- Uses the specified automatic encryption rules to mark fields in read and - write operations for encryption. -- Rejects read/write operations that may return unexpected or incorrect results - when applied to an encrypted field. 
For supported and unsupported operations, - see `Read/Write Support with Automatic Field Level Encryption - `_. - -A MongoClient configured with auto encryption will automatically spawn the -``mongocryptd`` process from the application's ``PATH``. Applications can -control the spawning behavior as part of the automatic encryption options. -For example to set the path to the ``mongocryptd`` process:: - - auto_encryption_opts = AutoEncryptionOpts( - ..., - mongocryptd_spawn_path='/path/to/mongocryptd') - -To control the logging output of ``mongocryptd`` pass options using -``mongocryptd_spawn_args``:: - - auto_encryption_opts = AutoEncryptionOpts( - ..., - mongocryptd_spawn_args=['--logpath=/path/to/mongocryptd.log', '--logappend']) - -If your application wishes to manage the ``mongocryptd`` process manually, -it is possible to disable spawning ``mongocryptd``:: - - auto_encryption_opts = AutoEncryptionOpts( - ..., - mongocryptd_bypass_spawn=True, - # URI of the local ``mongocryptd`` process. - mongocryptd_uri='mongodb://localhost:27020') - -``mongocryptd`` is only responsible for supporting automatic client-side field -level encryption and does not itself perform any encryption or decryption. - -.. _automatic-client-side-encryption: - -Automatic Client-Side Field Level Encryption -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Automatic client-side field level encryption is enabled by creating a -:class:`~pymongo.mongo_client.MongoClient` with the ``auto_encryption_opts`` -option set to an instance of -:class:`~pymongo.encryption_options.AutoEncryptionOpts`. The following -examples show how to setup automatic client-side field level encryption -using :class:`~pymongo.encryption.ClientEncryption` to create a new -encryption data key. - -.. note:: Automatic client-side field level encryption requires MongoDB 4.2 - enterprise or a MongoDB 4.2 Atlas cluster. The community version of the - server supports automatic decryption as well as - :ref:`explicit-client-side-encryption`. - -Providing Local Automatic Encryption Rules -`````````````````````````````````````````` - -The following example shows how to specify automatic encryption rules via the -``schema_map`` option. The automatic encryption rules are expressed using a -`strict subset of the JSON Schema syntax -`_. - -Supplying a ``schema_map`` provides more security than relying on -JSON Schemas obtained from the server. It protects against a -malicious server advertising a false JSON Schema, which could trick -the client into sending unencrypted data that should be encrypted. - -JSON Schemas supplied in the ``schema_map`` only apply to configuring -automatic client-side field level encryption. Other validation -rules in the JSON schema will not be enforced by the driver and -will result in an error.:: - - import os - - from bson.codec_options import CodecOptions - from bson import json_util - - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) - from pymongo.encryption_options import AutoEncryptionOpts - - - def create_json_schema_file(kms_providers, key_vault_namespace, - key_vault_client): - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - key_vault_client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. We will not be calling - # encrypt() or decrypt() in this example so we can use any - # CodecOptions. 
- CodecOptions()) - - # Create a new data key and json schema for the encryptedField. - # https://dochub.mongodb.org/core/client-side-field-level-encryption-automatic-encryption-rules - data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_1']) - schema = { - "properties": { - "encryptedField": { - "encrypt": { - "keyId": [data_key_id], - "bsonType": "string", - "algorithm": - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic - } - } - }, - "bsonType": "object" - } - # Use CANONICAL_JSON_OPTIONS so that other drivers and tools will be - # able to parse the MongoDB extended JSON file. - json_schema_string = json_util.dumps( - schema, json_options=json_util.CANONICAL_JSON_OPTIONS) - - with open('jsonSchema.json', 'w') as file: - file.write(json_schema_string) - - - def main(): - # The MongoDB namespace (db.collection) used to store the - # encrypted documents in this example. - encrypted_namespace = "test.coll" - - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # The MongoClient used to access the key vault (key_vault_namespace). - key_vault_client = MongoClient() - key_vault = key_vault_client[key_vault_db_name][key_vault_coll_name] - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) - - create_json_schema_file( - kms_providers, key_vault_namespace, key_vault_client) - - # Load the JSON Schema and construct the local schema_map option. - with open('jsonSchema.json', 'r') as file: - json_schema_string = file.read() - json_schema = json_util.loads(json_schema_string) - schema_map = {encrypted_namespace: json_schema} - - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, schema_map=schema_map) - - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - db_name, coll_name = encrypted_namespace.split(".", 1) - coll = client[db_name][coll_name] - # Clear old data - coll.drop() - - coll.insert_one({"encryptedField": "123456789"}) - print('Decrypted document: %s' % (coll.find_one(),)) - unencrypted_coll = MongoClient()[db_name][coll_name] - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) - - - if __name__ == "__main__": - main() - -Server-Side Field Level Encryption Enforcement -`````````````````````````````````````````````` - -The MongoDB 4.2 server supports using schema validation to enforce encryption -of specific fields in a collection. This schema validation will prevent an -application from inserting unencrypted values for any fields marked with the -``"encrypt"`` JSON schema keyword. 
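Conceptually, the enforcement is an ordinary collection validator: the same JSON schema is passed as the ``validator`` option when creating the collection, and the server rejects writes that violate it. A minimal sketch (``json_schema`` stands for a schema document built as in the complete example that follows)::

    # Inserting an unencrypted value into a field marked with "encrypt"
    # now fails server-side with an OperationFailure.
    db.create_collection('coll', validator={'$jsonSchema': json_schema})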
- -The following example shows how to setup automatic client-side field level -encryption using -:class:`~pymongo.encryption.ClientEncryption` to create a new encryption -data key and create a collection with the -`Automatic Encryption JSON Schema Syntax -`_:: - - import os - - from bson.codec_options import CodecOptions - from bson.binary import STANDARD - - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) - from pymongo.encryption_options import AutoEncryptionOpts - from pymongo.errors import OperationFailure - from pymongo.write_concern import WriteConcern - - - def main(): - # The MongoDB namespace (db.collection) used to store the - # encrypted documents in this example. - encrypted_namespace = "test.coll" - - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # The MongoClient used to access the key vault (key_vault_namespace). - key_vault_client = MongoClient() - key_vault = key_vault_client[key_vault_db_name][key_vault_coll_name] - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - key_vault_client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. We will not be calling - # encrypt() or decrypt() in this example so we can use any - # CodecOptions. - CodecOptions()) - - # Create a new data key and json schema for the encryptedField. - data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_2']) - json_schema = { - "properties": { - "encryptedField": { - "encrypt": { - "keyId": [data_key_id], - "bsonType": "string", - "algorithm": - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic - } - } - }, - "bsonType": "object" - } - - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace) - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - db_name, coll_name = encrypted_namespace.split(".", 1) - db = client[db_name] - # Clear old data - db.drop_collection(coll_name) - # Create the collection with the encryption JSON Schema. - db.create_collection( - coll_name, - # uuid_representation=STANDARD is required to ensure that any - # UUIDs in the $jsonSchema document are encoded to BSON Binary - # with the standard UUID subtype 4. This is only needed when - # running the "create" collection command with an encryption - # JSON Schema. 
- codec_options=CodecOptions(uuid_representation=STANDARD), - write_concern=WriteConcern(w="majority"), - validator={"$jsonSchema": json_schema}) - coll = client[db_name][coll_name] - - coll.insert_one({"encryptedField": "123456789"}) - print('Decrypted document: %s' % (coll.find_one(),)) - unencrypted_coll = MongoClient()[db_name][coll_name] - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) - try: - unencrypted_coll.insert_one({"encryptedField": "123456789"}) - except OperationFailure as exc: - print('Unencrypted insert failed: %s' % (exc.details,)) - - - if __name__ == "__main__": - main() - -.. _automatic-queryable-client-side-encryption: - -Automatic Queryable Encryption (Beta) -````````````````````````````````````` - -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. - -Queryable Encryption is the second version of Client-Side Field Level Encryption. -Data is encrypted client-side. Queryable Encryption supports indexed encrypted fields, -which are further processed server-side. - -You must have MongoDB 6.0rc8+ Enterprise to preview the capability. - -Until PyMongo 4.2 release is finalized, it can be installed using:: - - pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" - -Additionally, ``libmongocrypt`` must be installed from `source `_. - -Automatic encryption in Queryable Encryption is configured with an ``encrypted_fields`` mapping, as demonstrated by the following example:: - - import os - from bson.codec_options import CodecOptions - from pymongo import MongoClient - from pymongo.encryption import Algorithm, ClientEncryption, QueryType - from pymongo.encryption_options import AutoEncryptionOpts - - - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - key_vault_namespace = "keyvault.datakeys" - key_vault_client = MongoClient() - client_encryption = ClientEncryption( - kms_providers, key_vault_namespace, key_vault_client, CodecOptions() - ) - key_vault = key_vault_client["keyvault"]["datakeys"] - key_vault.drop() - key1_id = client_encryption.create_data_key("local", key_alt_names=["firstName"]) - key2_id = client_encryption.create_data_key("local", key_alt_names=["lastName"]) - - encrypted_fields_map = { - "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": key1_id, - "queries": [{"queryType": "equality"}], - }, - { - "path": "lastName", - "bsonType": "string", - "keyId": key2_id, - } - ] - } - } - - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, encrypted_fields_map=encrypted_fields_map) - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - client.default.drop_collection('encryptedCollection') - coll = client.default.create_collection('encryptedCollection') - coll.insert_one({ "_id": 1, "firstName": "Jane", "lastName": "Doe" }) - docs = list(coll.find({"firstName": "Jane"})) - print(docs) - -In the above example, the ``firstName`` and ``lastName`` fields are -automatically encrypted and decrypted. - -Explicit Queryable Encryption (Beta) -```````````````````````````````````` - -PyMongo 4.2 brings beta support for Queryable Encryption with MongoDB 6.0. - -Queryable Encryption is the second version of Client-Side Field Level Encryption. -Data is encrypted client-side. 
Queryable Encryption supports indexed encrypted fields, -which are further processed server-side. - -You must have MongoDB 6.0rc8+ to preview the capability. - -Until PyMongo 4.2 release is finalized, it can be installed using:: - - pip install "pymongo@git+ssh://git@github.com/mongodb/mongo-python-driver.git@4.2.0b0#egg=pymongo[encryption]" - -Additionally, ``libmongocrypt`` must be installed from `source `_. - -Explicit encryption in Queryable Encryption is performed using the ``encrypt`` and ``decrypt`` -methods. Automatic encryption (to allow the ``find_one`` to automatically decrypt) is configured -using an ``encrypted_fields`` mapping, as demonstrated by the following example:: - - import os - - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, AutoEncryptionOpts, - ClientEncryption, QueryType) - - - def main(): - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # Set up the key vault (key_vault_namespace) for this example. - client = MongoClient() - key_vault = client[key_vault_db_name][key_vault_coll_name] - - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - # The MongoClient to use for reading/writing to the key vault. - # This can be the same MongoClient used by the main application. - client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. - client.codec_options) - - # Create a new data key for the encryptedField. - indexed_key_id = client_encryption.create_data_key( - 'local') - unindexed_key_id = client_encryption.create_data_key( - 'local') - - encrypted_fields = { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": indexed_key_id, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality" - } - }, - { - "keyId": unindexed_key_id, - "path": "encryptedUnindexed", - "bsonType": "string", - } - ] - } - - opts = AutoEncryptionOpts( - {"local": {"key": local_master_key}}, - key_vault.full_name, - bypass_query_analysis=True, - key_vault_client=client, - ) - - # The MongoClient used to read/write application data. - encrypted_client = MongoClient(auto_encryption_opts=opts) - encrypted_client.drop_database("test") - db = encrypted_client.test - - # Create the collection with encrypted fields. - coll = db.create_collection("coll", encryptedFields=encrypted_fields) - - # Create and encrypt an indexed and unindexed value. - val = "encrypted indexed value" - unindexed_val = "encrypted unindexed value" - insert_payload_indexed = client_encryption.encrypt(val, Algorithm.INDEXED, indexed_key_id, contention_factor=1) - insert_payload_unindexed = client_encryption.encrypt(unindexed_val, Algorithm.UNINDEXED, - unindexed_key_id) - - # Insert the payloads. 
- coll.insert_one({ - "encryptedIndexed": insert_payload_indexed, - "encryptedUnindexed": insert_payload_unindexed - }) - - # Encrypt our find payload using QueryType.EQUALITY. - # The value of "data_key_id" must be the same as used to encrypt the values - # above. - find_payload = client_encryption.encrypt( - val, Algorithm.INDEXED, indexed_key_id, query_type=QueryType.EQUALITY, contention_factor=1 - ) - - # Find the document we inserted using the encrypted payload. - # The returned document is automatically decrypted. - doc = coll.find_one({"encryptedIndexed": find_payload}) - print('Returned document: %s' % (doc,)) - - # Cleanup resources. - client_encryption.close() - encrypted_client.close() - client.close() - - - if __name__ == "__main__": - main() - -.. _explicit-client-side-encryption: - -Explicit Encryption -~~~~~~~~~~~~~~~~~~~ - -Explicit encryption is a MongoDB community feature and does not use the -``mongocryptd`` process. Explicit encryption is provided by the -:class:`~pymongo.encryption.ClientEncryption` class, for example:: - - import os - - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) - - - def main(): - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # The MongoClient used to read/write application data. - client = MongoClient() - coll = client.test.coll - # Clear old data - coll.drop() - - # Set up the key vault (key_vault_namespace) for this example. - key_vault = client[key_vault_db_name][key_vault_coll_name] - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - # The MongoClient to use for reading/writing to the key vault. - # This can be the same MongoClient used by the main application. - client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. - coll.codec_options) - - # Create a new data key for the encryptedField. - data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_3']) - - # Explicitly encrypt a field: - encrypted_field = client_encryption.encrypt( - "123456789", - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_id=data_key_id) - coll.insert_one({"encryptedField": encrypted_field}) - doc = coll.find_one() - print('Encrypted document: %s' % (doc,)) - - # Explicitly decrypt the field: - doc["encryptedField"] = client_encryption.decrypt(doc["encryptedField"]) - print('Decrypted document: %s' % (doc,)) - - # Cleanup resources. - client_encryption.close() - client.close() - - - if __name__ == "__main__": - main() - - -Explicit Encryption with Automatic Decryption -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Although automatic encryption requires MongoDB 4.2 enterprise or a -MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all users. 
-
-To configure automatic *decryption* without automatic *encryption*, set -``bypass_auto_encryption=True`` in -:class:`~pymongo.encryption_options.AutoEncryptionOpts`:: - - import os - - from pymongo import MongoClient - from pymongo.encryption import (Algorithm, - ClientEncryption) - from pymongo.encryption_options import AutoEncryptionOpts - - - def main(): - # This must be the same master key that was used to create - # the encryption key. - local_master_key = os.urandom(96) - kms_providers = {"local": {"key": local_master_key}} - - # The MongoDB namespace (db.collection) used to store - # the encryption data keys. - key_vault_namespace = "encryption.__pymongoTestKeyVault" - key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1) - - # bypass_auto_encryption=True disables automatic encryption but keeps - # the automatic _decryption_ behavior. bypass_auto_encryption will - # also disable spawning mongocryptd. - auto_encryption_opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, bypass_auto_encryption=True) - - client = MongoClient(auto_encryption_opts=auto_encryption_opts) - coll = client.test.coll - # Clear old data - coll.drop() - - # Set up the key vault (key_vault_namespace) for this example. - key_vault = client[key_vault_db_name][key_vault_coll_name] - # Ensure that two data keys cannot share the same keyAltName. - key_vault.drop() - key_vault.create_index( - "keyAltNames", - unique=True, - partialFilterExpression={"keyAltNames": {"$exists": True}}) - - client_encryption = ClientEncryption( - kms_providers, - key_vault_namespace, - # The MongoClient to use for reading/writing to the key vault. - # This can be the same MongoClient used by the main application. - client, - # The CodecOptions class used for encrypting and decrypting. - # This should be the same CodecOptions instance you have configured - # on MongoClient, Database, or Collection. - coll.codec_options) - - # Create a new data key for the encryptedField. - data_key_id = client_encryption.create_data_key( - 'local', key_alt_names=['pymongo_encryption_example_4']) - - # Explicitly encrypt a field: - encrypted_field = client_encryption.encrypt( - "123456789", - Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name='pymongo_encryption_example_4') - coll.insert_one({"encryptedField": encrypted_field}) - # Automatically decrypts any encrypted fields. - doc = coll.find_one() - print('Decrypted document: %s' % (doc,)) - unencrypted_coll = MongoClient().test.coll - print('Encrypted document: %s' % (unencrypted_coll.find_one(),)) - - # Cleanup resources. - client_encryption.close() - client.close() - - - if __name__ == "__main__": - main() diff --git a/doc/examples/geo.rst b/doc/examples/geo.rst deleted file mode 100644 index 2234a20757..0000000000 --- a/doc/examples/geo.rst +++ /dev/null @@ -1,108 +0,0 @@ -Geospatial Indexing Example -=========================== - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('geo_example') - -This example shows how to create and use a :data:`~pymongo.GEO2D` -index in PyMongo. To create a spherical (earth-like) geospatial index use :data:`~pymongo.GEOSPHERE` instead. - -.. seealso:: The MongoDB documentation on `Geospatial Indexes `_. - -Creating a Geospatial Index ---------------------------- - -Creating a geospatial index in pymongo is easy: - -..
doctest:: - - >>> from pymongo import MongoClient, GEO2D - >>> db = MongoClient().geo_example - >>> db.places.create_index([("loc", GEO2D)]) - 'loc_2d' - -Inserting Places ----------------- - -Locations in MongoDB are represented using either embedded documents -or lists where the first two elements are coordinates. Here, we'll -insert a couple of example locations: - -.. doctest:: - - >>> result = db.places.insert_many([{"loc": [2, 5]}, - ... {"loc": [30, 5]}, - ... {"loc": [1, 2]}, - ... {"loc": [4, 4]}]) - >>> result.inserted_ids - [ObjectId('...'), ObjectId('...'), ObjectId('...'), ObjectId('...')] - -.. note:: If specifying latitude and longitude coordinates in :data:`~pymongo.GEOSPHERE`, list the **longitude** first and then **latitude**. - -Querying --------- - -Using the geospatial index we can find documents near another point: - -.. doctest:: - - >>> import pprint - >>> for doc in db.places.find({"loc": {"$near": [3, 6]}}).limit(3): - ... pprint.pprint(doc) - ... - {'_id': ObjectId('...'), 'loc': [2, 5]} - {'_id': ObjectId('...'), 'loc': [4, 4]} - {'_id': ObjectId('...'), 'loc': [1, 2]} - -.. note:: If using :data:`pymongo.GEOSPHERE`, using $nearSphere is recommended. - -The $maxDistance operator requires the use of :class:`~bson.son.SON`: - -.. doctest:: - - >>> from bson.son import SON - >>> query = {"loc": SON([("$near", [3, 6]), ("$maxDistance", 100)])} - >>> for doc in db.places.find(query).limit(3): - ... pprint.pprint(doc) - ... - {'_id': ObjectId('...'), 'loc': [2, 5]} - {'_id': ObjectId('...'), 'loc': [4, 4]} - {'_id': ObjectId('...'), 'loc': [1, 2]} - -It's also possible to query for all items within a given rectangle -(specified by lower-left and upper-right coordinates): - -.. doctest:: - - >>> query = {"loc": {"$within": {"$box": [[2, 2], [5, 6]]}}} - >>> for doc in db.places.find(query).sort('_id'): - ... pprint.pprint(doc) - {'_id': ObjectId('...'), 'loc': [2, 5]} - {'_id': ObjectId('...'), 'loc': [4, 4]} - -Or circle (specified by center point and radius): - -.. doctest:: - - >>> query = {"loc": {"$within": {"$center": [[0, 0], 6]}}} - >>> for doc in db.places.find(query).sort('_id'): - ... pprint.pprint(doc) - ... - {'_id': ObjectId('...'), 'loc': [2, 5]} - {'_id': ObjectId('...'), 'loc': [1, 2]} - {'_id': ObjectId('...'), 'loc': [4, 4]} - -geoNear queries are also supported using :class:`~bson.son.SON`:: - - >>> from bson.son import SON - >>> db.command(SON([('geoNear', 'places'), ('near', [1, 2])])) - {'ok': 1.0, 'stats': ...} - -.. warning:: Starting in MongoDB version 4.0, MongoDB deprecates the **geoNear** command. Use one of the following operations instead. - - * $geoNear - aggregation stage. - * $near - query operator. - * $nearSphere - query operator. diff --git a/doc/examples/gevent.rst b/doc/examples/gevent.rst deleted file mode 100644 index 6eb283dca9..0000000000 --- a/doc/examples/gevent.rst +++ /dev/null @@ -1,50 +0,0 @@ -Gevent -====== - -PyMongo supports `Gevent `_. Simply call Gevent's -``monkey.patch_all()`` before loading any other modules: - -.. doctest:: - - >>> # You must call patch_all() *before* importing any other modules - >>> from gevent import monkey - >>> _ = monkey.patch_all() - >>> from pymongo import MongoClient - >>> client = MongoClient() - -PyMongo uses thread and socket functions from the Python standard library. -Gevent's monkey-patching replaces those standard functions so that PyMongo -does asynchronous I/O with non-blocking sockets, and schedules operations -on greenlets instead of threads. 
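Once patched, many greenlets can issue operations concurrently through a single shared client. A minimal sketch (the ``test.coll`` namespace is illustrative)::

    import gevent
    from gevent import monkey
    monkey.patch_all()  # must run before pymongo is imported

    from pymongo import MongoClient

    client = MongoClient()

    def fetch(i):
        # Each greenlet checks a socket out of the client's shared pool.
        return client.test.coll.find_one({'i': i})

    jobs = [gevent.spawn(fetch, i) for i in range(10)]
    gevent.joinall(jobs, timeout=10)
    results = [job.value for job in jobs]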
- -Avoid blocking in Hub.join --------------------------- - -By default, PyMongo uses threads to discover and monitor your servers' topology -(see :ref:`health-monitoring`). If you execute ``monkey.patch_all()`` when -your application first begins, PyMongo automatically uses greenlets instead -of threads. - -When shutting down, if your application calls :meth:`~gevent.hub.Hub.join` on -Gevent's :class:`~gevent.hub.Hub` without first terminating these background -greenlets, the call to :meth:`~gevent.hub.Hub.join` blocks indefinitely. You -therefore **must close or dereference** any active -:class:`~pymongo.mongo_client.MongoClient` before exiting. - -An example solution to this issue in some application frameworks is a signal -handler to end background greenlets when your application receives SIGHUP: - -.. code-block:: python - - import signal - - def graceful_reload(signum, traceback): - """Explicitly close some global MongoClient object.""" - client.close() - - signal.signal(signal.SIGHUP, graceful_reload) - -Applications using uWSGI prior to 1.9.16 are affected by this issue, -or newer uWSGI versions with the ``-gevent-wait-for-hub`` option. -See `the uWSGI changelog for details -`_. diff --git a/doc/examples/gridfs.rst b/doc/examples/gridfs.rst deleted file mode 100644 index a015f6a9fd..0000000000 --- a/doc/examples/gridfs.rst +++ /dev/null @@ -1,83 +0,0 @@ -GridFS Example -============== - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('gridfs_example') - -This example shows how to use :mod:`gridfs` to store large binary -objects (e.g. files) in MongoDB. - -.. seealso:: The API docs for :mod:`gridfs`. - -.. seealso:: `This blog post - `_ - for some motivation behind this API. - -Setup ------ - -We start by creating a :class:`~gridfs.GridFS` instance to use: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> import gridfs - >>> - >>> db = MongoClient().gridfs_example - >>> fs = gridfs.GridFS(db) - -Every :class:`~gridfs.GridFS` instance is created with and will -operate on a specific :class:`~pymongo.database.Database` instance. - -Saving and Retrieving Data --------------------------- - -The simplest way to work with :mod:`gridfs` is to use its key/value -interface (the :meth:`~gridfs.GridFS.put` and -:meth:`~gridfs.GridFS.get` methods). To write data to GridFS, use -:meth:`~gridfs.GridFS.put`: - -.. doctest:: - - >>> a = fs.put(b"hello world") - -:meth:`~gridfs.GridFS.put` creates a new file in GridFS, and returns -the value of the file document's ``"_id"`` key. Given that ``"_id"`` -we can use :meth:`~gridfs.GridFS.get` to get back the contents of the -file: - -.. doctest:: - - >>> fs.get(a).read() - b'hello world' - -:meth:`~gridfs.GridFS.get` returns a file-like object, so we get the -file's contents by calling :meth:`~gridfs.grid_file.GridOut.read`. - -In addition to putting a :class:`str` as a GridFS file, we can also -put any file-like object (an object with a :meth:`read` -method). GridFS will handle reading the file in chunk-sized segments -automatically. We can also add additional attributes to the file as -keyword arguments: - -.. doctest:: - - >>> b = fs.put(fs.get(a), filename="foo", bar="baz") - >>> out = fs.get(b) - >>> out.read() - b'hello world' - >>> out.filename - 'foo' - >>> out.bar - 'baz' - >>> out.upload_date - datetime.datetime(...) - -The attributes we set in :meth:`~gridfs.GridFS.put` are stored in the -file document, and retrievable after calling -:meth:`~gridfs.GridFS.get`. 
Some attributes (like ``"filename"``) are -special and are defined in the GridFS specification - see that -document for more details. diff --git a/doc/examples/high_availability.rst b/doc/examples/high_availability.rst deleted file mode 100644 index 8f94aba074..0000000000 --- a/doc/examples/high_availability.rst +++ /dev/null @@ -1,367 +0,0 @@ -High Availability and PyMongo -============================= - -PyMongo makes it easy to write highly available applications whether -you use a `single replica set `_ -or a `large sharded cluster -`_. - -Connecting to a Replica Set ---------------------------- - -PyMongo makes working with `replica sets -`_ easy. Here we'll launch a new -replica set and show how to handle both initialization and normal -connections with PyMongo. - -.. seealso:: The MongoDB documentation on `replication `_. - -Starting a Replica Set -~~~~~~~~~~~~~~~~~~~~~~ - -The main `replica set documentation -`_ contains extensive information -about setting up a new replica set or migrating an existing MongoDB -setup, be sure to check that out. Here, we'll just do the bare minimum -to get a three node replica set setup locally. - -.. warning:: Replica sets should always use multiple nodes in - production - putting all set members on the same physical node is - only recommended for testing and development. - -We start three ``mongod`` processes, each on a different port and with -a different dbpath, but all using the same replica set name "foo". - -.. code-block:: bash - - $ mkdir -p /data/db0 /data/db1 /data/db2 - $ mongod --port 27017 --dbpath /data/db0 --replSet foo - -.. code-block:: bash - - $ mongod --port 27018 --dbpath /data/db1 --replSet foo - -.. code-block:: bash - - $ mongod --port 27019 --dbpath /data/db2 --replSet foo - -Initializing the Set -~~~~~~~~~~~~~~~~~~~~ - -At this point all of our nodes are up and running, but the set has yet -to be initialized. Until the set is initialized no node will become -the primary, and things are essentially "offline". - -To initialize the set we need to connect directly to a single node and run the -initiate command using the ``directConnection`` option:: - - >>> from pymongo import MongoClient - >>> c = MongoClient('localhost', 27017, directConnection=True) - -.. note:: We could have connected to any of the other nodes instead, - but only the node we initiate from is allowed to contain any - initial data. - -After connecting, we run the initiate command to get things started:: - - >>> config = {'_id': 'foo', 'members': [ - ... {'_id': 0, 'host': 'localhost:27017'}, - ... {'_id': 1, 'host': 'localhost:27018'}, - ... {'_id': 2, 'host': 'localhost:27019'}]} - >>> c.admin.command("replSetInitiate", config) - {'ok': 1.0, ...} - -The three ``mongod`` servers we started earlier will now coordinate -and come online as a replica set. - -Connecting to a Replica Set -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The initial connection as made above is a special case for an -uninitialized replica set. Normally we'll want to connect -differently. A connection to a replica set can be made using the -:meth:`~pymongo.mongo_client.MongoClient` constructor, specifying -one or more members of the set and optionally the replica set name. -Any of the following connects to the replica set we just created:: - - >>> MongoClient('localhost') - MongoClient(host=['localhost:27017'], ...) - >>> MongoClient('localhost', replicaset='foo') - MongoClient(host=['localhost:27017'], replicaset='foo', ...) 
-    >>> MongoClient('localhost:27018', replicaset='foo')
-    MongoClient(['localhost:27018'], replicaset='foo', ...)
-    >>> MongoClient('localhost', 27019, replicaset='foo')
-    MongoClient(['localhost:27019'], replicaset='foo', ...)
-    >>> MongoClient('mongodb://localhost:27017,localhost:27018/')
-    MongoClient(['localhost:27017', 'localhost:27018'], ...)
-    >>> MongoClient('mongodb://localhost:27017,localhost:27018/?replicaSet=foo')
-    MongoClient(['localhost:27017', 'localhost:27018'], replicaset='foo', ...)
-
-The addresses passed to :meth:`~pymongo.mongo_client.MongoClient` are called
-the *seeds*. As long as at least one of the seeds is online, MongoClient
-discovers all the members in the replica set, and determines which is the
-current primary and which are secondaries or arbiters. Each seed must be the
-address of a single mongod. Multihomed and round robin DNS addresses are
-**not** supported.
-
-The :class:`~pymongo.mongo_client.MongoClient` constructor is non-blocking:
-the constructor returns immediately while the client connects to the replica
-set using background threads. Note how, if you create a client and immediately
-print the string representation of its
-:attr:`~pymongo.mongo_client.MongoClient.nodes` attribute, the list may be
-empty initially. If you wait a moment, MongoClient discovers the whole replica
-set::
-
-    >>> from time import sleep
-    >>> c = MongoClient(replicaset='foo'); print(c.nodes); sleep(0.1); print(c.nodes)
-    frozenset([])
-    frozenset([('localhost', 27019), ('localhost', 27017), ('localhost', 27018)])
-
-You need not wait for replica set discovery in your application, however.
-If you need to do any operation with a MongoClient, such as a
-:meth:`~pymongo.collection.Collection.find` or an
-:meth:`~pymongo.collection.Collection.insert_one`, the client waits to discover
-a suitable member before it attempts the operation.
-
-Handling Failover
-~~~~~~~~~~~~~~~~~
-
-When a failover occurs, PyMongo will automatically attempt to find the
-new primary node and perform subsequent operations on that node. This
-can't happen completely transparently, however. Here we'll perform an
-example failover to illustrate how everything behaves. First, we'll
-connect to the replica set and perform a couple of basic operations::
-
-    >>> db = MongoClient("localhost", replicaSet='foo').test
-    >>> db.test.insert_one({"x": 1}).inserted_id
-    ObjectId('...')
-    >>> db.test.find_one()
-    {'x': 1, '_id': ObjectId('...')}
-
-By checking the host and port, we can see that we're connected to
-*localhost:27017*, which is the current primary::
-
-    >>> db.client.address
-    ('localhost', 27017)
-
-Now let's bring down that node and see what happens when we run our
-query again::
-
-    >>> db.test.find_one()
-    Traceback (most recent call last):
-    pymongo.errors.AutoReconnect: ...
-
-We get an :class:`~pymongo.errors.AutoReconnect` exception. This means
-that the driver was not able to connect to the old primary (which
-makes sense, as we killed the server), but that it will attempt to
-automatically reconnect on subsequent operations. When this exception
-is raised our application code needs to decide whether to retry the
-operation or to simply continue, accepting the fact that the operation
-might have failed.
-
-On subsequent attempts to run the query we might continue to see this
-exception. Eventually, however, the replica set will fail over and
-elect a new primary (this should take no more than a couple of seconds
-in general).
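-
-In the meantime, application code that chooses to retry can catch
-:class:`~pymongo.errors.AutoReconnect` and run the operation again. A
-minimal sketch, assuming the operation is safe to re-run (the attempt
-count and delay here are arbitrary):
-
-.. code-block:: python
-
-    import time
-
-    from pymongo.errors import AutoReconnect
-
-    def find_one_with_retry(collection, query, max_attempts=5):
-        for attempt in range(max_attempts):
-            try:
-                return collection.find_one(query)
-            except AutoReconnect:
-                if attempt == max_attempts - 1:
-                    raise
-                # Give the replica set a moment to elect a new primary.
-                time.sleep(0.5 * (attempt + 1))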
At that point the driver will connect to the new primary and -the operation will succeed:: - - >>> db.test.find_one() - {'x': 1, '_id': ObjectId('...')} - >>> db.client.address - ('localhost', 27018) - -Bring the former primary back up. It will rejoin the set as a secondary. -Now we can move to the next section: distributing reads to secondaries. - -.. _secondary-reads: - -Secondary Reads -~~~~~~~~~~~~~~~ - -By default an instance of MongoClient sends queries to -the primary member of the replica set. To use secondaries for queries -we have to change the read preference:: - - >>> client = MongoClient( - ... 'localhost:27017', - ... replicaSet='foo', - ... readPreference='secondaryPreferred') - >>> client.read_preference - SecondaryPreferred(tag_sets=None) - -Now all queries will be sent to the secondary members of the set. If there are -no secondary members the primary will be used as a fallback. If you have -queries you would prefer to never send to the primary you can specify that -using the ``secondary`` read preference. - -By default the read preference of a :class:`~pymongo.database.Database` is -inherited from its MongoClient, and the read preference of a -:class:`~pymongo.collection.Collection` is inherited from its Database. To use -a different read preference use the -:meth:`~pymongo.mongo_client.MongoClient.get_database` method, or the -:meth:`~pymongo.database.Database.get_collection` method:: - - >>> from pymongo import ReadPreference - >>> client.read_preference - SecondaryPreferred(tag_sets=None) - >>> db = client.get_database('test', read_preference=ReadPreference.SECONDARY) - >>> db.read_preference - Secondary(tag_sets=None) - >>> coll = db.get_collection('test', read_preference=ReadPreference.PRIMARY) - >>> coll.read_preference - Primary() - -You can also change the read preference of an existing -:class:`~pymongo.collection.Collection` with the -:meth:`~pymongo.collection.Collection.with_options` method:: - - >>> coll2 = coll.with_options(read_preference=ReadPreference.NEAREST) - >>> coll.read_preference - Primary() - >>> coll2.read_preference - Nearest(tag_sets=None) - -Note that since most database commands can only be sent to the primary of a -replica set, the :meth:`~pymongo.database.Database.command` method does not obey -the Database's :attr:`~pymongo.database.Database.read_preference`, but you can -pass an explicit read preference to the method:: - - >>> db.command('dbstats', read_preference=ReadPreference.NEAREST) - {...} - -Reads are configured using three options: **read preference**, **tag sets**, -and **local threshold**. - -**Read preference**: - -Read preference is configured using one of the classes from -:mod:`~pymongo.read_preferences` (:class:`~pymongo.read_preferences.Primary`, -:class:`~pymongo.read_preferences.PrimaryPreferred`, -:class:`~pymongo.read_preferences.Secondary`, -:class:`~pymongo.read_preferences.SecondaryPreferred`, or -:class:`~pymongo.read_preferences.Nearest`). For convenience, we also provide -:class:`~pymongo.read_preferences.ReadPreference` with the following -attributes: - -- ``PRIMARY``: Read from the primary. This is the default read preference, - and provides the strongest consistency. If no primary is available, raise - :class:`~pymongo.errors.AutoReconnect`. - -- ``PRIMARY_PREFERRED``: Read from the primary if available, otherwise read - from a secondary. - -- ``SECONDARY``: Read from a secondary. If no matching secondary is available, - raise :class:`~pymongo.errors.AutoReconnect`. 
- -- ``SECONDARY_PREFERRED``: Read from a secondary if available, otherwise - from the primary. - -- ``NEAREST``: Read from any available member. - -**Tag sets**: - -Replica-set members can be `tagged -`_ according to any -criteria you choose. By default, PyMongo ignores tags when -choosing a member to read from, but your read preference can be configured with -a ``tag_sets`` parameter. ``tag_sets`` must be a list of dictionaries, each -dict providing tag values that the replica set member must match. -PyMongo tries each set of tags in turn until it finds a set of -tags with at least one matching member. For example, to prefer reads from the -New York data center, but fall back to the San Francisco data center, tag your -replica set members according to their location and create a -MongoClient like so:: - - >>> from pymongo.read_preferences import Secondary - >>> db = client.get_database( - ... 'test', read_preference=Secondary([{'dc': 'ny'}, {'dc': 'sf'}])) - >>> db.read_preference - Secondary(tag_sets=[{'dc': 'ny'}, {'dc': 'sf'}]) - -MongoClient tries to find secondaries in New York, then San Francisco, -and raises :class:`~pymongo.errors.AutoReconnect` if none are available. As an -additional fallback, specify a final, empty tag set, ``{}``, which means "read -from any member that matches the mode, ignoring tags." - -See :mod:`~pymongo.read_preferences` for more information. - -.. _distributes reads to secondaries: - -**Local threshold**: - -If multiple members match the read preference and tag sets, PyMongo reads -from among the nearest members, chosen according to ping time. By default, -only members whose ping times are within 15 milliseconds of the nearest -are used for queries. You can choose to distribute reads among members with -higher latencies by setting ``localThresholdMS`` to a larger -number:: - - >>> client = pymongo.MongoClient( - ... replicaSet='repl0', - ... readPreference='secondaryPreferred', - ... localThresholdMS=35) - -In this case, PyMongo distributes reads among matching members within 35 -milliseconds of the closest member's ping time. - -.. note:: ``localThresholdMS`` is ignored when talking to a - replica set *through* a mongos. The equivalent is the localThreshold_ command - line option. - -.. _localThreshold: https://mongodb.com/docs/manual/reference/program/mongos/#std-option-mongos.--localThreshold - -.. _health-monitoring: - -Health Monitoring -''''''''''''''''' - -When MongoClient is initialized it launches background threads to -monitor the replica set for changes in: - -* Health: detect when a member goes down or comes up, or if a different member - becomes primary -* Configuration: detect when members are added or removed, and detect changes - in members' tags -* Latency: track a moving average of each member's ping time - -Replica-set monitoring ensures queries are continually routed to the proper -members as the state of the replica set changes. - -.. _mongos-load-balancing: - -mongos Load Balancing ---------------------- - -An instance of :class:`~pymongo.mongo_client.MongoClient` can be configured -with a list of addresses of mongos servers: - - >>> client = MongoClient('mongodb://host1,host2,host3') - -Each member of the list must be a single mongos server. Multihomed and round -robin DNS addresses are **not** supported. The client continuously -monitors all the mongoses' availability, and its network latency to each. 
-
-PyMongo distributes operations evenly among the set of mongoses within its
-``localThresholdMS`` (similar to how it `distributes reads to secondaries`_
-in a replica set). By default the threshold is 15 ms.
-
-The lowest-latency server, and all servers with latencies no more than
-``localThresholdMS`` beyond the lowest-latency server's, receive
-operations equally. For example, if we have three mongoses:
-
-  - host1: 20 ms
-  - host2: 35 ms
-  - host3: 40 ms
-
-By default the ``localThresholdMS`` is 15 ms, so PyMongo uses host1 and host2
-evenly. It uses host1 because its network latency to the driver is shortest. It
-uses host2 because its latency is within 15 ms of the lowest-latency server's.
-But it excludes host3: host3 is 20 ms beyond the lowest-latency server.
-
-If we set ``localThresholdMS`` to 30 ms all servers are within the threshold:
-
-    >>> client = MongoClient('mongodb://host1,host2,host3/?localThresholdMS=30')
-
-.. warning:: Do **not** connect PyMongo to a pool of mongos instances through a
-   load balancer. A single socket connection must always be routed to the same
-   mongos instance for proper cursor support.
diff --git a/doc/examples/index.rst b/doc/examples/index.rst
deleted file mode 100644
index 6cdeafc201..0000000000
--- a/doc/examples/index.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-Examples
-========
-
-The examples in this section are intended to give in-depth overviews
-of how to accomplish specific tasks with MongoDB and PyMongo.
-
-Unless otherwise noted, all examples assume that a MongoDB instance is
-running on the default host and port. Assuming you have `downloaded
-and installed `_
-MongoDB, you can start it like so:
-
-.. code-block:: bash
-
-    $ mongod
-
-.. toctree::
-   :maxdepth: 1
-
-   aggregation
-   authentication
-   collations
-   copydb
-   custom_type
-   bulk
-   datetimes
-   geo
-   gevent
-   gridfs
-   high_availability
-   mod_wsgi
-   server_selection
-   tailable
-   tls
-   type_hints
-   encryption
-   uuid
diff --git a/doc/examples/mod_wsgi.rst b/doc/examples/mod_wsgi.rst
deleted file mode 100644
index 96d6ce892f..0000000000
--- a/doc/examples/mod_wsgi.rst
+++ /dev/null
@@ -1,64 +0,0 @@
-.. _pymongo-and-mod_wsgi:
-
-PyMongo and mod_wsgi
-====================
-
-To run your application under `mod_wsgi `_,
-follow these guidelines:
-
-* Run ``mod_wsgi`` in daemon mode with the ``WSGIDaemonProcess`` directive.
-* Assign each application to a separate daemon with ``WSGIProcessGroup``.
-* Use ``WSGIApplicationGroup %{GLOBAL}`` to ensure your application is running
-  in the daemon's main Python interpreter, not a sub interpreter.
-
-For example, this ``mod_wsgi`` configuration ensures an application runs in the
-main interpreter::
-
-    <VirtualHost *>
-        WSGIDaemonProcess my_process
-        WSGIScriptAlias /my_app /path/to/app.wsgi
-        WSGIProcessGroup my_process
-        WSGIApplicationGroup %{GLOBAL}
-    </VirtualHost>
-
-If you have multiple applications that use PyMongo, put each in a separate
-daemon, still in the global application group::
-
-    <VirtualHost *>
-        WSGIDaemonProcess my_process
-        WSGIScriptAlias /my_app /path/to/app.wsgi
-        <Location /my_app>
-            WSGIProcessGroup my_process
-        </Location>
-
-        WSGIDaemonProcess my_other_process
-        WSGIScriptAlias /my_other_app /path/to/other_app.wsgi
-        <Location /my_other_app>
-            WSGIProcessGroup my_other_process
-        </Location>
-
-        WSGIApplicationGroup %{GLOBAL}
-    </VirtualHost>
-
-Background: ``mod_wsgi`` can run in "embedded" mode when only WSGIScriptAlias
-is set, or "daemon" mode with WSGIDaemonProcess. In daemon mode, ``mod_wsgi``
-can run your application in the Python main interpreter, or in sub interpreters.
-The correct way to run a PyMongo application is in daemon mode, using the main
-interpreter.
-
-Python C extensions in general have issues running in multiple
-Python sub interpreters. These difficulties are explained in the documentation for
-`Py_NewInterpreter `_
-and in the `Multiple Python Sub Interpreters
-`_
-section of the ``mod_wsgi`` documentation.
-
-Beginning with PyMongo 2.7, the C extension for BSON detects when it is running
-in a sub interpreter and activates a workaround, which adds a small cost to
-BSON decoding. To avoid this cost, use ``WSGIApplicationGroup %{GLOBAL}`` to
-ensure your application runs in the main interpreter.
-
-Since your program runs in the main interpreter it should not share its
-process with any other applications, lest they interfere with each other's
-state. Each application should have its own daemon process, as shown in the
-example above.
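-
-The WSGI script itself can create a single module-level
-:class:`~pymongo.mongo_client.MongoClient` that is shared by all requests in
-the daemon process. A minimal sketch of such an ``app.wsgi`` file (the
-database and collection names are placeholders):
-
-.. code-block:: python
-
-    from pymongo import MongoClient
-
-    # One client per daemon process, created at import time.
-    client = MongoClient()
-
-    def application(environ, start_response):
-        # Reuse the module-level client on every request.
-        doc = client.my_database.my_collection.find_one()
-        body = repr(doc).encode("utf-8")
-        start_response("200 OK", [("Content-Type", "text/plain")])
-        return [body]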
diff --git a/doc/examples/server_selection.rst b/doc/examples/server_selection.rst
deleted file mode 100644
index be2172489e..0000000000
--- a/doc/examples/server_selection.rst
+++ /dev/null
@@ -1,108 +0,0 @@
-Server Selector Example
-=======================
-
-Users can exert fine-grained control over the `server selection algorithm`_
-by setting the `server_selector` option on the :class:`~pymongo.MongoClient`
-to an appropriate callable. This example shows how to use this functionality
-to prefer servers running on ``localhost``.
-
-
-.. warning::
-
-   Use of custom server selector functions is a power user feature. Misusing
-   custom server selectors can have unintended consequences such as degraded
-   read/write performance.
-
-
-.. testsetup::
-
-    from pymongo import MongoClient
-
-
-.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/
-
-
-Example: Selecting Servers Running on ``localhost``
----------------------------------------------------
-
-To start, we need to write the server selector function that will be used.
-The server selector function should accept a list of
-:class:`~pymongo.server_description.ServerDescription` objects and return a
-list of server descriptions that are suitable for the read or write operation.
-A server selector must not create or modify
-:class:`~pymongo.server_description.ServerDescription` objects, and must return
-the selected instances unchanged.
-
-In this example, we write a server selector that prioritizes servers running on
-``localhost``. This can be desirable when using a sharded cluster with multiple
-``mongos``, as locally run queries are likely to see lower latency and higher
-throughput. Note, however, that whether preferring ``localhost`` is beneficial
-depends heavily on the application.
-
-In addition to comparing the hostname with ``localhost``, our server selector
-function accounts for the edge case when no servers are running on
-``localhost``. In this case, we allow the default server selection logic to
-prevail by passing through the received server description list unchanged.
-Failure to do this would render the client unable to communicate with MongoDB
-in the event that no servers were running on ``localhost``.
-
-
-The described server selection logic is implemented in the following server
-selector function:
-
-
-.. doctest::
-
-    >>> def server_selector(server_descriptions):
-    ...     servers = [
-    ...         server for server in server_descriptions
-    ...         if server.address[0] == 'localhost'
-    ...     ]
-    ...     if not servers:
-    ...         return server_descriptions
-    ...
return servers - - - -Finally, we can create a :class:`~pymongo.MongoClient` instance with this -server selector. - - -.. doctest:: - - >>> client = MongoClient(server_selector=server_selector) - - - -Server Selection Process ------------------------- - -This section dives deeper into the server selection process for reads and -writes. In the case of a write, the driver performs the following operations -(in order) during the selection process: - - -#. Select all writeable servers from the list of known hosts. For a replica set - this is the primary, while for a sharded cluster this is all the known mongoses. - -#. Apply the user-defined server selector function. Note that the custom server - selector is **not** called if there are no servers left from the previous - filtering stage. - -#. Apply the ``localThresholdMS`` setting to the list of remaining hosts. This - whittles the host list down to only contain servers whose latency is at most - ``localThresholdMS`` milliseconds higher than the lowest observed latency. - -#. Select a server at random from the remaining host list. The desired - operation is then performed against the selected server. - - -In the case of **reads** the process is identical except for the first step. -Here, instead of selecting all writeable servers, we select all servers -matching the user's :class:`~pymongo.read_preferences.ReadPreference` from the -list of known hosts. As an example, for a 3-member replica set with a -:class:`~pymongo.read_preferences.Secondary` read preference, we would select -all available secondaries. - - -.. _server selection algorithm: https://mongodb.com/docs/manual/core/read-preference-mechanics/ diff --git a/doc/examples/tailable.rst b/doc/examples/tailable.rst deleted file mode 100644 index 79458dc2ff..0000000000 --- a/doc/examples/tailable.rst +++ /dev/null @@ -1,42 +0,0 @@ -Tailable Cursors -================ - -By default, MongoDB will automatically close a cursor when the client has -exhausted all results in the cursor. However, for `capped collections -`_ you may -use a `tailable cursor -`_ -that remains open after the client exhausts the results in the initial cursor. - -The following is a basic example of using a tailable cursor to tail the oplog -of a replica set member:: - - import time - - import pymongo - - client = pymongo.MongoClient() - oplog = client.local.oplog.rs - first = oplog.find().sort('$natural', pymongo.ASCENDING).limit(-1).next() - print(first) - ts = first['ts'] - - while True: - # For a regular capped collection CursorType.TAILABLE_AWAIT is the - # only option required to create a tailable cursor. When querying the - # oplog, the oplog_replay option enables an optimization to quickly - # find the 'ts' value we're looking for. The oplog_replay option - # can only be used when querying the oplog. Starting in MongoDB 4.4 - # this option is ignored by the server as queries against the oplog - # are optimized automatically by the MongoDB query engine. - cursor = oplog.find({'ts': {'$gt': ts}}, - cursor_type=pymongo.CursorType.TAILABLE_AWAIT, - oplog_replay=True) - while cursor.alive: - for doc in cursor: - ts = doc['ts'] - print(doc) - # We end up here if the find() returned no documents or if the - # tailable cursor timed out (no new documents were added to the - # collection for more than 1 second). 
- time.sleep(1) diff --git a/doc/examples/tls.rst b/doc/examples/tls.rst deleted file mode 100644 index 557ee7d9b9..0000000000 --- a/doc/examples/tls.rst +++ /dev/null @@ -1,234 +0,0 @@ -TLS/SSL and PyMongo -=================== - -PyMongo supports connecting to MongoDB over TLS/SSL. This guide covers the -configuration options supported by PyMongo. See `the server documentation -`_ to configure -MongoDB. - -.. warning:: Industry best practices recommend, and some regulations require, - the use of TLS 1.1 or newer. Though no application changes are required for - PyMongo to make use of the newest protocols, some operating systems or - versions may not provide an OpenSSL version new enough to support them. - - Users of macOS older than 10.13 (High Sierra) will need to install Python - from `python.org`_, `homebrew`_, `macports`_, or another similar source. - - Users of Linux or other non-macOS Unix can check their OpenSSL version like - this:: - - $ openssl version - - If the version number is less than 1.0.1 support for TLS 1.1 or newer is not - available. Contact your operating system vendor for a solution or upgrade to - a newer distribution. - - You can check your Python interpreter by installing the `requests`_ module - and executing the following command:: - - python -c "import requests; print(requests.get('https://www.howsmyssl.com/a/check', verify=False).json()['tls_version'])" - - You should see "TLS 1.X" where X is >= 1. - - You can read more about TLS versions and their security implications here: - - ``_ - -.. _python.org: https://www.python.org/downloads/ -.. _homebrew: https://brew.sh/ -.. _macports: https://www.macports.org/ -.. _requests: https://pypi.python.org/pypi/requests - -Basic configuration -................... - -In many cases connecting to MongoDB over TLS/SSL requires nothing more than -passing ``tls=True`` as a keyword argument to -:class:`~pymongo.mongo_client.MongoClient`:: - - >>> client = pymongo.MongoClient('example.com', tls=True) - -Or passing ``tls=true`` in the URI:: - - >>> client = pymongo.MongoClient('mongodb://example.com/?tls=true') - -This configures PyMongo to connect to the server using TLS, verify the server's -certificate and verify that the host you are attempting to connect to is listed -by that certificate. - -Certificate verification policy -............................... - -By default, PyMongo is configured to require a certificate from the server when -TLS is enabled. This is configurable using the ``tlsAllowInvalidCertificates`` -option. To disable this requirement pass ``tlsAllowInvalidCertificates=True`` -as a keyword parameter:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsAllowInvalidCertificates=True) - -Or, in the URI:: - - >>> uri = 'mongodb://example.com/?tls=true&tlsAllowInvalidCertificates=true' - >>> client = pymongo.MongoClient(uri) - -Specifying a CA file -.................... - -In some cases you may want to configure PyMongo to use a specific set of CA -certificates. This is most often the case when you are acting as your own -certificate authority rather than using server certificates signed by a well -known authority. The ``tlsCAFile`` option takes a path to a CA file. It can be -passed as a keyword argument:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... 
tlsCAFile='/path/to/ca.pem') - -Or, in the URI:: - - >>> uri = 'mongodb://example.com/?tls=true&tlsCAFile=/path/to/ca.pem' - >>> client = pymongo.MongoClient(uri) - -Specifying a certificate revocation list -........................................ - -The ``tlsCRLFile`` option takes a path to a CRL file. It can be passed -as a keyword argument:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCRLFile='/path/to/crl.pem') - -Or, in the URI:: - - >>> uri = 'mongodb://example.com/?tls=true&tlsCRLFile=/path/to/crl.pem' - >>> client = pymongo.MongoClient(uri) - -.. note:: Certificate revocation lists and :ref:`OCSP` cannot be used together. - -Client certificates -................... - -PyMongo can be configured to present a client certificate using the -``tlsCertificateKeyFile`` option:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCertificateKeyFile='/path/to/client.pem') - -If the private key for the client certificate is stored in a separate file, -it should be concatenated with the certificate file. For example, to -concatenate a PEM-formatted certificate file ``cert.pem`` and a PEM-formatted -keyfile ``key.pem`` into a single file ``combined.pem``, on Unix systems, -users can run:: - - $ cat key.pem cert.pem > combined.pem - -PyMongo can be configured with the concatenated certificate keyfile using the -``tlsCertificateKeyFile`` option:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCertificateKeyFile='/path/to/combined.pem') - -If the private key contained in the certificate keyfile is encrypted, users -can provide a password or passphrase to decrypt the encrypted private keys -using the ``tlsCertificateKeyFilePassword`` option:: - - >>> client = pymongo.MongoClient('example.com', - ... tls=True, - ... tlsCertificateKeyFile='/path/to/combined.pem', - ... tlsCertificateKeyFilePassword=) - -These options can also be passed as part of the MongoDB URI. - -.. _OCSP: - -OCSP -.... - -Starting with PyMongo 3.11, if PyMongo was installed with the "ocsp" extra:: - - python -m pip install pymongo[ocsp] - -certificate revocation checking is enabled by way of `OCSP (Online Certification -Status Protocol) `_. -MongoDB 4.4+ `staples OCSP responses `_ -to the TLS handshake which PyMongo will verify, failing the TLS handshake if -the stapled OCSP response is invalid or indicates that the peer certificate is -revoked. - -When connecting to a server version older than 4.4, or when a 4.4+ version of -MongoDB does not staple an OCSP response, PyMongo will attempt to connect -directly to an OCSP endpoint if the peer certificate specified one. The TLS -handshake will only fail in this case if the response indicates that the -certificate is revoked. Invalid or malformed responses will be ignored, -favoring availability over maximum security. - -.. _TLSErrors: - -Troubleshooting TLS Errors -.......................... - -TLS errors often fall into three categories - certificate verification failure, -protocol version mismatch or certificate revocation checking failure. An error -message similar to the following means that OpenSSL was not able to verify the -server's certificate:: - - [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed - -This often occurs because OpenSSL does not have access to the system's -root certificates or the certificates are out of date. Linux users should -ensure that they have the latest root certificate updates installed from -their Linux vendor. 
-macOS users using Python 3.7 or newer downloaded
-from python.org `may have to run a script included with python
-`_ to install
-root certificates::
-
-    open "/Applications/Python /Install Certificates.command"
-
-Users of older PyPy portable versions may have to `set an environment
-variable `_ to tell
-OpenSSL where to find root certificates. This is easily done using the `certifi
-module `_ from pypi::
-
-    $ pypy -m pip install certifi
-    $ export SSL_CERT_FILE=$(pypy -c "import certifi; print(certifi.where())")
-
-An error message similar to the following message means that the OpenSSL
-version used by Python does not support a new enough TLS protocol to connect
-to the server::
-
-    [SSL: TLSV1_ALERT_PROTOCOL_VERSION] tlsv1 alert protocol version
-
-Industry best practices recommend, and some regulations require, that older
-TLS protocols be disabled in some MongoDB deployments. Some deployments may
-disable TLS 1.0, others may disable TLS 1.0 and TLS 1.1. See the warning
-earlier in this document for troubleshooting steps and solutions.
-
-An error message similar to the following message means that certificate
-revocation checking failed::
-
-    [('SSL routines', 'tls_process_initial_server_flight', 'invalid status response')]
-
-See :ref:`OCSP` for more details.
-
-Python 3.10+ incompatibilities with TLS/SSL on MongoDB <= 4.0
-.............................................................
-
-Note that `changes made to the ssl module in Python 3.10+
-`_ may cause incompatibilities
-with MongoDB <= 4.0. The following are some example errors that may occur with this
-combination::
-
-    SSL handshake failed: localhost:27017: [SSL: SSLV3_ALERT_HANDSHAKE_FAILURE] sslv3 alert handshake failure (_ssl.c:997)
-    SSL handshake failed: localhost:27017: EOF occurred in violation of protocol (_ssl.c:997)
-
-The MongoDB server logs may show the following error::
-
-    2021-06-30T21:22:44.917+0100 E NETWORK [conn16] SSL: error:1408A0C1:SSL routines:ssl3_get_client_hello:no shared cipher
-
-To resolve this issue, use Python <3.10, upgrade to MongoDB 4.2+, or install
-pymongo with the :ref:`OCSP` extra which relies on PyOpenSSL.
diff --git a/doc/examples/type_hints.rst b/doc/examples/type_hints.rst
deleted file mode 100644
index 6858e95290..0000000000
--- a/doc/examples/type_hints.rst
+++ /dev/null
@@ -1,245 +0,0 @@
-
-.. _type_hints-example:
-
-Type Hints
-===========
-
-As of version 4.1, PyMongo ships with `type hints`_. With type hints, Python
-type checkers can easily find bugs before they reveal themselves in your code.
-
-If your IDE is configured to use type hints,
-it can suggest more appropriate completions and highlight errors in your code.
-Some examples include `PyCharm`_, `Sublime Text`_, and `Visual Studio Code`_.
-
-You can also use the `mypy`_ tool from your command line or in Continuous Integration tests.
-
-All of the public APIs in PyMongo are fully type hinted, and
-several of them support generic parameters for the
-type of document object returned when decoding BSON documents.
-
-Due to `limitations in mypy`_, the default
-values for generic document types are not yet provided (they will eventually be ``Dict[str, Any]``).
-
-For a larger set of examples that use types, see the PyMongo `test_mypy module`_.
-
-If you would like to opt out of using the provided types, add the following to
-your `mypy config`_: ::
-
-    [mypy-pymongo]
-    follow_imports = False
-
-
-Basic Usage
------------
-
-Note that a type for :class:`~pymongo.mongo_client.MongoClient` must be specified.
-Here we use the default, unspecified document type:
-
-.. doctest::
-
-    >>> from pymongo import MongoClient
-    >>> client: MongoClient = MongoClient()
-    >>> collection = client.test.test
-    >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]})
-    >>> retrieved = collection.find_one({"x": 1})
-    >>> assert isinstance(retrieved, dict)
-
-For more accurate typing of the document type you can use:
-
-.. doctest::
-
-    >>> from typing import Any, Dict
-    >>> from pymongo import MongoClient
-    >>> client: MongoClient[Dict[str, Any]] = MongoClient()
-    >>> collection = client.test.test
-    >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]})
-    >>> retrieved = collection.find_one({"x": 1})
-    >>> assert isinstance(retrieved, dict)
-
-Typed Client
-------------
-
-:class:`~pymongo.mongo_client.MongoClient` is generic on the document type used to decode BSON documents.
-
-You can specify a :class:`~bson.raw_bson.RawBSONDocument` document type:
-
-.. doctest::
-
-    >>> from pymongo import MongoClient
-    >>> from bson.raw_bson import RawBSONDocument
-    >>> client = MongoClient(document_class=RawBSONDocument)
-    >>> collection = client.test.test
-    >>> inserted = collection.insert_one({"x": 1, "tags": ["dog", "cat"]})
-    >>> result = collection.find_one({"x": 1})
-    >>> assert isinstance(result, RawBSONDocument)
-
-Subclasses of :py:class:`collections.abc.Mapping` can also be used, such as :class:`~bson.son.SON`:
-
-.. doctest::
-
-    >>> from bson import SON
-    >>> from pymongo import MongoClient
-    >>> client = MongoClient(document_class=SON[str, int])
-    >>> collection = client.test.test
-    >>> inserted = collection.insert_one({"x": 1, "y": 2})
-    >>> result = collection.find_one({"x": 1})
-    >>> assert result is not None
-    >>> assert result["x"] == 1
-
-Note that when using :class:`~bson.son.SON`, the key and value types must be given, e.g. ``SON[str, Any]``.
-
-
-Typed Collection
-----------------
-
-You can use :py:class:`~typing.TypedDict` (Python 3.8+) when using a well-defined schema for the data in a :class:`~pymongo.collection.Collection`:
-
-.. doctest::
-
-    >>> from typing import TypedDict
-    >>> from pymongo import MongoClient
-    >>> from pymongo.collection import Collection
-    >>> class Movie(TypedDict):
-    ...     name: str
-    ...     year: int
-    ...
-    >>> client: MongoClient = MongoClient()
-    >>> collection: Collection[Movie] = client.test.test
-    >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993})
-    >>> result = collection.find_one({"name": "Jurassic Park"})
-    >>> assert result is not None
-    >>> assert result["year"] == 1993
-
-Typed Database
---------------
-
-While less common, you could specify that the documents in an entire database
-match a well-defined schema using :py:class:`~typing.TypedDict` (Python 3.8+).
-
-
-.. doctest::
-
-    >>> from typing import TypedDict
-    >>> from pymongo import MongoClient
-    >>> from pymongo.database import Database
-    >>> class Movie(TypedDict):
-    ...     name: str
-    ...     year: int
-    ...
-    >>> client: MongoClient = MongoClient()
-    >>> db: Database[Movie] = client.test
-    >>> collection = db.test
-    >>> inserted = collection.insert_one({"name": "Jurassic Park", "year": 1993})
-    >>> result = collection.find_one({"name": "Jurassic Park"})
-    >>> assert result is not None
-    >>> assert result["year"] == 1993
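-
-Because the collection is typed as ``Collection[Movie]`` (or derived from
-``Database[Movie]``), a type checker can flag documents that do not match
-the declared schema. A minimal sketch of the kind of error ``mypy``
-reports (the exact message varies by version)::
-
-    collection.insert_one(
-        {"name": "Jurassic Park"}
-    )  # error: Missing key "year" for TypedDict "Movie"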
-Typed Command
--------------
-When using the :meth:`~pymongo.database.Database.command` method, you can specify the document type by providing a custom :class:`~bson.codec_options.CodecOptions`:
-
-.. doctest::
-
-    >>> from pymongo import MongoClient
-    >>> from bson.raw_bson import RawBSONDocument
-    >>> from bson import CodecOptions
-    >>> client: MongoClient = MongoClient()
-    >>> options = CodecOptions(RawBSONDocument)
-    >>> result = client.admin.command("ping", codec_options=options)
-    >>> assert isinstance(result, RawBSONDocument)
-
-Custom :py:class:`collections.abc.Mapping` subclasses and :py:class:`~typing.TypedDict` (Python 3.8+) are also supported.
-For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``.
-
-Typed BSON Decoding
--------------------
-You can specify the document type returned by :mod:`bson` decoding functions by providing :class:`~bson.codec_options.CodecOptions`:
-
-.. doctest::
-
-    >>> from typing import Any, Dict
-    >>> from bson import CodecOptions, encode, decode
-    >>> class MyDict(Dict[str, Any]):
-    ...     def foo(self):
-    ...         return "bar"
-    ...
-    >>> options = CodecOptions(document_class=MyDict)
-    >>> doc = {"x": 1, "y": 2}
-    >>> bsonbytes = encode(doc, codec_options=options)
-    >>> rt_document = decode(bsonbytes, codec_options=options)
-    >>> assert rt_document.foo() == "bar"
-
-:class:`~bson.raw_bson.RawBSONDocument` and :py:class:`~typing.TypedDict` (Python 3.8+) are also supported.
-For :py:class:`~typing.TypedDict`, use the form: ``options: CodecOptions[MyTypedDict] = CodecOptions(...)``.
-
-
-Troubleshooting
----------------
-
-Client Type Annotation
-~~~~~~~~~~~~~~~~~~~~~~
-If you forget to add a type annotation for a :class:`~pymongo.mongo_client.MongoClient` object you may get the following ``mypy`` error::
-
-    from pymongo import MongoClient
-    client = MongoClient()  # error: Need type annotation for "client"
-
-The solution is to annotate the type as ``client: MongoClient`` or ``client: MongoClient[Dict[str, Any]]``. See `Basic Usage`_.
-
-Incompatible Types
-~~~~~~~~~~~~~~~~~~
-If you use the generic form of :class:`~pymongo.mongo_client.MongoClient` you
-may encounter a ``mypy`` error like::
-
-    from pymongo import MongoClient
-
-    client: MongoClient = MongoClient()
-    client.test.test.insert_many(
-        {"a": 1}
-    )  # error: Dict entry 0 has incompatible type "str": "int";
-    # expected "Mapping[str, Any]": "int"
-
-
-The solution is to use ``client: MongoClient[Dict[str, Any]]`` as used in
-`Basic Usage`_.
-
-Actual Type Errors
-~~~~~~~~~~~~~~~~~~
-
-Other times ``mypy`` will catch an actual error, like the following code::
-
-    from pymongo import MongoClient
-    from typing import Mapping
-    client: MongoClient = MongoClient()
-    client.test.test.insert_one(
-        [{}]
-    )  # error: Argument 1 to "insert_one" of "Collection" has
-    # incompatible type "List[Dict[, ]]";
-    # expected "Mapping[str, Any]"
-
-In this case the solution is to use ``insert_one({})``, passing a document instead of a list.
-
-Another example is trying to set a value on a :class:`~bson.raw_bson.RawBSONDocument`, which is read-only::
-
-    from bson.raw_bson import RawBSONDocument
-    from pymongo import MongoClient
-
-    client = MongoClient(document_class=RawBSONDocument)
-    coll = client.test.test
-    doc = {"my": "doc"}
-    coll.insert_one(doc)
-    retrieved = coll.find_one({"_id": doc["_id"]})
-    assert retrieved is not None
-    assert len(retrieved.raw) > 0
-    retrieved[
-        "foo"
-    ] = "bar"  # error: Unsupported target for indexed assignment
-    # ("RawBSONDocument") [index]
-
-.. _PyCharm: https://www.jetbrains.com/help/pycharm/type-hinting-in-product.html
-.. _Visual Studio Code: https://code.visualstudio.com/docs/languages/python
-.. _Sublime Text: https://github.com/sublimelsp/LSP-pyright
-.. _type hints: https://docs.python.org/3/library/typing.html
-.. _mypy: https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html
-.. _limitations in mypy: https://github.com/python/mypy/issues/3737
-.. _mypy config: https://mypy.readthedocs.io/en/stable/config_file.html
-.. _test_mypy module: https://github.com/mongodb/mongo-python-driver/blob/master/test/test_mypy.py
diff --git a/doc/examples/uuid.rst b/doc/examples/uuid.rst
deleted file mode 100644
index 90ec71ebe2..0000000000
--- a/doc/examples/uuid.rst
+++ /dev/null
@@ -1,512 +0,0 @@
-
-.. _handling-uuid-data-example:
-
-Handling UUID Data
-==================
-
-PyMongo ships with built-in support for dealing with UUID types.
-It is straightforward to store native :class:`uuid.UUID` objects
-to MongoDB and retrieve them as native :class:`uuid.UUID` objects::
-
-    from pymongo import MongoClient
-    from bson.binary import UuidRepresentation
-    from uuid import uuid4
-
-    # use the 'standard' representation for cross-language compatibility.
-    client = MongoClient(uuidRepresentation='standard')
-    collection = client.get_database('uuid_db').get_collection('uuid_coll')
-
-    # remove all documents from collection
-    collection.delete_many({})
-
-    # create a native uuid object
-    uuid_obj = uuid4()
-
-    # save the native uuid object to MongoDB
-    collection.insert_one({'uuid': uuid_obj})
-
-    # retrieve the stored uuid object from MongoDB
-    document = collection.find_one({})
-
-    # check that the retrieved UUID matches the inserted UUID
-    assert document['uuid'] == uuid_obj
-
-Native :class:`uuid.UUID` objects can also be used as part of MongoDB
-queries::
-
-    document = collection.find_one({'uuid': uuid_obj})
-    assert document['uuid'] == uuid_obj
-
-The above examples illustrate the simplest of use-cases - one where the
-UUID is generated by, and used in the same application. However,
-the situation can be significantly more complex when dealing with a MongoDB
-deployment that contains UUIDs created by other drivers, as the Java and C#
-drivers have historically encoded UUIDs using a byte-order that is different
-from the one used by PyMongo. Applications that require interoperability across
-these drivers must specify the appropriate
-:class:`~bson.binary.UuidRepresentation`.
-
-In the following sections, we describe how drivers have historically differed
-in their encoding of UUIDs, and how applications can use the
-:class:`~bson.binary.UuidRepresentation` configuration option to maintain
-cross-language compatibility.
-
-.. attention:: New applications that do not share a MongoDB deployment with
-   any other application and that have never stored UUIDs in MongoDB
-   should use the ``standard`` UUID representation for cross-language
-   compatibility. See :ref:`configuring-uuid-representation` for details
-   on how to configure the :class:`~bson.binary.UuidRepresentation`.
-
-.. _example-legacy-uuid:
-
-Legacy Handling of UUID Data
-----------------------------
-
-Historically, MongoDB Drivers have used different byte-ordering
-while serializing UUID types to :class:`~bson.binary.Binary`.
-
-Consider, for instance, a UUID with the following canonical textual
-representation::
-
-    00112233-4455-6677-8899-aabbccddeeff
-
-This UUID would historically be serialized by the Python driver as::
-
-    00112233-4455-6677-8899-aabbccddeeff
-
-The same UUID would historically be serialized by the C# driver as::
-
-    33221100-5544-7766-8899-aabbccddeeff
-
-Finally, the same UUID would historically be serialized by the Java driver as::
-
-    77665544-3322-1100-ffee-ddccbbaa9988
-
-.. note:: For in-depth information about the byte-order historically
-   used by different drivers, see the `Handling of Native UUID Types
-   Specification
-   `_.
-
-This difference in the byte-order of UUIDs encoded by different drivers can
-result in highly unintuitive behavior in some scenarios. We detail two such
-scenarios in the next sections.
-
-Scenario 1: Applications Share a MongoDB Deployment
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Consider the following situation:
-
-* Application ``C`` written in C# generates a UUID and uses it as the ``_id``
-  of a document that it proceeds to insert into the ``uuid_test`` collection of
-  the ``example_db`` database. Let's assume that the canonical textual
-  representation of the generated UUID is::
-
-      00112233-4455-6677-8899-aabbccddeeff
-
-* Application ``P`` written in Python attempts to ``find`` the document
-  written by application ``C`` in the following manner::
-
-      from uuid import UUID
-      collection = client.example_db.uuid_test
-      result = collection.find_one({'_id': UUID('00112233-4455-6677-8899-aabbccddeeff')})
-
-  In this instance, ``result`` will never be the document that
-  was inserted by application ``C`` in the previous step. This is because of
-  the different byte-order used by the C# driver for representing UUIDs as
-  BSON Binary. The following query, on the other hand, will successfully find
-  this document::
-
-      result = collection.find_one({'_id': UUID('33221100-5544-7766-8899-aabbccddeeff')})
-
-This example demonstrates how the differing byte-order used by different
-drivers can hamper interoperability. To work around this problem, users should
-configure their ``MongoClient`` with the appropriate
-:class:`~bson.binary.UuidRepresentation` (in this case, ``client`` in application
-``P`` can be configured to use the
-:data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY` representation to
-avoid the unintuitive behavior) as described in
-:ref:`configuring-uuid-representation`.
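-
-Concretely, application ``P`` could read the document written by
-application ``C`` like this - a minimal sketch, assuming the same
-``example_db.uuid_test`` namespace as above:
-
-.. code-block:: python
-
-    from uuid import UUID
-
-    from pymongo import MongoClient
-
-    # Decode/encode UUIDs using the C# driver's legacy byte-order.
-    client = MongoClient(uuidRepresentation='csharpLegacy')
-    collection = client.example_db.uuid_test
-
-    # The same query from above now matches the document written by C.
-    result = collection.find_one({'_id': UUID('00112233-4455-6677-8899-aabbccddeeff')})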
-
-Scenario 2: Round-Tripping UUIDs
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In the following examples, we see how using a misconfigured
-:class:`~bson.binary.UuidRepresentation` can cause an application
-to inadvertently change the :class:`~bson.binary.Binary` subtype, and in some
-cases, the bytes of the :class:`~bson.binary.Binary` field itself when
-round-tripping documents containing UUIDs.
-
-Consider the following situation::
-
-    from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
-    from bson.binary import Binary, UuidRepresentation
-    from uuid import uuid4
-
-    # Using UuidRepresentation.PYTHON_LEGACY stores a Binary subtype-3 UUID
-    python_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY)
-    input_uuid = uuid4()
-    collection = client.testdb.get_collection('test', codec_options=python_opts)
-    collection.insert_one({'_id': 'foo', 'uuid': input_uuid})
-    assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)})['_id'] == 'foo'
-
-    # Retrieving this document using UuidRepresentation.STANDARD returns a Binary instance
-    std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
-    std_collection = client.testdb.get_collection('test', codec_options=std_opts)
-    doc = std_collection.find_one({'_id': 'foo'})
-    assert isinstance(doc['uuid'], Binary)
-
-    # Round-tripping the retrieved document yields the exact same document
-    std_collection.replace_one({'_id': 'foo'}, doc)
-    round_tripped_doc = collection.find_one({'uuid': Binary(input_uuid.bytes, 3)})
-    assert doc == round_tripped_doc
-
-In this example, round-tripping the document using the incorrect
-:class:`~bson.binary.UuidRepresentation` (``STANDARD`` instead of
-``PYTHON_LEGACY``) changes the :class:`~bson.binary.Binary` subtype as a
-side-effect. **Note that this can also happen when the situation is reversed -
-i.e. when the original document is written using ``STANDARD`` representation
-and then round-tripped using the ``PYTHON_LEGACY`` representation.**
-
-In the next example, we see the consequences of incorrectly using a
-representation that modifies byte-order (``CSHARP_LEGACY`` or ``JAVA_LEGACY``)
-when round-tripping documents::
-
-    from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
-    from bson.binary import Binary, UuidRepresentation
-    from uuid import uuid4
-
-    # Using UuidRepresentation.STANDARD stores a Binary subtype-4 UUID
-    std_opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
-    input_uuid = uuid4()
-    collection = client.testdb.get_collection('test', codec_options=std_opts)
-    collection.insert_one({'_id': 'baz', 'uuid': input_uuid})
-    assert collection.find_one({'uuid': Binary(input_uuid.bytes, 4)})['_id'] == 'baz'
-
-    # Retrieving this document using UuidRepresentation.JAVA_LEGACY returns a native UUID
-    # without modifying the UUID byte-order
-    java_opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY)
-    java_collection = client.testdb.get_collection('test', codec_options=java_opts)
-    doc = java_collection.find_one({'_id': 'baz'})
-    assert doc['uuid'] == input_uuid
-
-    # Round-tripping the retrieved document silently changes the Binary bytes and subtype
-    java_collection.replace_one({'_id': 'baz'}, doc)
-    assert collection.find_one({'uuid': Binary(input_uuid.bytes, 3)}) is None
-    assert collection.find_one({'uuid': Binary(input_uuid.bytes, 4)}) is None
-    round_tripped_doc = collection.find_one({'_id': 'baz'})
-    assert round_tripped_doc['uuid'] == Binary(input_uuid.bytes, 3).as_uuid(UuidRepresentation.JAVA_LEGACY)
-
-In this case, using the incorrect :class:`~bson.binary.UuidRepresentation`
-(``JAVA_LEGACY`` instead of ``STANDARD``) changes the
-:class:`~bson.binary.Binary` bytes and subtype as a side-effect.
-**Note that this happens when any representation that
-manipulates byte-order (``CSHARP_LEGACY`` or ``JAVA_LEGACY``) is incorrectly
-used to round-trip UUIDs written with ``STANDARD``. When the situation is
-reversed - i.e. when the original document is written using ``CSHARP_LEGACY``
-or ``JAVA_LEGACY`` and then round-tripped using ``STANDARD`` -
-only the :class:`~bson.binary.Binary` subtype is changed.**
-
-.. note:: Starting in PyMongo 4.0, these issues will be resolved as
-   the ``STANDARD`` representation will decode Binary subtype 3 fields as
-   :class:`~bson.binary.Binary` objects of subtype 3 (instead of
-   :class:`uuid.UUID`), and each of the ``LEGACY_*`` representations will
-   decode Binary subtype 4 fields to :class:`~bson.binary.Binary` objects of
-   subtype 4 (instead of :class:`uuid.UUID`).
-
-.. _configuring-uuid-representation:
-
-Configuring a UUID Representation
----------------------------------
-
-Users can work around the problems described above by configuring their
-applications with the appropriate :class:`~bson.binary.UuidRepresentation`.
-Configuring the representation modifies PyMongo's behavior while
-encoding :class:`uuid.UUID` objects to BSON and decoding
-Binary subtype 3 and 4 fields from BSON.
-
-Applications can set the UUID representation in one of the following ways:
-
-#. At the ``MongoClient`` level using the ``uuidRepresentation`` URI option,
-   e.g.::
-
-      client = MongoClient("mongodb://a:27017/?uuidRepresentation=standard")
-
-   Valid values are:
-
-   .. list-table::
-      :header-rows: 1
-
-      * - Value
-        - UUID Representation
-
-      * - ``unspecified``
-        - :ref:`unspecified-representation-details`
-
-      * - ``standard``
-        - :ref:`standard-representation-details`
-
-      * - ``pythonLegacy``
-        - :ref:`python-legacy-representation-details`
-
-      * - ``javaLegacy``
-        - :ref:`java-legacy-representation-details`
-
-      * - ``csharpLegacy``
-        - :ref:`csharp-legacy-representation-details`
-
-#. At the ``MongoClient`` level using the ``uuidRepresentation`` kwarg
-   option, e.g.::
-
-      from bson.binary import UuidRepresentation
-      client = MongoClient(uuidRepresentation=UuidRepresentation.STANDARD)
-
-#. At the ``Database`` or ``Collection`` level by supplying a suitable
-   :class:`~bson.codec_options.CodecOptions` instance, e.g.::
-
-      from bson.codec_options import CodecOptions
-      csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY)
-      java_opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY)
-
-      # Get database/collection from client with csharpLegacy UUID representation
-      csharp_database = client.get_database('csharp_db', codec_options=csharp_opts)
-      csharp_collection = client.testdb.get_collection('csharp_coll', codec_options=csharp_opts)
-
-      # Get database/collection from existing database/collection with javaLegacy UUID representation
-      java_database = csharp_database.with_options(codec_options=java_opts)
-      java_collection = csharp_collection.with_options(codec_options=java_opts)
-
-Supported UUID Representations
-------------------------------
-
-.. list-table::
-   :header-rows: 1
-
-   * - UUID Representation
-     - Default?
- - Encode :class:`uuid.UUID` to - - Decode :class:`~bson.binary.Binary` subtype 4 to - - Decode :class:`~bson.binary.Binary` subtype 3 to - - * - :ref:`standard-representation-details` - - No - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` - - :class:`~bson.binary.Binary` subtype 3 - - * - :ref:`unspecified-representation-details` - - Yes, in PyMongo>=4 - - Raise :exc:`ValueError` - - :class:`~bson.binary.Binary` subtype 4 - - :class:`~bson.binary.Binary` subtype 3 - - * - :ref:`python-legacy-representation-details` - - No - - :class:`~bson.binary.Binary` subtype 3 with standard byte-order - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` - - * - :ref:`java-legacy-representation-details` - - No - - :class:`~bson.binary.Binary` subtype 3 with Java legacy byte-order - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` - - * - :ref:`csharp-legacy-representation-details` - - No - - :class:`~bson.binary.Binary` subtype 3 with C# legacy byte-order - - :class:`~bson.binary.Binary` subtype 4 - - :class:`uuid.UUID` - -We now detail the behavior and use-case for each supported UUID -representation. - -.. _unspecified-representation-details: - -``UNSPECIFIED`` -^^^^^^^^^^^^^^^ - -.. attention:: Starting in PyMongo 4.0, - :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` is the default - UUID representation used by PyMongo. - -The :data:`~bson.binary.UuidRepresentation.UNSPECIFIED` representation -prevents the incorrect interpretation of UUID bytes by stopping short of -automatically converting UUID fields in BSON to native UUID types. Decoding -a UUID when using this representation returns a :class:`~bson.binary.Binary` -object instead. If required, users can coerce the decoded -:class:`~bson.binary.Binary` objects into native UUIDs using the -:meth:`~bson.binary.Binary.as_uuid` method and specifying the appropriate -representation format. The following example shows -what this might look like for a UUID stored by the C# driver:: - - from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS - from bson.binary import Binary, UuidRepresentation - from uuid import uuid4 - - # Using UuidRepresentation.CSHARP_LEGACY - csharp_opts = CodecOptions(uuid_representation=UuidRepresentation.CSHARP_LEGACY) - - # Store a legacy C#-formatted UUID - input_uuid = uuid4() - collection = client.testdb.get_collection('test', codec_options=csharp_opts) - collection.insert_one({'_id': 'foo', 'uuid': input_uuid}) - - # Using UuidRepresentation.UNSPECIFIED - unspec_opts = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) - unspec_collection = client.testdb.get_collection('test', codec_options=unspec_opts) - - # UUID fields are decoded as Binary when UuidRepresentation.UNSPECIFIED is configured - document = unspec_collection.find_one({'_id': 'foo'}) - decoded_field = document['uuid'] - assert isinstance(decoded_field, Binary) - - # Binary.as_uuid() can be used to coerce the decoded value to a native UUID - decoded_uuid = decoded_field.as_uuid(UuidRepresentation.CSHARP_LEGACY) - assert decoded_uuid == input_uuid - -Native :class:`uuid.UUID` objects cannot directly be encoded to -:class:`~bson.binary.Binary` when the UUID representation is ``UNSPECIFIED`` -and attempting to do so will result in an exception:: - - unspec_collection.insert_one({'_id': 'bar', 'uuid': uuid4()}) - Traceback (most recent call last): - ... - ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. 
-.. _standard-representation-details:
-
-``STANDARD``
-^^^^^^^^^^^^
-
-.. attention:: This UUID representation should be used by new applications or
-   applications that are encoding and/or decoding UUIDs in MongoDB for the
-   first time.
-
-The :data:`~bson.binary.UuidRepresentation.STANDARD` representation
-enables cross-language compatibility by ensuring the same byte-ordering
-when encoding UUIDs from all drivers. UUIDs written by a driver with this
-representation configured will be handled correctly by every other driver,
-provided it is also configured with the ``STANDARD`` representation.
-
-``STANDARD`` encodes native :class:`uuid.UUID` objects to
-:class:`~bson.binary.Binary` subtype 4 objects.
-
-.. _python-legacy-representation-details:
-
-``PYTHON_LEGACY``
-^^^^^^^^^^^^^^^^^
-
-.. attention:: This UUID representation should be used when reading UUIDs
-   generated by existing applications that use the Python driver
-   but **don't** explicitly set a UUID representation.
-
-.. attention:: :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY`
-   was the default UUID representation in PyMongo 3.
-
-The :data:`~bson.binary.UuidRepresentation.PYTHON_LEGACY` representation
-corresponds to the legacy representation of UUIDs used by PyMongo. This
-representation conforms with
-`RFC 4122 Section 4.1.2 `_.
-
-The following example illustrates the use of this representation::
-
-    from uuid import uuid4
-
-    from bson.codec_options import CodecOptions, DEFAULT_CODEC_OPTIONS
-    from bson.binary import Binary, UuidRepresentation
-
-    # No configured UUID representation
-    collection = client.python_legacy.get_collection('test', codec_options=DEFAULT_CODEC_OPTIONS)
-
-    # Using UuidRepresentation.PYTHON_LEGACY
-    pylegacy_opts = CodecOptions(uuid_representation=UuidRepresentation.PYTHON_LEGACY)
-    pylegacy_collection = client.python_legacy.get_collection('test', codec_options=pylegacy_opts)
-
-    # UUIDs written by PyMongo 3 with no UuidRepresentation configured
-    # (or PyMongo 4.0 with PYTHON_LEGACY) can be queried using PYTHON_LEGACY
-    uuid_1 = uuid4()
-    pylegacy_collection.insert_one({'uuid': uuid_1})
-    document = pylegacy_collection.find_one({'uuid': uuid_1})
-
-``PYTHON_LEGACY`` encodes native :class:`uuid.UUID` objects to
-:class:`~bson.binary.Binary` subtype 3 objects, preserving the same
-byte-order as :attr:`~uuid.UUID.bytes`::
-
-    # Write another UUID through the PYTHON_LEGACY collection; it is stored
-    # as Binary subtype 3 with the same byte-order as UUID.bytes
-    uuid_2 = uuid4()
-    pylegacy_collection.insert_one({'uuid': uuid_2})
-
-    # The raw Binary subtype 3 value matches UUID.bytes exactly, so even a
-    # collection with no configured representation can query for it directly
-    document = collection.find_one({'uuid': Binary(uuid_2.bytes, subtype=3)})
-    assert document['uuid'].as_uuid(UuidRepresentation.PYTHON_LEGACY) == uuid_2
-
-.. _java-legacy-representation-details:
-
-``JAVA_LEGACY``
-^^^^^^^^^^^^^^^
-
-.. attention:: This UUID representation should be used when reading UUIDs
-   written to MongoDB by legacy applications (i.e. applications that don't
-   use the ``STANDARD`` representation) using the Java driver.
-
-The :data:`~bson.binary.UuidRepresentation.JAVA_LEGACY` representation
-corresponds to the legacy representation of UUIDs used by the MongoDB Java
-Driver.
-
-.. note:: The ``JAVA_LEGACY`` representation reverses the order of bytes 0-7,
-   and bytes 8-15.
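-This reordering can be reproduced in pure Python. A minimal sketch showing
-that :meth:`~bson.binary.Binary.from_uuid` with ``JAVA_LEGACY`` stores the
-two 8-byte halves of :attr:`~uuid.UUID.bytes` reversed::
-
-    from uuid import UUID
-    from bson.binary import Binary, UuidRepresentation
-
-    example = UUID('00112233-4455-6677-8899-aabbccddeeff')
-
-    # Reverse bytes 0-7 and bytes 8-15, as the legacy Java driver did.
-    java_bytes = example.bytes[:8][::-1] + example.bytes[8:][::-1]
-
-    encoded = Binary.from_uuid(example, UuidRepresentation.JAVA_LEGACY)
-    assert encoded.subtype == 3
-    assert bytes(encoded) == java_bytes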
-As an example, consider the same UUID described in :ref:`example-legacy-uuid`.
-Let us assume that an application used the Java driver without an explicitly
-specified UUID representation to insert the example UUID
-``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this
-value using ``PYTHON_LEGACY``, we end up with an entirely different UUID::
-
-    UUID('77665544-3322-1100-ffee-ddccbbaa9988')
-
-However, if we explicitly set the representation to
-:data:`~bson.binary.UuidRepresentation.JAVA_LEGACY`, we get the correct result::
-
-    UUID('00112233-4455-6677-8899-aabbccddeeff')
-
-PyMongo uses the specified UUID representation to reorder the BSON bytes and
-load them correctly. ``JAVA_LEGACY`` encodes native :class:`uuid.UUID` objects
-to :class:`~bson.binary.Binary` subtype 3 objects, while performing the same
-byte-reordering as the legacy Java driver's UUID to BSON encoder.
-
-.. _csharp-legacy-representation-details:
-
-``CSHARP_LEGACY``
-^^^^^^^^^^^^^^^^^
-
-.. attention:: This UUID representation should be used when reading UUIDs
-   written to MongoDB by legacy applications (i.e. applications that don't
-   use the ``STANDARD`` representation) using the C# driver.
-
-The :data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY` representation
-corresponds to the legacy representation of UUIDs used by the MongoDB C#
-Driver.
-
-.. note:: The ``CSHARP_LEGACY`` representation reverses the order of bytes 0-3,
-   bytes 4-5, and bytes 6-7.
-
-As an example, consider the same UUID described in :ref:`example-legacy-uuid`.
-Let us assume that an application used the C# driver without an explicitly
-specified UUID representation to insert the example UUID
-``00112233-4455-6677-8899-aabbccddeeff`` into MongoDB. If we try to read this
-value using ``PYTHON_LEGACY``, we end up with an entirely different UUID::
-
-    UUID('33221100-5544-7766-8899-aabbccddeeff')
-
-However, if we explicitly set the representation to
-:data:`~bson.binary.UuidRepresentation.CSHARP_LEGACY`, we get the correct result::
-
-    UUID('00112233-4455-6677-8899-aabbccddeeff')
-
-PyMongo uses the specified UUID representation to reorder the BSON bytes and
-load them correctly. ``CSHARP_LEGACY`` encodes native :class:`uuid.UUID`
-objects to :class:`~bson.binary.Binary` subtype 3 objects, while performing
-the same byte-reordering as the legacy C# driver's UUID to BSON encoder.
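-The C# legacy layout happens to match Python's :attr:`uuid.UUID.bytes_le`
-attribute, which makes the reordering easy to verify. A minimal sketch::
-
-    from uuid import UUID
-    from bson.binary import Binary, UuidRepresentation
-
-    example = UUID('00112233-4455-6677-8899-aabbccddeeff')
-
-    # bytes_le reverses bytes 0-3, 4-5, and 6-7 relative to bytes,
-    # which is exactly the legacy C# driver's storage layout.
-    encoded = Binary.from_uuid(example, UuidRepresentation.CSHARP_LEGACY)
-    assert encoded.subtype == 3
-    assert bytes(encoded) == example.bytes_le
-    assert encoded.as_uuid(UuidRepresentation.CSHARP_LEGACY) == example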
diff --git a/doc/faq.rst b/doc/faq.rst
deleted file mode 100644
index ca83f5de4c..0000000000
--- a/doc/faq.rst
+++ /dev/null
@@ -1,544 +0,0 @@
-Frequently Asked Questions
-==========================
-
-.. contents::
-
-Is PyMongo thread-safe?
------------------------
-
-PyMongo is thread-safe and provides built-in connection pooling
-for threaded applications.
-
-.. _pymongo-fork-safe:
-
-Is PyMongo fork-safe?
----------------------
-
-PyMongo is not fork-safe. Care must be taken when using instances of
-:class:`~pymongo.mongo_client.MongoClient` with ``fork()``. Specifically,
-instances of MongoClient must not be copied from a parent process to
-a child process. Instead, the parent process and each child process must
-create their own instances of MongoClient. Instances of MongoClient copied from
-the parent process have a high probability of deadlock in the child process due
-to the inherent incompatibilities between ``fork()``, threads, and locks
-described :ref:`below `. PyMongo will attempt to
-issue a warning if there is a chance of this deadlock occurring.
-
-.. _pymongo-fork-safe-details:
-
-MongoClient spawns multiple threads to run background tasks such as monitoring
-connected servers. These threads share state that is protected by instances of
-:class:`~threading.Lock`, which are themselves `not fork-safe`_. The
-driver is therefore subject to the same limitations as any other multithreaded
-code that uses :class:`~threading.Lock` (and mutexes in general). One of these
-limitations is that the locks become useless after ``fork()``. During the fork,
-all locks are copied over to the child process in the same state as they were
-in the parent: if they were locked, the copied locks are also locked. The child
-created by ``fork()`` only has one thread, so any locks that were taken out by
-other threads in the parent will never be released in the child. The next time
-the child process attempts to acquire one of these locks, deadlock occurs.
-
-For a long but interesting read about the problems of Python locks in
-multithreaded contexts with ``fork()``, see http://bugs.python.org/issue6721.
-
-.. _not fork-safe: http://bugs.python.org/issue6721
-
-.. _connection-pooling:
-
-Can PyMongo help me load the results of my query as a Pandas ``DataFrame``?
----------------------------------------------------------------------------
-
-While PyMongo itself does not provide any APIs for working with
-numerical or columnar data,
-`PyMongoArrow `_
-is a companion library to PyMongo that makes it easy to load MongoDB query result sets as
-`Pandas DataFrames `_,
-`NumPy ndarrays `_, or
-`Apache Arrow Tables `_.
-
-How does connection pooling work in PyMongo?
---------------------------------------------
-
-Every :class:`~pymongo.mongo_client.MongoClient` instance has a built-in
-connection pool per server in your MongoDB topology. These pools open sockets
-on demand to support the number of concurrent MongoDB operations that your
-multi-threaded application requires. There is no thread-affinity for sockets.
-
-The size of each connection pool is capped at ``maxPoolSize``, which defaults
-to 100. If there are ``maxPoolSize`` connections to a server and all are in
-use, the next request to that server will wait until one of the connections
-becomes available.
-
-The client instance opens two additional sockets per server in your MongoDB
-topology for monitoring the server's state.
-
-For example, a client connected to a 3-node replica set opens 6 monitoring
-sockets. It also opens as many sockets as needed to support a multi-threaded
-application's concurrent operations on each server, up to ``maxPoolSize``. With
-a ``maxPoolSize`` of 100, if the application only uses the primary (the
-default), then only the primary connection pool grows and the total number of
-connections is at most 106. If the application uses a
-:class:`~pymongo.read_preferences.ReadPreference` to query the secondaries,
-their pools also grow and the total can reach 306.
-
-Additionally, the pools are rate limited such that each connection pool can
-only create at most 2 connections in parallel at any time. The connection
-creation covers all the work required to set up a new connection, including
-DNS, TCP, SSL/TLS, the MongoDB handshake, and MongoDB authentication.
-For example, if three threads concurrently attempt to check out a connection
-from an empty pool, the first two threads will begin creating new connections
-while the third thread will wait.
The third thread stops waiting when either: - -- one of the first two threads finishes creating a connection, or -- an existing connection is checked back into the pool. - -Rate limiting concurrent connection creation reduces the likelihood of -connection storms and improves the driver's ability to reuse existing -connections. - -It is possible to set the minimum number of concurrent connections to each -server with ``minPoolSize``, which defaults to 0. The connection pool will be -initialized with this number of sockets. If sockets are closed due to any -network errors, causing the total number of sockets (both in use and idle) to -drop below the minimum, more sockets are opened until the minimum is reached. - -The maximum number of milliseconds that a connection can remain idle in the -pool before being removed and replaced can be set with ``maxIdleTimeMS``, which -defaults to `None` (no limit). - -The default configuration for a :class:`~pymongo.mongo_client.MongoClient` -works for most applications:: - - client = MongoClient(host, port) - -Create this client **once** for each process, and reuse it for all -operations. It is a common mistake to create a new client for each request, -which is very inefficient. - -To support extremely high numbers of concurrent MongoDB operations within one -process, increase ``maxPoolSize``:: - - client = MongoClient(host, port, maxPoolSize=200) - -... or make it unbounded:: - - client = MongoClient(host, port, maxPoolSize=None) - -Once the pool reaches its maximum size, additional threads have to wait for -sockets to become available. PyMongo does not limit the number of threads -that can wait for sockets to become available and it is the application's -responsibility to limit the size of its thread pool to bound queuing during a -load spike. Threads are allowed to wait for any length of time unless -``waitQueueTimeoutMS`` is defined:: - - client = MongoClient(host, port, waitQueueTimeoutMS=100) - -A thread that waits more than 100ms (in this example) for a socket raises -:exc:`~pymongo.errors.ConnectionFailure`. Use this option if it is more -important to bound the duration of operations during a load spike than it is to -complete every operation. - -When :meth:`~pymongo.mongo_client.MongoClient.close` is called by any thread, -all idle sockets are closed, and all sockets that are in use will be closed as -they are returned to the pool. - -Does PyMongo support Python 3? ------------------------------- - -PyMongo supports CPython 3.7+ and PyPy3.7+. See the :doc:`python3` for details. - -Does PyMongo support asynchronous frameworks like Gevent, asyncio, Tornado, or Twisted? ---------------------------------------------------------------------------------------- - -PyMongo fully supports :doc:`Gevent `. - -To use MongoDB with `asyncio `_ -or `Tornado `_, see the -`Motor `_ project. - -For `Twisted `_, see `TxMongo -`_. Its stated mission is to keep feature -parity with PyMongo. - -.. _writes-and-ids: - -Why does PyMongo add an _id field to all of my documents? ---------------------------------------------------------- - -When a document is inserted to MongoDB using -:meth:`~pymongo.collection.Collection.insert_one`, -:meth:`~pymongo.collection.Collection.insert_many`, or -:meth:`~pymongo.collection.Collection.bulk_write`, and that document does not -include an ``_id`` field, PyMongo automatically adds one for you, set to an -instance of :class:`~bson.objectid.ObjectId`. 
For example:: - - >>> my_doc = {'x': 1} - >>> collection.insert_one(my_doc) - - >>> my_doc - {'x': 1, '_id': ObjectId('560db337fba522189f171720')} - -Users often discover this behavior when calling -:meth:`~pymongo.collection.Collection.insert_many` with a list of references -to a single document raises :exc:`~pymongo.errors.BulkWriteError`. Several -Python idioms lead to this pitfall:: - - >>> doc = {} - >>> collection.insert_many(doc for _ in range(10)) - Traceback (most recent call last): - ... - pymongo.errors.BulkWriteError: batch op errors occurred - >>> doc - {'_id': ObjectId('560f171cfba52279f0b0da0c')} - - >>> docs = [{}] - >>> collection.insert_many(docs * 10) - Traceback (most recent call last): - ... - pymongo.errors.BulkWriteError: batch op errors occurred - >>> docs - [{'_id': ObjectId('560f1933fba52279f0b0da0e')}] - -PyMongo adds an ``_id`` field in this manner for a few reasons: - -- All MongoDB documents are required to have an ``_id`` field. -- If PyMongo were to insert a document without an ``_id`` MongoDB would add one - itself, but it would not report the value back to PyMongo. -- Copying the document to insert before adding the ``_id`` field would be - prohibitively expensive for most high write volume applications. - -If you don't want PyMongo to add an ``_id`` to your documents, insert only -documents that already have an ``_id`` field, added by your application. - -Key order in subdocuments -- why does my query work in the shell but not PyMongo? ---------------------------------------------------------------------------------- - -.. - Note: We should rework this section now that Python 3.6+ has ordered dict. - -.. testsetup:: key-order - - from bson.son import SON - from pymongo.mongo_client import MongoClient - - collection = MongoClient().test.collection - collection.drop() - collection.insert_one({'_id': 1.0, - 'subdocument': SON([('b', 1.0), ('a', 1.0)])}) - -The key-value pairs in a BSON document can have any order (except that ``_id`` -is always first). The mongo shell preserves key order when reading and writing -data. Observe that "b" comes before "a" when we create the document and when it -is displayed: - -.. code-block:: javascript - - > // mongo shell. - > db.collection.insertOne( { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } ) - WriteResult({ "nInserted" : 1 }) - > db.collection.findOne() - { "_id" : 1, "subdocument" : { "b" : 1, "a" : 1 } } - -PyMongo represents BSON documents as Python dicts by default, and the order -of keys in dicts is not defined. That is, a dict declared with the "a" key -first is the same, to Python, as one with "b" first: - - >>> print({'a': 1.0, 'b': 1.0}) - {'a': 1.0, 'b': 1.0} - >>> print({'b': 1.0, 'a': 1.0}) - {'a': 1.0, 'b': 1.0} - -Therefore, Python dicts are not guaranteed to show keys in the order they are -stored in BSON. Here, "a" is shown before "b": - - >>> print(collection.find_one()) - {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} - -To preserve order when reading BSON, use the :class:`~bson.son.SON` class, -which is a dict that remembers its key order. First, get a handle to the -collection, configured to use :class:`~bson.son.SON` instead of dict: - -.. 
doctest:: key-order - :options: +NORMALIZE_WHITESPACE - - >>> from bson import CodecOptions, SON - >>> opts = CodecOptions(document_class=SON) - >>> opts - CodecOptions(document_class=...SON..., tz_aware=False, uuid_representation=UuidRepresentation.UNSPECIFIED, unicode_decode_error_handler='strict', tzinfo=None, type_registry=TypeRegistry(type_codecs=[], fallback_encoder=None)) - >>> collection_son = collection.with_options(codec_options=opts) - -Now, documents and subdocuments in query results are represented with -:class:`~bson.son.SON` objects: - -.. doctest:: key-order - - >>> print(collection_son.find_one()) - SON([('_id', 1.0), ('subdocument', SON([('b', 1.0), ('a', 1.0)]))]) - -The subdocument's actual storage layout is now visible: "b" is before "a". - -Because a dict's key order is not defined, you cannot predict how it will be -serialized **to** BSON. But MongoDB considers subdocuments equal only if their -keys have the same order. So if you use a dict to query on a subdocument it may -not match: - - >>> collection.find_one({'subdocument': {'a': 1.0, 'b': 1.0}}) is None - True - -Swapping the key order in your query makes no difference: - - >>> collection.find_one({'subdocument': {'b': 1.0, 'a': 1.0}}) is None - True - -... because, as we saw above, Python considers the two dicts the same. - -There are two solutions. First, you can match the subdocument field-by-field: - - >>> collection.find_one({'subdocument.a': 1.0, - ... 'subdocument.b': 1.0}) - {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} - -The query matches any subdocument with an "a" of 1.0 and a "b" of 1.0, -regardless of the order you specify them in Python or the order they are stored -in BSON. Additionally, this query now matches subdocuments with additional -keys besides "a" and "b", whereas the previous query required an exact match. - -The second solution is to use a :class:`~bson.son.SON` to specify the key order: - - >>> query = {'subdocument': SON([('b', 1.0), ('a', 1.0)])} - >>> collection.find_one(query) - {'_id': 1.0, 'subdocument': {'a': 1.0, 'b': 1.0}} - -The key order you use when you create a :class:`~bson.son.SON` is preserved -when it is serialized to BSON and used as a query. Thus you can create a -subdocument that exactly matches the subdocument in the collection. - -.. seealso:: `MongoDB Manual entry on subdocument matching - `_. - -What does *CursorNotFound* cursor id not valid at server mean? --------------------------------------------------------------- -Cursors in MongoDB can timeout on the server if they've been open for -a long time without any operations being performed on them. This can -lead to an :class:`~pymongo.errors.CursorNotFound` exception being -raised when attempting to iterate the cursor. - -How do I change the timeout value for cursors? ----------------------------------------------- -MongoDB doesn't support custom timeouts for cursors, but cursor -timeouts can be turned off entirely. Pass ``no_cursor_timeout=True`` to -:meth:`~pymongo.collection.Collection.find`. - -How can I store :mod:`decimal.Decimal` instances? -------------------------------------------------- - -PyMongo >= 3.4 supports the Decimal128 BSON type introduced in MongoDB 3.4. -See :mod:`~bson.decimal128` for more information. - -MongoDB <= 3.2 only supports IEEE 754 floating points - the same as the -Python float type. 
The only way PyMongo could store Decimal instances to -these versions of MongoDB would be to convert them to this standard, so -you'd really only be storing floats anyway - we force users to do this -conversion explicitly so that they are aware that it is happening. - -I'm saving ``9.99`` but when I query my document contains ``9.9900000000000002`` - what's going on here? --------------------------------------------------------------------------------------------------------- -The database representation is ``9.99`` as an IEEE floating point (which -is common to MongoDB and Python as well as most other modern -languages). The problem is that ``9.99`` cannot be represented exactly -with a double precision floating point - this is true in some versions of -Python as well: - - >>> 9.99 - 9.9900000000000002 - -The result that you get when you save ``9.99`` with PyMongo is exactly the -same as the result you'd get saving it with the JavaScript shell or -any of the other languages (and as the data you're working with when -you type ``9.99`` into a Python program). - -Can you add attribute style access for documents? -------------------------------------------------- -This request has come up a number of times but we've decided not to -implement anything like this. The relevant `jira case -`_ has some information -about the decision, but here is a brief summary: - -1. This will pollute the attribute namespace for documents, so could - lead to subtle bugs / confusing errors when using a key with the - same name as a dictionary method. - -2. The only reason we even use SON objects instead of regular - dictionaries is to maintain key ordering, since the server - requires this for certain operations. So we're hesitant to - needlessly complicate SON (at some point it's hypothetically - possible we might want to revert back to using dictionaries alone, - without breaking backwards compatibility for everyone). - -3. It's easy (and Pythonic) for new users to deal with documents, - since they behave just like dictionaries. If we start changing - their behavior it adds a barrier to entry for new users - another - class to learn. - -What is the correct way to handle time zones with PyMongo? ----------------------------------------------------------- - -See :doc:`examples/datetimes` for examples on how to handle -:class:`~datetime.datetime` objects correctly. - -How can I save a :mod:`datetime.date` instance? ------------------------------------------------ -PyMongo doesn't support saving :mod:`datetime.date` instances, since -there is no BSON type for dates without times. Rather than having the -driver enforce a convention for converting :mod:`datetime.date` -instances to :mod:`datetime.datetime` instances for you, any -conversion should be performed in your client code. - -.. _web-application-querying-by-objectid: - -When I query for a document by ObjectId in my web application I get no result ------------------------------------------------------------------------------ -It's common in web applications to encode documents' ObjectIds in URLs, like:: - - "/posts/50b3bda58a02fb9a84d8991e" - -Your web framework will pass the ObjectId portion of the URL to your request -handler as a string, so it must be converted to :class:`~bson.objectid.ObjectId` -before it is passed to :meth:`~pymongo.collection.Collection.find_one`. It is a -common mistake to forget to do this conversion. 
Here's how to do it correctly -in Flask_ (other web frameworks are similar):: - - from pymongo import MongoClient - from bson.objectid import ObjectId - - from flask import Flask, render_template - - client = MongoClient() - app = Flask(__name__) - - @app.route("/posts/<_id>") - def show_post(_id): - # NOTE!: converting _id from string to ObjectId before passing to find_one - post = client.db.posts.find_one({'_id': ObjectId(_id)}) - return render_template('post.html', post=post) - - if __name__ == "__main__": - app.run() - -.. _Flask: http://flask.pocoo.org/ - -.. seealso:: :ref:`querying-by-objectid` - -How can I use PyMongo from Django? ----------------------------------- -`Django `_ is a popular Python web -framework. Django includes an ORM, :mod:`django.db`. Currently, -there's no official MongoDB backend for Django. - -`django-mongodb-engine `_ -is an unofficial MongoDB backend that supports Django aggregations, (atomic) -updates, embedded objects, Map/Reduce and GridFS. It allows you to use most -of Django's built-in features, including the ORM, admin, authentication, site -and session frameworks and caching. - -However, it's easy to use MongoDB (and PyMongo) from Django -without using a Django backend. Certain features of Django that require -:mod:`django.db` (admin, authentication and sessions) will not work -using just MongoDB, but most of what Django provides can still be -used. - -One project which should make working with MongoDB and Django easier -is `mango `_. Mango is a set of -MongoDB backends for Django sessions and authentication (bypassing -:mod:`django.db` entirely). - -.. _using-with-mod-wsgi: - -Does PyMongo work with **mod_wsgi**? ------------------------------------- -Yes. See the configuration guide for :ref:`pymongo-and-mod_wsgi`. - -Does PyMongo work with PythonAnywhere? --------------------------------------- -No. PyMongo creates Python threads which -`PythonAnywhere `_ does not support. For more -information see `PYTHON-1495 `_. - -How can I use something like Python's ``json`` module to encode my documents to JSON? -------------------------------------------------------------------------------------- -:mod:`~bson.json_util` is PyMongo's built in, flexible tool for using -Python's :mod:`json` module with BSON documents and `MongoDB Extended JSON -`_. The -:mod:`json` module won't work out of the box with all documents from PyMongo -as PyMongo supports some special types (like :class:`~bson.objectid.ObjectId` -and :class:`~bson.dbref.DBRef`) that are not supported in JSON. - -`python-bsonjs `_ is a fast -BSON to MongoDB Extended JSON converter built on top of -`libbson `_. `python-bsonjs` does not -depend on PyMongo and can offer a nice performance improvement over -:mod:`~bson.json_util`. `python-bsonjs` works best with PyMongo when using -:class:`~bson.raw_bson.RawBSONDocument`. - -Why do I get OverflowError decoding dates stored by another language's driver? ------------------------------------------------------------------------------- -PyMongo decodes BSON datetime values to instances of Python's -:class:`datetime.datetime`. Instances of :class:`datetime.datetime` are -limited to years between :data:`datetime.MINYEAR` (usually 1) and -:data:`datetime.MAXYEAR` (usually 9999). Some MongoDB drivers (e.g. the PHP -driver) can store BSON datetimes with year values far outside those supported -by :class:`datetime.datetime`. - -There are a few ways to work around this issue. 
One option is to filter -out documents with values outside of the range supported by -:class:`datetime.datetime`:: - - >>> from datetime import datetime - >>> coll = client.test.dates - >>> cur = coll.find({'dt': {'$gte': datetime.min, '$lte': datetime.max}}) - -Another option, assuming you don't need the datetime field, is to filter out -just that field:: - - >>> cur = coll.find({}, projection={'dt': False}) - -.. _multiprocessing: - -Using PyMongo with Multiprocessing ----------------------------------- - -On Unix systems the multiprocessing module spawns processes using ``fork()``. -Care must be taken when using instances of -:class:`~pymongo.mongo_client.MongoClient` with ``fork()``. Specifically, -instances of MongoClient must not be copied from a parent process to a child -process. Instead, the parent process and each child process must create their -own instances of MongoClient. For example:: - - # Each process creates its own instance of MongoClient. - def func(): - db = pymongo.MongoClient().mydb - # Do something with db. - - proc = multiprocessing.Process(target=func) - proc.start() - -**Never do this**:: - - client = pymongo.MongoClient() - - # Each child process attempts to copy a global MongoClient - # created in the parent process. Never do this. - def func(): - db = client.mydb - # Do something with db. - - proc = multiprocessing.Process(target=func) - proc.start() - -Instances of MongoClient copied from the parent process have a high probability -of deadlock in the child process due to -:ref:`inherent incompatibilities between fork(), threads, and locks -`. PyMongo will attempt to issue a warning if there -is a chance of this deadlock occurring. - -.. seealso:: :ref:`pymongo-fork-safe` diff --git a/doc/index.rst b/doc/index.rst index b43f5cf580..85812d1b14 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,61 +1,23 @@ PyMongo |release| Documentation =============================== +.. note:: The PyMongo documentation has been migrated to the + `MongoDB Documentation site `_. + This site contains only the detailed changelog and API docs, while the + rest of the documentation appears on the MongoDB Documentation site. + Overview -------- **PyMongo** is a Python distribution containing tools for working with -`MongoDB `_, and is the recommended way to -work with MongoDB from Python. This documentation attempts to explain -everything you need to know to use **PyMongo**. - -.. todo:: a list of PyMongo's features - -:doc:`installation` - Instructions on how to get the distribution. - -:doc:`tutorial` - Start here for a quick overview. - -:doc:`examples/index` - Examples of how to perform specific tasks. - -:doc:`atlas` - Using PyMongo with MongoDB Atlas. - -:doc:`examples/tls` - Using PyMongo with TLS / SSL. - -:doc:`examples/encryption` - Using PyMongo with client side encryption. - -:doc:`examples/type_hints` - Using PyMongo with type hints. - -:doc:`faq` - Some questions that come up often. - -:doc:`migrate-to-pymongo4` - A PyMongo 3.x to 4.x migration guide. - -:doc:`python3` - Frequently asked questions about python 3 support. - -:doc:`compatibility-policy` - Explanation of deprecations, and how to keep pace with changes in PyMongo's - API. +`MongoDB `_, and is the recommended way to +work with MongoDB from Python. :doc:`api/index` The complete API documentation, organized by module. -:doc:`tools` - A listing of Python tools and libraries that have been written for - MongoDB. +:doc:`changelog` + A full list of changes to PyMongo. 
-:doc:`developer/index`
-  Developer guide for contributors to PyMongo.
-
-:doc:`common-issues`
-  Common issues encountered when using PyMongo.
 
 Getting Help
 ------------
@@ -70,7 +32,7 @@ Issues
 ------
 All issues should be reported (and can be tracked / voted for /
 commented on) at the main `MongoDB JIRA bug tracker
-`_, in the "Python Driver"
+`_, in the "Python Driver"
 project.
 
 Feature Requests / Feedback
@@ -83,26 +45,16 @@ Contributing
 ------------
 **PyMongo** has a large :doc:`community ` and
 contributions are always encouraged. Contributions can be as simple as
 minor tweaks to this documentation. To contribute, fork the project on
-`GitHub `_ and send a
+`GitHub `_ and send a
 pull request.
 
-Changes
--------
-See the :doc:`changelog` for a full list of changes to PyMongo.
-For older versions of the documentation please see the
-`archive list `_.
-
 About This Documentation
 ------------------------
 This documentation is generated using the `Sphinx
 `_ documentation generator. The source files
 for the documentation are located in the *doc/* directory of the
-**PyMongo** distribution. To generate the docs locally run the
-following command from the root directory of the **PyMongo** source:
-
-.. code-block:: bash
-
-    $ python setup.py doc
+**PyMongo** distribution. See the PyMongo `contributing guide `_
+for instructions on building the docs from source.
 
 Indices and tables
 ------------------
@@ -114,17 +66,6 @@ Indices and tables
 .. toctree::
    :hidden:
 
-   atlas
-   installation
-   tutorial
-   examples/index
-   faq
-   compatibility-policy
    api/index
-   tools
-   contributors
    changelog
-   python3
-   migrate-to-pymongo4
-   developer/index
-   common-issues
+   contributors
diff --git a/doc/installation.rst b/doc/installation.rst
deleted file mode 100644
index 788faf46cc..0000000000
--- a/doc/installation.rst
+++ /dev/null
@@ -1,196 +0,0 @@
-Installing / Upgrading
-======================
-.. highlight:: bash
-
-**PyMongo** is in the `Python Package Index
-`_.
-
-.. warning:: **Do not install the "bson" package from pypi.** PyMongo comes
-   with its own bson package; doing "pip install bson" or "easy_install bson"
-   installs a third-party package that is incompatible with PyMongo.
-
-Installing with pip
--------------------
-
-We recommend using `pip `_
-to install pymongo on all platforms::
-
-    $ python3 -m pip install pymongo
-
-To get a specific version of pymongo::
-
-    $ python3 -m pip install pymongo==3.5.1
-
-To upgrade using pip::
-
-    $ python3 -m pip install --upgrade pymongo
-
-Dependencies
-------------
-
-PyMongo supports CPython 3.7+ and PyPy3.7+.
-
-Optional dependencies:
-
-GSSAPI authentication requires `pykerberos
-`_ on Unix or `WinKerberos
-`_ on Windows.
The correct -dependency can be installed automatically along with PyMongo:: - - $ python3 -m pip install "pymongo[gssapi]" - -:ref:`MONGODB-AWS` authentication requires `pymongo-auth-aws -`_:: - - $ python3 -m pip install "pymongo[aws]" - -Support for mongodb+srv:// URIs requires `dnspython -`_:: - - $ python3 -m pip install "pymongo[srv]" - -:ref:`OCSP` requires `PyOpenSSL -`_, `requests -`_ and `service_identity -`_:: - - $ python3 -m pip install "pymongo[ocsp]" - -Wire protocol compression with snappy requires `python-snappy -`_:: - - $ python3 -m pip install "pymongo[snappy]" - -Wire protocol compression with zstandard requires `zstandard -`_:: - - $ python3 -m pip install "pymongo[zstd]" - -:ref:`Client-Side Field Level Encryption` requires `pymongocrypt -`_:: - - $ python3 -m pip install "pymongo[encryption]" - -You can install all dependencies automatically with the following -command:: - - $ python3 -m pip install "pymongo[gssapi,aws,ocsp,snappy,srv,zstd,encryption]" - -Installing from source ----------------------- - -If you'd rather install directly from the source (i.e. to stay on the -bleeding edge), install the C extension dependencies then check out the -latest source from GitHub and install the driver from the resulting tree:: - - $ git clone https://github.com/mongodb/mongo-python-driver.git pymongo - $ cd pymongo/ - $ python3 setup.py install - -Installing from source on Unix -.............................. - -To build the optional C extensions on Linux or another non-macOS Unix you must -have the GNU C compiler (gcc) installed. Depending on your flavor of Unix -(or Linux distribution) you may also need a python development package that -provides the necessary header files for your version of Python. The package -name may vary from distro to distro. - -Debian and Ubuntu users should issue the following command:: - - $ sudo apt-get install build-essential python-dev - -Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, Oracle Linux, -Fedora, etc.) should issue the following command:: - - $ sudo yum install gcc python-devel - -Installing from source on macOS / OSX -..................................... - -If you want to install PyMongo with C extensions from source you will need -the command line developer tools. On modern versions of macOS they can be -installed by running the following in Terminal (found in -/Applications/Utilities/):: - - xcode-select --install - -For older versions of OSX you may need Xcode. See the notes below for various -OSX and Xcode versions. - -**Snow Leopard (10.6)** - Xcode 3 with 'UNIX Development Support'. - -**Snow Leopard Xcode 4**: The Python versions shipped with OSX 10.6.x -are universal binaries. They support i386, PPC, and x86_64. Xcode 4 removed -support for PPC, causing the distutils version shipped with Apple's builds of -Python to fail to build the C extensions if you have Xcode 4 installed. There -is a workaround:: - - # For some Python builds from python.org - $ env ARCHFLAGS='-arch i386 -arch x86_64' python -m easy_install pymongo - -See `http://bugs.python.org/issue11623 `_ -for a more detailed explanation. - -**Lion (10.7) and newer** - PyMongo's C extensions can be built against -versions of Python 3.7+ downloaded from python.org. In all cases Xcode must be -installed with 'UNIX Development Support'. - -**Xcode 5.1**: Starting with version 5.1 the version of clang that ships with -Xcode throws an error when it encounters compiler flags it doesn't recognize. 
-This may cause C extension builds to fail with an error similar to:: - - clang: error: unknown argument: '-mno-fused-madd' [-Wunused-command-line-argument-hard-error-in-future] - -There are workarounds:: - - # Apple specified workaround for Xcode 5.1 - # easy_install - $ ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future easy_install pymongo - # or pip - $ ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future pip install pymongo - - # Alternative workaround using CFLAGS - # easy_install - $ CFLAGS=-Qunused-arguments easy_install pymongo - # or pip - $ CFLAGS=-Qunused-arguments pip install pymongo - - -Installing from source on Windows -................................. - -If you want to install PyMongo with C extensions from source the following -requirements apply to both CPython and ActiveState's ActivePython: - -Windows -~~~~~~~ - -Install Visual Studio 2015+. - -.. _install-no-c: - -Installing Without C Extensions -------------------------------- - -By default, the driver attempts to build and install optional C -extensions (used for increasing performance) when it is installed. If -any extension fails to build the driver will be installed anyway but a -warning will be printed. - -If you wish to install PyMongo without the C extensions, even if the -extensions build properly, it can be done using a command line option to -*setup.py*:: - - $ python3 setup.py --no_ext install - -Installing a beta or release candidate --------------------------------------- - -MongoDB, Inc. may occasionally tag a beta or release candidate for testing by -the community before final release. These releases will not be uploaded to pypi -but can be found on the -`GitHub tags page `_. -They can be installed by passing the full URL for the tag to pip:: - - $ python3 -m pip install https://github.com/mongodb/mongo-python-driver/archive/3.11.0rc0.tar.gz diff --git a/doc/make.bat b/doc/make.bat index 2119f51099..aa1adb91a6 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -21,7 +21,7 @@ if errorlevel 9009 ( echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ + echo.https://sphinx-doc.org/ exit /b 1 ) diff --git a/doc/migrate-to-pymongo4.rst b/doc/migrate-to-pymongo4.rst index 5843a2261b..fda3e2e129 100644 --- a/doc/migrate-to-pymongo4.rst +++ b/doc/migrate-to-pymongo4.rst @@ -1,13 +1,14 @@ +:orphan: + .. _pymongo4-migration-guide: PyMongo 4 Migration Guide ========================= -.. contents:: - .. testsetup:: from pymongo import MongoClient, ReadPreference + client = MongoClient() database = client.my_database collection = database.my_collection @@ -35,7 +36,7 @@ Python 3.6+ PyMongo 4.0 drops support for Python 2.7, 3.4, and 3.5. Users who wish to upgrade to 4.x must first upgrade to Python 3.6.2+. Users upgrading from -Python 2 should consult the :doc:`python3`. +Python 2 should consult `Python 3 `_. Enable Deprecation Warnings --------------------------- @@ -119,7 +120,7 @@ Renamed URI options Several deprecated URI options have been renamed to the standardized option names defined in the -`URI options specification `_. +`URI options specification `_. The old option names and their renamed equivalents are summarized in the table below. Some renamed options have different semantics from the option being replaced as noted in the 'Migration Notes' column. @@ -329,13 +330,13 @@ Removed :meth:`pymongo.database.Database.collection_names`. 
Use :meth:`~pymongo.database.Database.list_collection_names` instead. Code like this:: - names = client.collection_names() - non_system_names = client.collection_names(include_system_collections=False) + names = client.db.collection_names() + non_system_names = client.db.collection_names(include_system_collections=False) can be changed to this:: - names = client.list_collection_names() - non_system_names = client.list_collection_names(filter={"name": {"$regex": r"^(?!system\\.)"}}) + names = client.db.list_collection_names() + non_system_names = client.db.list_collection_names(filter={"name": {"$regex": "^(?!system\\.)"}}) Database.current_op is removed .............................. @@ -638,12 +639,10 @@ Collection.group is removed ........................... Removed :meth:`pymongo.collection.Collection.group`. This method was -deprecated in PyMongo 3.5. MongoDB 4.2 removed the `group command`_. +deprecated in PyMongo 3.5. MongoDB 4.2 removed the group command. Use :meth:`~pymongo.collection.Collection.aggregate` with the ``$group`` stage instead. -.. _group command: https://mongodb.com/docs/manual/reference/command/group/ - Collection.map_reduce and Collection.inline_map_reduce are removed .................................................................. @@ -799,8 +798,7 @@ incoming documents after receiving them from PyMongo. Alternatively, if your application uses the ``SONManipulator`` API to convert custom types to BSON, the :class:`~bson.codec_options.TypeCodec` and :class:`~bson.codec_options.TypeRegistry` APIs may be a suitable alternative. -For more information, see the -:doc:`custom type example `. +For more information, see `Custom Types `_. ``SON().items()`` now returns ``dict_items`` object. ---------------------------------------------------- @@ -840,7 +838,6 @@ pymongo.GEOHAYSTACK is removed Removed :attr:`pymongo.GEOHAYSTACK`. Replace with "geoHaystack" or create a 2d index and use $geoNear or $geoWithin instead. -See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack. UUIDLegacy is removed --------------------- @@ -871,7 +868,7 @@ GridFS changes disable_md5 parameter is removed ................................ -Removed the `disable_md5` option for :class:`~gridfs.GridFSBucket` and +Removed the ``disable_md5`` option for :class:`~gridfs.GridFSBucket` and :class:`~gridfs.GridFS`. GridFS no longer generates checksums. Applications that desire a file digest should implement it outside GridFS and store it with other file metadata. For example:: @@ -941,9 +938,7 @@ Collection.parallel_scan is removed ................................... Removed :meth:`~pymongo.collection.Collection.parallel_scan`. MongoDB 4.2 -removed the `parallelCollectionScan command`_. There is no replacement. - -.. _parallelCollectionScan command: https://mongodb.com/docs/manual/reference/command/parallelCollectionScan/ +removed the parallelCollectionScan command. There is no replacement. pymongo.message helpers are removed ................................... @@ -970,18 +965,25 @@ correct type. Otherwise the document is returned as normal. Previously, any subdocument containing a ``$ref`` field would be decoded as a :class:`~bson.dbref.DBRef`. -.. _DBRef specification: https://github.com/mongodb/specifications/blob/5a8c8d7/source/dbref.rst +.. _DBRef specification: https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md Encoding a UUID raises an error by default .......................................... 
-The default uuid_representation for :class:`~bson.codec_options.CodecOptions`, +The default ``uuid_representation`` for :class:`~bson.codec_options.CodecOptions`, :class:`~bson.json_util.JSONOptions`, and :class:`~pymongo.mongo_client.MongoClient` has been changed from :data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to :data:`bson.binary.UuidRepresentation.UNSPECIFIED`. Attempting to encode a :class:`uuid.UUID` instance to BSON or JSON now produces an error by default. -See :ref:`handling-uuid-data-example` for details. +If you were using UUIDs previously, you will need to set your ``uuid_representation`` to +:data:`bson.binary.UuidRepresentation.PYTHON_LEGACY` to avoid data corruption. If you do not have UUIDs, +then you should set :data:`bson.binary.UuidRepresentation.STANDARD`. If you do not explicitly set a value, +you will receive an error like this when attempting to encode a :class:`uuid.UUID`:: + + ValueError: cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted... + +See `Handling UUIDs `_ for details. Additional BSON classes implement ``__slots__`` ............................................... diff --git a/doc/python3.rst b/doc/python3.rst deleted file mode 100644 index 812bc33b35..0000000000 --- a/doc/python3.rst +++ /dev/null @@ -1,116 +0,0 @@ -Python 3 FAQ -============ - -.. contents:: - -What Python 3 versions are supported? -------------------------------------- - -PyMongo supports CPython 3.7+ and PyPy3.7+. - -Are there any PyMongo behavior changes with Python 3? ------------------------------------------------------ - -Only one intentional change. Instances of :class:`bytes` -are encoded as BSON type 5 (Binary data) with subtype 0. -In Python 3 they are decoded back to :class:`bytes`. In -Python 2 they are decoded to :class:`~bson.binary.Binary` -with subtype 0. - -For example, let's insert a :class:`bytes` instance using Python 3 then -read it back. Notice the byte string is decoded back to :class:`bytes`:: - - Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) - [Clang 6.0 (clang-600.0.57)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> import pymongo - >>> c = pymongo.MongoClient() - >>> c.test.bintest.insert_one({'binary': b'this is a byte string'}).inserted_id - ObjectId('4f9086b1fba5222021000000') - >>> c.test.bintest.find_one() - {'binary': b'this is a byte string', '_id': ObjectId('4f9086b1fba5222021000000')} - -Now retrieve the same document in Python 2. Notice the byte string is decoded -to :class:`~bson.binary.Binary`:: - - Python 2.7.6 (default, Feb 26 2014, 10:36:22) - [GCC 4.7.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pymongo - >>> c = pymongo.MongoClient() - >>> c.test.bintest.find_one() - {u'binary': Binary('this is a byte string', 0), u'_id': ObjectId('4f9086b1fba5222021000000')} - - -There is a similar change in behavior in parsing JSON binary with subtype 0. -In Python 3 they are decoded into :class:`bytes`. In Python 2 they are -decoded to :class:`~bson.binary.Binary` with subtype 0. - -For example, let's decode a JSON binary subtype 0 using Python 3. Notice the -byte string is decoded to :class:`bytes`:: - - Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) - [Clang 6.0 (clang-600.0.57)] on darwin - Type "help", "copyright", "credits" or "license" for more information. 
- >>> from bson.json_util import loads - >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') - {'b': b'this is a byte string'} - -Now decode the same JSON in Python 2 . Notice the byte string is decoded -to :class:`~bson.binary.Binary`:: - - Python 2.7.10 (default, Feb 7 2017, 00:08:15) - [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> from bson.json_util import loads - >>> loads('{"b": {"$binary": "dGhpcyBpcyBhIGJ5dGUgc3RyaW5n", "$type": "00"}}') - {u'b': Binary('this is a byte string', 0)} - -Why can't I share pickled ObjectIds between some versions of Python 2 and 3? ----------------------------------------------------------------------------- - -Instances of :class:`~bson.objectid.ObjectId` pickled using Python 2 -can always be unpickled using Python 3. - -If you pickled an ObjectId using Python 2 and want to unpickle it using -Python 3 you must pass ``encoding='latin-1'`` to pickle.loads:: - - Python 2.7.6 (default, Feb 26 2014, 10:36:22) - [GCC 4.7.3] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> from bson.objectid import ObjectId - >>> oid = ObjectId() - >>> oid - ObjectId('4f919ba2fba5225b84000000') - >>> pickle.dumps(oid) - 'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...' - - Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) - [Clang 6.0 (clang-600.0.57)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> pickle.loads(b'ccopy_reg\n_reconstructor\np0\n(cbson.objectid\...', encoding='latin-1') - ObjectId('4f919ba2fba5225b84000000') - - -If you need to pickle ObjectIds using Python 3 and unpickle them using Python 2 -you must use ``protocol <= 2``:: - - Python 3.7.9 (v3.7.9:13c94747c7, Aug 15 2020, 01:31:08) - [Clang 6.0 (clang-600.0.57)] on darwin - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> from bson.objectid import ObjectId - >>> oid = ObjectId() - >>> oid - ObjectId('4f96f20c430ee6bd06000000') - >>> pickle.dumps(oid, protocol=2) - b'\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c_codecs\nencode\...' - - Python 2.7.15 (default, Jun 21 2018, 15:00:48) - [GCC 7.3.0] on linux2 - Type "help", "copyright", "credits" or "license" for more information. - >>> import pickle - >>> pickle.loads('\x80\x02cbson.objectid\nObjectId\nq\x00)\x81q\x01c_codecs\nencode\...') - ObjectId('4f96f20c430ee6bd06000000') diff --git a/doc/tools.rst b/doc/tools.rst deleted file mode 100644 index e88b57ee69..0000000000 --- a/doc/tools.rst +++ /dev/null @@ -1,170 +0,0 @@ -Tools -===== -Many tools have been written for working with **PyMongo**. If you know -of or have created a tool for working with MongoDB from Python please -list it here. - -.. note:: We try to keep this list current. As such, projects that - have not been updated recently or appear to be unmaintained will - occasionally be removed from the list or moved to the back (to keep - the list from becoming too intimidating). - - If a project gets removed that is still being developed or is in active use - please let us know or add it back. - -ORM-like Layers ---------------- -Some people have found that they prefer to work with a layer that -has more features than PyMongo provides. Often, things like models and -validation are desired. To that end, several different ORM-like layers -have been written by various authors. 
-
-It is our recommendation that new users begin by working directly with
-PyMongo, as described in the rest of this documentation. Many people
-have found that the features of PyMongo are enough for their
-needs. Even if you eventually come to the decision to use one of these
-layers, the time spent working directly with the driver will have
-increased your understanding of how MongoDB actually works.
-
-MongoEngine
-  `MongoEngine `_ is another ORM-like
-  layer on top of PyMongo. It allows you to define schemas for
-  documents and query collections using syntax inspired by the Django
-  ORM. The code is available on `GitHub
-  `_; for more information, see
-  the `tutorial `_.
-
-MincePy
-  `MincePy `_ is an
-  object-document mapper (ODM) designed to make any Python object storable
-  and queryable in a MongoDB database. It is designed with machine learning
-  and big-data computational and experimental science applications in mind
-  but is entirely general and can be useful to anyone looking to organise,
-  share, or process large amounts of data with as little change to their
-  current workflow as possible.
-
-Ming
-  `Ming `_ (the Merciless) is a
-  library that allows you to enforce schemas on a MongoDB database in
-  your Python application. It was developed by `SourceForge
-  `_ in the course of their migration to
-  MongoDB. See the `introductory blog post
-  `_
-  for more details.
-
-MotorEngine
-  `MotorEngine `_ is a port of
-  MongoEngine to Motor, for asynchronous access with Tornado.
-  It implements the same modeling APIs to be data-portable, meaning that a
-  model defined in MongoEngine can be read in MotorEngine. The source is
-  `available on GitHub `_.
-
-uMongo
-  `uMongo `_ is a Python MongoDB ODM.
-  Its inception comes from two needs: the lack of an async ODM and the
-  difficulty of doing document (un)serialization with existing ODMs.
-  It works with multiple drivers: PyMongo, TxMongo, motor_asyncio, and
-  mongomock. The source `is available on GitHub
-  `_.
-
-No longer maintained
-""""""""""""""""""""
-
-PyMODM
-  `PyMODM `_ is an ORM-like framework on top
-  of PyMongo. PyMODM is maintained by engineers at MongoDB, Inc. and is quick
-  to adopt new MongoDB features. PyMODM is a "core" ODM, meaning that it
-  provides simple, extensible functionality that can be leveraged by other
-  libraries to target platforms like Django. At the same time, PyMODM is
-  powerful enough to be used for developing applications on its own. Complete
-  documentation is available on `readthedocs
-  `_.
-
-MongoKit
-  The `MongoKit `_ framework
-  is an ORM-like layer on top of PyMongo. There is also a MongoKit
-  `google group `_.
-
-MongoAlchemy
-  `MongoAlchemy `_ is another ORM-like layer on top of
-  PyMongo. Its API is inspired by `SQLAlchemy `_. The
-  code is available `on GitHub `_;
-  for more information, see `the tutorial `_.
-
-Minimongo
-  `minimongo `_ is a lightweight,
-  pythonic interface to MongoDB. It retains pymongo's query and update API,
-  and provides a number of additional features, including a simple
-  document-oriented interface, connection pooling, index management, and
-  collection & database naming helpers. The `source is on GitHub
-  `_.
-
-Manga
-  `Manga `_ aims to be a simpler ORM-like
-  layer on top of PyMongo. The syntax for defining schema is inspired by the
-  Django ORM, but Pymongo's query language is maintained. The source `is on
-  GitHub `_.
-
-Humongolus
-  `Humongolus `_ is a lightweight ORM
-  framework for Python and MongoDB.
The name comes from the combination of - MongoDB and `Homunculus `_ (the - concept of a miniature though fully formed human body). Humongolus allows - you to create models/schemas with robust validation. It attempts to be as - pythonic as possible and exposes the pymongo cursor objects whenever - possible. The code is available for download - `at GitHub `_. Tutorials and usage - examples are also available at GitHub. - -Framework Tools ---------------- -This section lists tools and adapters that have been designed to work with -various Python frameworks and libraries. - -* `Djongo `_ is a connector for using - Django with MongoDB as the database backend. Use the Django Admin GUI to add and - modify documents in MongoDB. - The `Djongo Source Code `_ is hosted on GitHub - and the `Djongo package `_ is on pypi. -* `Django MongoDB Engine - `_ is a MongoDB - database backend for Django that completely integrates with its ORM. - For more information `see the tutorial - `_. -* `mango `_ provides MongoDB backends for - Django sessions and authentication (bypassing :mod:`django.db` entirely). -* `Django MongoEngine - `_ is a MongoDB backend for - Django, an `example: - `_. - For more information see ``_ -* `mongodb_beaker `_ is a - project to enable using MongoDB as a backend for `beakers `_ caching / session system. - `The source is on GitHub `_. -* `Log4Mongo `_ is a flexible - Python logging handler that can store logs in MongoDB using normal and capped - collections. -* `MongoLog `_ is a Python logging - handler that stores logs in MongoDB using a capped collection. -* `rod.recipe.mongodb `_ is a - ZC Buildout recipe for downloading and installing MongoDB. -* `mongobox `_ is a tool to run a sandboxed - MongoDB instance from within a python app. -* `Flask-MongoAlchemy `_ Add - Flask support for MongoDB using MongoAlchemy. -* `Flask-MongoKit `_ Flask extension - to better integrate MongoKit into Flask. -* `Flask-PyMongo `_ Flask-PyMongo - bridges Flask and PyMongo. - -Alternative Drivers -------------------- -These are alternatives to PyMongo. - -* `Motor `_ is a full-featured, non-blocking - MongoDB driver for Python Tornado applications. -* `TxMongo `_ is an asynchronous Twisted - Python driver for MongoDB. -* `MongoMock `_ is a small - library to help testing Python code that interacts with MongoDB via - Pymongo. diff --git a/doc/tutorial.rst b/doc/tutorial.rst deleted file mode 100644 index 55961241e8..0000000000 --- a/doc/tutorial.rst +++ /dev/null @@ -1,407 +0,0 @@ -Tutorial -======== - -.. testsetup:: - - from pymongo import MongoClient - client = MongoClient() - client.drop_database('test-database') - -This tutorial is intended as an introduction to working with -**MongoDB** and **PyMongo**. - -Prerequisites -------------- -Before we start, make sure that you have the **PyMongo** distribution -:doc:`installed `. In the Python shell, the following -should run without raising an exception: - -.. doctest:: - - >>> import pymongo - -This tutorial also assumes that a MongoDB instance is running on the -default host and port. Assuming you have `downloaded and installed -`_ MongoDB, you -can start it like so: - -.. code-block:: bash - - $ mongod - -Making a Connection with MongoClient ------------------------------------- -The first step when working with **PyMongo** is to create a -:class:`~pymongo.mongo_client.MongoClient` to the running **mongod** -instance. Doing so is easy: - -.. 
doctest:: - - >>> from pymongo import MongoClient - >>> client = MongoClient() - -The above code will connect on the default host and port. We can also -specify the host and port explicitly, as follows: - -.. doctest:: - - >>> client = MongoClient('localhost', 27017) - -Or use the MongoDB URI format: - -.. doctest:: - - >>> client = MongoClient('mongodb://localhost:27017/') - -Getting a Database ------------------- -A single instance of MongoDB can support multiple independent -`databases `_. When -working with PyMongo you access databases using attribute style access -on :class:`~pymongo.mongo_client.MongoClient` instances: - -.. doctest:: - - >>> db = client.test_database - -If your database name is such that using attribute style access won't -work (like ``test-database``), you can use dictionary style access -instead: - -.. doctest:: - - >>> db = client['test-database'] - -Getting a Collection --------------------- -A `collection `_ is a -group of documents stored in MongoDB, and can be thought of as roughly -the equivalent of a table in a relational database. Getting a -collection in PyMongo works the same as getting a database: - -.. doctest:: - - >>> collection = db.test_collection - -or (using dictionary style access): - -.. doctest:: - - >>> collection = db['test-collection'] - -An important note about collections (and databases) in MongoDB is that -they are created lazily - none of the above commands have actually -performed any operations on the MongoDB server. Collections and -databases are created when the first document is inserted into them. - -Documents ---------- -Data in MongoDB is represented (and stored) using JSON-style -documents. In PyMongo we use dictionaries to represent documents. As -an example, the following dictionary might be used to represent a blog -post: - -.. doctest:: - - >>> import datetime - >>> post = {"author": "Mike", - ... "text": "My first blog post!", - ... "tags": ["mongodb", "python", "pymongo"], - ... "date": datetime.datetime.utcnow()} - -Note that documents can contain native Python types (like -:class:`datetime.datetime` instances) which will be automatically -converted to and from the appropriate `BSON -`_ types. - -.. todo:: link to table of Python <-> BSON types - -Inserting a Document --------------------- -To insert a document into a collection we can use the -:meth:`~pymongo.collection.Collection.insert_one` method: - -.. doctest:: - - >>> posts = db.posts - >>> post_id = posts.insert_one(post).inserted_id - >>> post_id - ObjectId('...') - -When a document is inserted a special key, ``"_id"``, is automatically -added if the document doesn't already contain an ``"_id"`` key. The value -of ``"_id"`` must be unique across the -collection. :meth:`~pymongo.collection.Collection.insert_one` returns an -instance of :class:`~pymongo.results.InsertOneResult`. For more information -on ``"_id"``, see the `documentation on _id -`_. - -After inserting the first document, the *posts* collection has -actually been created on the server. We can verify this by listing all -of the collections in our database: - -.. doctest:: - - >>> db.list_collection_names() - ['posts'] - -Getting a Single Document With :meth:`~pymongo.collection.Collection.find_one` ------------------------------------------------------------------------------- -The most basic type of query that can be performed in MongoDB is -:meth:`~pymongo.collection.Collection.find_one`. This method returns a -single document matching a query (or ``None`` if there are no -matches). 
It is useful when you know there is only one matching -document, or are only interested in the first match. Here we use -:meth:`~pymongo.collection.Collection.find_one` to get the first -document from the posts collection: - -.. doctest:: - - >>> import pprint - >>> pprint.pprint(posts.find_one()) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -The result is a dictionary matching the one that we inserted previously. - -.. note:: The returned document contains an ``"_id"``, which was - automatically added on insert. - -:meth:`~pymongo.collection.Collection.find_one` also supports querying -on specific elements that the resulting document must match. To limit -our results to a document with author "Mike" we do: - -.. doctest:: - - >>> pprint.pprint(posts.find_one({"author": "Mike"})) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -If we try with a different author, like "Eliot", we'll get no result: - -.. doctest:: - - >>> posts.find_one({"author": "Eliot"}) - >>> - -.. _querying-by-objectid: - -Querying By ObjectId --------------------- -We can also find a post by its ``_id``, which in our example is an ObjectId: - -.. doctest:: - - >>> post_id - ObjectId(...) - >>> pprint.pprint(posts.find_one({"_id": post_id})) - {'_id': ObjectId('...'), - 'author': 'Mike', - 'date': datetime.datetime(...), - 'tags': ['mongodb', 'python', 'pymongo'], - 'text': 'My first blog post!'} - -Note that an ObjectId is not the same as its string representation: - -.. doctest:: - - >>> post_id_as_str = str(post_id) - >>> posts.find_one({"_id": post_id_as_str}) # No result - >>> - -A common task in web applications is to get an ObjectId from the -request URL and find the matching document. It's necessary in this -case to **convert the ObjectId from a string** before passing it to -``find_one``:: - - from bson.objectid import ObjectId - - # The web framework gets post_id from the URL and passes it as a string - def get(post_id): - # Convert from string to ObjectId: - document = client.db.collection.find_one({'_id': ObjectId(post_id)}) - -.. seealso:: :ref:`web-application-querying-by-objectid` - -Bulk Inserts ------------- -In order to make querying a little more interesting, let's insert a -few more documents. In addition to inserting a single document, we can -also perform *bulk insert* operations, by passing a list as the -first argument to :meth:`~pymongo.collection.Collection.insert_many`. -This will insert each document in the list, sending only a single -command to the server: - -.. doctest:: - - >>> new_posts = [{"author": "Mike", - ... "text": "Another post!", - ... "tags": ["bulk", "insert"], - ... "date": datetime.datetime(2009, 11, 12, 11, 14)}, - ... {"author": "Eliot", - ... "title": "MongoDB is fun", - ... "text": "and pretty easy too!", - ... "date": datetime.datetime(2009, 11, 10, 10, 45)}] - >>> result = posts.insert_many(new_posts) - >>> result.inserted_ids - [ObjectId('...'), ObjectId('...')] - -There are a couple of interesting things to note about this example: - - - The result from :meth:`~pymongo.collection.Collection.insert_many` now - returns two :class:`~bson.objectid.ObjectId` instances, one for - each inserted document. - - ``new_posts[1]`` has a different "shape" than the other posts - - there is no ``"tags"`` field and we've added a new field, - ``"title"``. 
This is what we mean when we say that MongoDB is
- *schema-free*.
-
-Querying for More Than One Document
------------------------------------
-To get more than a single document as the result of a query we use the
-:meth:`~pymongo.collection.Collection.find`
-method. :meth:`~pymongo.collection.Collection.find` returns a
-:class:`~pymongo.cursor.Cursor` instance, which allows us to iterate
-over all matching documents. For example, we can iterate over every
-document in the ``posts`` collection:
-
-.. doctest::
-
- >>> for post in posts.find():
- ... pprint.pprint(post)
- ...
- {'_id': ObjectId('...'),
- 'author': 'Mike',
- 'date': datetime.datetime(...),
- 'tags': ['mongodb', 'python', 'pymongo'],
- 'text': 'My first blog post!'}
- {'_id': ObjectId('...'),
- 'author': 'Mike',
- 'date': datetime.datetime(...),
- 'tags': ['bulk', 'insert'],
- 'text': 'Another post!'}
- {'_id': ObjectId('...'),
- 'author': 'Eliot',
- 'date': datetime.datetime(...),
- 'text': 'and pretty easy too!',
- 'title': 'MongoDB is fun'}
-
-Just like we did with :meth:`~pymongo.collection.Collection.find_one`,
-we can pass a document to :meth:`~pymongo.collection.Collection.find`
-to limit the returned results. Here, we get only those documents whose
-author is "Mike":
-
-.. doctest::
-
- >>> for post in posts.find({"author": "Mike"}):
- ... pprint.pprint(post)
- ...
- {'_id': ObjectId('...'),
- 'author': 'Mike',
- 'date': datetime.datetime(...),
- 'tags': ['mongodb', 'python', 'pymongo'],
- 'text': 'My first blog post!'}
- {'_id': ObjectId('...'),
- 'author': 'Mike',
- 'date': datetime.datetime(...),
- 'tags': ['bulk', 'insert'],
- 'text': 'Another post!'}
-
-Counting
---------
-If we just want to know how many documents match a query we can
-perform a :meth:`~pymongo.collection.Collection.count_documents` operation
-instead of a full query. We can get a count of all of the documents
-in a collection:
-
-.. doctest::
-
- >>> posts.count_documents({})
- 3
-
-or just of those documents that match a specific query:
-
-.. doctest::
-
- >>> posts.count_documents({"author": "Mike"})
- 2
-
-Range Queries
--------------
-MongoDB supports many different types of `advanced queries
-`_. As an
-example, let's perform a query where we limit results to posts older
-than a certain date, but also sort the results by author:
-
-.. doctest::
-
- >>> d = datetime.datetime(2009, 11, 12, 12)
- >>> for post in posts.find({"date": {"$lt": d}}).sort("author"):
- ... pprint.pprint(post)
- ...
- {'_id': ObjectId('...'),
- 'author': 'Eliot',
- 'date': datetime.datetime(...),
- 'text': 'and pretty easy too!',
- 'title': 'MongoDB is fun'}
- {'_id': ObjectId('...'),
- 'author': 'Mike',
- 'date': datetime.datetime(...),
- 'tags': ['bulk', 'insert'],
- 'text': 'Another post!'}
-
-Here we use the special ``"$lt"`` operator to do a range query, and
-also call :meth:`~pymongo.cursor.Cursor.sort` to sort the results
-by author.
-
-Indexing
---------
-
-Adding indexes can help accelerate certain queries and can also add additional
-functionality to querying and storing documents. In this example, we'll
-demonstrate how to create a `unique index
-`_ on a key that rejects
-documents whose value for that key already exists in the index.
-
-First, we'll need to create the index:
-
-.. doctest::
-
- >>> result = db.profiles.create_index([('user_id', pymongo.ASCENDING)],
- ...
unique=True) - >>> sorted(list(db.profiles.index_information())) - ['_id_', 'user_id_1'] - -Notice that we have two indexes now: one is the index on ``_id`` that MongoDB -creates automatically, and the other is the index on ``user_id`` we just -created. - -Now let's set up some user profiles: - -.. doctest:: - - >>> user_profiles = [ - ... {'user_id': 211, 'name': 'Luke'}, - ... {'user_id': 212, 'name': 'Ziltoid'}] - >>> result = db.profiles.insert_many(user_profiles) - -The index prevents us from inserting a document whose ``user_id`` is already in -the collection: - -.. doctest:: - :options: +IGNORE_EXCEPTION_DETAIL - - >>> new_profile = {'user_id': 213, 'name': 'Drew'} - >>> duplicate_profile = {'user_id': 212, 'name': 'Tommy'} - >>> result = db.profiles.insert_one(new_profile) # This is fine. - >>> result = db.profiles.insert_one(duplicate_profile) - Traceback (most recent call last): - DuplicateKeyError: E11000 duplicate key error index: test_database.profiles.$user_id_1 dup key: { : 212 } - -.. seealso:: The MongoDB documentation on `indexes `_ diff --git a/green_framework_test.py b/green_framework_test.py deleted file mode 100644 index d638d9b014..0000000000 --- a/green_framework_test.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test PyMongo with a variety of greenlet-based monkey-patching frameworks.""" - -import getopt -import sys - - -def run_gevent(): - """Prepare to run tests with Gevent. Can raise ImportError.""" - from gevent import monkey - - monkey.patch_all() - - -def run_eventlet(): - """Prepare to run tests with Eventlet. Can raise ImportError.""" - import eventlet - - # https://github.com/eventlet/eventlet/issues/401 - eventlet.sleep() - eventlet.monkey_patch() - - -FRAMEWORKS = { - "gevent": run_gevent, - "eventlet": run_eventlet, -} - - -def list_frameworks(): - """Tell the user what framework names are valid.""" - sys.stdout.write( - """Testable frameworks: %s - -Note that membership in this list means the framework can be tested with -PyMongo, not necessarily that it is officially supported. -""" - % ", ".join(sorted(FRAMEWORKS)) - ) - - -def run(framework_name, *args): - """Run tests with monkey-patching enabled. Can raise ImportError.""" - # Monkey-patch. - FRAMEWORKS[framework_name]() - - # Run the tests. - sys.argv[:] = ["setup.py", "test"] + list(args) - import setup # noqa - - -def main(): - """Parse options and run tests.""" - usage = """python %s FRAMEWORK_NAME - -Test PyMongo with a variety of greenlet-based monkey-patching frameworks. 
See -python %s --help-frameworks.""" % ( - sys.argv[0], - sys.argv[0], - ) - - try: - opts, args = getopt.getopt(sys.argv[1:], "h", ["help", "help-frameworks"]) - except getopt.GetoptError as err: - print(str(err)) - print(usage) - sys.exit(2) - - for option_name, _ in opts: - if option_name in ("-h", "--help"): - print(usage) - sys.exit() - elif option_name == "--help-frameworks": - list_frameworks() - sys.exit() - else: - raise AssertionError("unhandled option") - - if not args: - print(usage) - sys.exit(1) - - if args[0] not in FRAMEWORKS: - print("%r is not a testable framework.\n" % args[0]) - list_frameworks() - sys.exit(1) - - run( - args[0], *args[1:] # Framework name. - ) # Command line args to setup.py, like what test to run. - - -if __name__ == "__main__": - main() diff --git a/gridfs/__init__.py b/gridfs/__init__.py index 6ab843a85e..8173561beb 100644 --- a/gridfs/__init__.py +++ b/gridfs/__init__.py @@ -19,997 +19,36 @@ .. seealso:: The MongoDB documentation on `gridfs `_. """ - -from collections import abc -from typing import Any, List, Mapping, Optional, cast - -from bson.objectid import ObjectId +from __future__ import annotations + +from gridfs.asynchronous.grid_file import ( + AsyncGridFS, + AsyncGridFSBucket, + AsyncGridIn, + AsyncGridOut, + AsyncGridOutCursor, +) from gridfs.errors import NoFile -from gridfs.grid_file import ( - DEFAULT_CHUNK_SIZE, +from gridfs.grid_file_shared import DEFAULT_CHUNK_SIZE +from gridfs.synchronous.grid_file import ( + GridFS, + GridFSBucket, GridIn, GridOut, GridOutCursor, - _clear_entity_type_registry, - _disallow_transactions, ) -from pymongo import ASCENDING, DESCENDING, _csot -from pymongo.client_session import ClientSession -from pymongo.collection import Collection -from pymongo.common import validate_string -from pymongo.database import Database -from pymongo.errors import ConfigurationError -from pymongo.read_preferences import _ServerMode -from pymongo.write_concern import WriteConcern __all__ = [ + "AsyncGridFS", "GridFS", + "AsyncGridFSBucket", "GridFSBucket", "NoFile", "DEFAULT_CHUNK_SIZE", + "AsyncGridIn", "GridIn", + "AsyncGridOut", "GridOut", + "AsyncGridOutCursor", "GridOutCursor", ] - - -class GridFS(object): - """An instance of GridFS on top of a single Database.""" - - def __init__(self, database: Database, collection: str = "fs"): - """Create a new instance of :class:`GridFS`. - - Raises :class:`TypeError` if `database` is not an instance of - :class:`~pymongo.database.Database`. - - :Parameters: - - `database`: database to use - - `collection` (optional): root collection to use - - .. versionchanged:: 4.0 - Removed the `disable_md5` parameter. See - :ref:`removed-gridfs-checksum` for details. - - .. versionchanged:: 3.11 - Running a GridFS operation in a transaction now always raises an - error. GridFS does not support multi-document transactions. - - .. versionchanged:: 3.7 - Added the `disable_md5` parameter. - - .. versionchanged:: 3.1 - Indexes are only ensured on the first write to the DB. - - .. versionchanged:: 3.0 - `database` must use an acknowledged - :attr:`~pymongo.database.Database.write_concern` - - .. seealso:: The MongoDB documentation on `gridfs `_. 
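-
-        For example, a minimal round-trip sketch (an illustration only; it
-        assumes a ``mongod`` is reachable on the default host and port)::
-
-            from pymongo import MongoClient
-            import gridfs
-
-            # Store a small payload and read it back; "test_database" is a
-            # placeholder name, any database will do.
-            fs = gridfs.GridFS(MongoClient().test_database)
-            file_id = fs.put(b"hello gridfs", filename="hello.txt")
-            contents = fs.get(file_id).read()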
- """ - if not isinstance(database, Database): - raise TypeError("database must be an instance of Database") - - database = _clear_entity_type_registry(database) - - if not database.write_concern.acknowledged: - raise ConfigurationError("database must use acknowledged write_concern") - - self.__collection = database[collection] - self.__files = self.__collection.files - self.__chunks = self.__collection.chunks - - def new_file(self, **kwargs: Any) -> GridIn: - """Create a new file in GridFS. - - Returns a new :class:`~gridfs.grid_file.GridIn` instance to - which data can be written. Any keyword arguments will be - passed through to :meth:`~gridfs.grid_file.GridIn`. - - If the ``"_id"`` of the file is manually specified, it must - not already exist in GridFS. Otherwise - :class:`~gridfs.errors.FileExists` is raised. - - :Parameters: - - `**kwargs` (optional): keyword arguments for file creation - """ - return GridIn(self.__collection, **kwargs) - - def put(self, data: Any, **kwargs: Any) -> Any: - """Put data in GridFS as a new file. - - Equivalent to doing:: - - with fs.new_file(**kwargs) as f: - f.write(data) - - `data` can be either an instance of :class:`bytes` or a file-like - object providing a :meth:`read` method. If an `encoding` keyword - argument is passed, `data` can also be a :class:`str` instance, which - will be encoded as `encoding` before being written. Any keyword - arguments will be passed through to the created file - see - :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the - ``"_id"`` of the created file. - - If the ``"_id"`` of the file is manually specified, it must - not already exist in GridFS. Otherwise - :class:`~gridfs.errors.FileExists` is raised. - - :Parameters: - - `data`: data to be written as a file. - - `**kwargs` (optional): keyword arguments for file creation - - .. versionchanged:: 3.0 - w=0 writes to GridFS are now prohibited. - """ - - with GridIn(self.__collection, **kwargs) as grid_file: - grid_file.write(data) - return grid_file._id - - def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: - """Get a file from GridFS by ``"_id"``. - - Returns an instance of :class:`~gridfs.grid_file.GridOut`, - which provides a file-like interface for reading. - - :Parameters: - - `file_id`: ``"_id"`` of the file to get - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - gout = GridOut(self.__collection, file_id, session=session) - - # Raise NoFile now, instead of on first attribute access. - gout._ensure_file() - return gout - - def get_version( - self, - filename: Optional[str] = None, - version: Optional[int] = -1, - session: Optional[ClientSession] = None, - **kwargs: Any - ) -> GridOut: - """Get a file from GridFS by ``"filename"`` or metadata fields. - - Returns a version of the file in GridFS whose filename matches - `filename` and whose metadata fields match the supplied keyword - arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. - - Version numbering is a convenience atop the GridFS API provided - by MongoDB. If more than one file matches the query (either by - `filename` alone, by metadata fields, or by a combination of - both), then version ``-1`` will be the most recently uploaded - matching file, ``-2`` the second most recently - uploaded, etc. Version ``0`` will be the first version - uploaded, ``1`` the second version, etc. 
So if three versions - have been uploaded, then version ``0`` is the same as version - ``-3``, version ``1`` is the same as version ``-2``, and - version ``2`` is the same as version ``-1``. - - Raises :class:`~gridfs.errors.NoFile` if no such version of - that file exists. - - :Parameters: - - `filename`: ``"filename"`` of the file to get, or `None` - - `version` (optional): version of the file to get (defaults - to -1, the most recent version uploaded) - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - `**kwargs` (optional): find files by custom metadata. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.1 - ``get_version`` no longer ensures indexes. - """ - query = kwargs - if filename is not None: - query["filename"] = filename - - _disallow_transactions(session) - cursor = self.__files.find(query, session=session) - if version is None: - version = -1 - if version < 0: - skip = abs(version) - 1 - cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) - else: - cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) - try: - doc = next(cursor) - return GridOut(self.__collection, file_document=doc, session=session) - except StopIteration: - raise NoFile("no version %d for filename %r" % (version, filename)) - - def get_last_version( - self, filename: Optional[str] = None, session: Optional[ClientSession] = None, **kwargs: Any - ) -> GridOut: - """Get the most recent version of a file in GridFS by ``"filename"`` - or metadata fields. - - Equivalent to calling :meth:`get_version` with the default - `version` (``-1``). - - :Parameters: - - `filename`: ``"filename"`` of the file to get, or `None` - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - `**kwargs` (optional): find files by custom metadata. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - return self.get_version(filename=filename, session=session, **kwargs) - - # TODO add optional safe mode for chunk removal? - def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: - """Delete a file from GridFS by ``"_id"``. - - Deletes all data belonging to the file with ``"_id"``: - `file_id`. - - .. warning:: Any processes/threads reading from the file while - this method is executing will likely see an invalid/corrupt - file. Care should be taken to avoid concurrent reads to a file - while it is being deleted. - - .. note:: Deletes of non-existent files are considered successful - since the end result is the same: no file with that _id remains. - - :Parameters: - - `file_id`: ``"_id"`` of the file to delete - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.1 - ``delete`` no longer ensures indexes. - """ - _disallow_transactions(session) - self.__files.delete_one({"_id": file_id}, session=session) - self.__chunks.delete_many({"files_id": file_id}, session=session) - - def list(self, session: Optional[ClientSession] = None) -> List[str]: - """List the names of all files stored in this instance of - :class:`GridFS`. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.1 - ``list`` no longer ensures indexes. - """ - _disallow_transactions(session) - # With an index, distinct includes documents with no filename - # as None. 
- return [ - name for name in self.__files.distinct("filename", session=session) if name is not None - ] - - def find_one( - self, - filter: Optional[Any] = None, - session: Optional[ClientSession] = None, - *args: Any, - **kwargs: Any - ) -> Optional[GridOut]: - """Get a single file from gridfs. - - All arguments to :meth:`find` are also valid arguments for - :meth:`find_one`, although any `limit` argument will be - ignored. Returns a single :class:`~gridfs.grid_file.GridOut`, - or ``None`` if no matching file is found. For example:: - - file = fs.find_one({"filename": "lisa.txt"}) - - :Parameters: - - `filter` (optional): a dictionary specifying - the query to be performing OR any other type to be used as - the value for a query for ``"_id"`` in the file collection. - - `*args` (optional): any additional positional arguments are - the same as the arguments to :meth:`find`. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - `**kwargs` (optional): any additional keyword arguments - are the same as the arguments to :meth:`find`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - if filter is not None and not isinstance(filter, abc.Mapping): - filter = {"_id": filter} - - _disallow_transactions(session) - for f in self.find(filter, *args, session=session, **kwargs): - return f - - return None - - def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: - """Query GridFS for files. - - Returns a cursor that iterates across files matching - arbitrary queries on the files collection. Can be combined - with other modifiers for additional control. For example:: - - for grid_out in fs.find({"filename": "lisa.txt"}, - no_cursor_timeout=True): - data = grid_out.read() - - would iterate through all versions of "lisa.txt" stored in GridFS. - Note that setting no_cursor_timeout to True may be important to - prevent the cursor from timing out during long multi-file processing - work. - - As another example, the call:: - - most_recent_three = fs.find().sort("uploadDate", -1).limit(3) - - would return a cursor to the three most recently uploaded files - in GridFS. - - Follows a similar interface to - :meth:`~pymongo.collection.Collection.find` - in :class:`~pymongo.collection.Collection`. - - If a :class:`~pymongo.client_session.ClientSession` is passed to - :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances - are associated with that session. - - :Parameters: - - `filter` (optional): A query document that selects which files - to include in the result set. Can be an empty document to include - all files. - - `skip` (optional): the number of files to omit (from - the start of the result set) when returning the results - - `limit` (optional): the maximum number of results to - return - - `no_cursor_timeout` (optional): if False (the default), any - returned cursor is closed by the server after 10 minutes of - inactivity. If set to True, the returned cursor will never - time out on the server. Care should be taken to ensure that - cursors with no_cursor_timeout turned on are properly closed. - - `sort` (optional): a list of (key, direction) pairs - specifying the sort order for this query. See - :meth:`~pymongo.cursor.Cursor.sort` for details. - - Raises :class:`TypeError` if any of the arguments are of - improper type. Returns an instance of - :class:`~gridfs.grid_file.GridOutCursor` - corresponding to this query. - - .. versionchanged:: 3.0 - Removed the read_preference, tag_sets, and - secondary_acceptable_latency_ms options. - .. 
versionadded:: 2.7 - .. seealso:: The MongoDB documentation on `find `_. - """ - return GridOutCursor(self.__collection, *args, **kwargs) - - def exists( - self, - document_or_id: Optional[Any] = None, - session: Optional[ClientSession] = None, - **kwargs: Any - ) -> bool: - """Check if a file exists in this instance of :class:`GridFS`. - - The file to check for can be specified by the value of its - ``_id`` key, or by passing in a query document. A query - document can be passed in as dictionary, or by using keyword - arguments. Thus, the following three calls are equivalent: - - >>> fs.exists(file_id) - >>> fs.exists({"_id": file_id}) - >>> fs.exists(_id=file_id) - - As are the following two calls: - - >>> fs.exists({"filename": "mike.txt"}) - >>> fs.exists(filename="mike.txt") - - And the following two: - - >>> fs.exists({"foo": {"$gt": 12}}) - >>> fs.exists(foo={"$gt": 12}) - - Returns ``True`` if a matching file exists, ``False`` - otherwise. Calls to :meth:`exists` will not automatically - create appropriate indexes; application developers should be - sure to create indexes if needed and as appropriate. - - :Parameters: - - `document_or_id` (optional): query document, or _id of the - document to check for - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - `**kwargs` (optional): keyword arguments are used as a - query document, if they're present. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - _disallow_transactions(session) - if kwargs: - f = self.__files.find_one(kwargs, ["_id"], session=session) - else: - f = self.__files.find_one(document_or_id, ["_id"], session=session) - - return f is not None - - -class GridFSBucket(object): - """An instance of GridFS on top of a single Database.""" - - def __init__( - self, - db: Database, - bucket_name: str = "fs", - chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None, - ) -> None: - """Create a new instance of :class:`GridFSBucket`. - - Raises :exc:`TypeError` if `database` is not an instance of - :class:`~pymongo.database.Database`. - - Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` - is not acknowledged. - - :Parameters: - - `database`: database to use. - - `bucket_name` (optional): The name of the bucket. Defaults to 'fs'. - - `chunk_size_bytes` (optional): The chunk size in bytes. Defaults - to 255KB. - - `write_concern` (optional): The - :class:`~pymongo.write_concern.WriteConcern` to use. If ``None`` - (the default) db.write_concern is used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) db.read_preference is used. - - .. versionchanged:: 4.0 - Removed the `disable_md5` parameter. See - :ref:`removed-gridfs-checksum` for details. - - .. versionchanged:: 3.11 - Running a GridFSBucket operation in a transaction now always raises - an error. GridFSBucket does not support multi-document transactions. - - .. versionchanged:: 3.7 - Added the `disable_md5` parameter. - - .. versionadded:: 3.1 - - .. seealso:: The MongoDB documentation on `gridfs `_. 
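-
-        For example, a minimal sketch (an illustration only; it assumes a
-        ``mongod`` is reachable on the default host and port)::
-
-            from pymongo import MongoClient
-            from gridfs import GridFSBucket
-
-            # "test" is just a placeholder database name.
-            fs = GridFSBucket(MongoClient().test, bucket_name="fs")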
- """ - if not isinstance(db, Database): - raise TypeError("database must be an instance of Database") - - db = _clear_entity_type_registry(db) - - wtc = write_concern if write_concern is not None else db.write_concern - if not wtc.acknowledged: - raise ConfigurationError("write concern must be acknowledged") - - self._bucket_name = bucket_name - self._collection = db[bucket_name] - self._chunks: Collection = self._collection.chunks.with_options( - write_concern=write_concern, read_preference=read_preference - ) - - self._files: Collection = self._collection.files.with_options( - write_concern=write_concern, read_preference=read_preference - ) - - self._chunk_size_bytes = chunk_size_bytes - self._timeout = db.client.options.timeout - - def open_upload_stream( - self, - filename: str, - chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None, - ) -> GridIn: - """Opens a Stream that the application can write the contents of the - file to. - - The user must specify the filename, and can choose to add any - additional information in the metadata field of the file document or - modify the chunk size. - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - with fs.open_upload_stream( - "test_file", chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) as grid_in: - grid_in.write("data I want to store!") - # uploaded on close - - Returns an instance of :class:`~gridfs.grid_file.GridIn`. - - Raises :exc:`~gridfs.errors.NoFile` if no such version of - that file exists. - Raises :exc:`~ValueError` if `filename` is not a string. - - :Parameters: - - `filename`: The name of the file to upload. - - `chunk_size_bytes` (options): The number of bytes per chunk of this - file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. - - `metadata` (optional): User data for the 'metadata' field of the - files collection document. If not provided the metadata field will - be omitted from the files collection document. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - validate_string("filename", filename) - - opts = { - "filename": filename, - "chunk_size": ( - chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes - ), - } - if metadata is not None: - opts["metadata"] = metadata - - return GridIn(self._collection, session=session, **opts) - - def open_upload_stream_with_id( - self, - file_id: Any, - filename: str, - chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None, - ) -> GridIn: - """Opens a Stream that the application can write the contents of the - file to. - - The user must specify the file id and filename, and can choose to add - any additional information in the metadata field of the file document - or modify the chunk size. - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - with fs.open_upload_stream_with_id( - ObjectId(), - "test_file", - chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) as grid_in: - grid_in.write("data I want to store!") - # uploaded on close - - Returns an instance of :class:`~gridfs.grid_file.GridIn`. - - Raises :exc:`~gridfs.errors.NoFile` if no such version of - that file exists. - Raises :exc:`~ValueError` if `filename` is not a string. - - :Parameters: - - `file_id`: The id to use for this file. 
The id must not have - already been used for another file. - - `filename`: The name of the file to upload. - - `chunk_size_bytes` (options): The number of bytes per chunk of this - file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. - - `metadata` (optional): User data for the 'metadata' field of the - files collection document. If not provided the metadata field will - be omitted from the files collection document. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - validate_string("filename", filename) - - opts = { - "_id": file_id, - "filename": filename, - "chunk_size": ( - chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes - ), - } - if metadata is not None: - opts["metadata"] = metadata - - return GridIn(self._collection, session=session, **opts) - - @_csot.apply - def upload_from_stream( - self, - filename: str, - source: Any, - chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None, - ) -> ObjectId: - """Uploads a user file to a GridFS bucket. - - Reads the contents of the user file from `source` and uploads - it to the file `filename`. Source can be a string or file-like object. - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - file_id = fs.upload_from_stream( - "test_file", - "data I want to store!", - chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) - - Returns the _id of the uploaded file. - - Raises :exc:`~gridfs.errors.NoFile` if no such version of - that file exists. - Raises :exc:`~ValueError` if `filename` is not a string. - - :Parameters: - - `filename`: The name of the file to upload. - - `source`: The source stream of the content to be uploaded. Must be - a file-like object that implements :meth:`read` or a string. - - `chunk_size_bytes` (options): The number of bytes per chunk of this - file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. - - `metadata` (optional): User data for the 'metadata' field of the - files collection document. If not provided the metadata field will - be omitted from the files collection document. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - with self.open_upload_stream(filename, chunk_size_bytes, metadata, session=session) as gin: - gin.write(source) - - return cast(ObjectId, gin._id) - - @_csot.apply - def upload_from_stream_with_id( - self, - file_id: Any, - filename: str, - source: Any, - chunk_size_bytes: Optional[int] = None, - metadata: Optional[Mapping[str, Any]] = None, - session: Optional[ClientSession] = None, - ) -> None: - """Uploads a user file to a GridFS bucket with a custom file id. - - Reads the contents of the user file from `source` and uploads - it to the file `filename`. Source can be a string or file-like object. - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - file_id = fs.upload_from_stream( - ObjectId(), - "test_file", - "data I want to store!", - chunk_size_bytes=4, - metadata={"contentType": "text/plain"}) - - Raises :exc:`~gridfs.errors.NoFile` if no such version of - that file exists. - Raises :exc:`~ValueError` if `filename` is not a string. - - :Parameters: - - `file_id`: The id to use for this file. The id must not have - already been used for another file. - - `filename`: The name of the file to upload. 
- - `source`: The source stream of the content to be uploaded. Must be - a file-like object that implements :meth:`read` or a string. - - `chunk_size_bytes` (options): The number of bytes per chunk of this - file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. - - `metadata` (optional): User data for the 'metadata' field of the - files collection document. If not provided the metadata field will - be omitted from the files collection document. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - with self.open_upload_stream_with_id( - file_id, filename, chunk_size_bytes, metadata, session=session - ) as gin: - gin.write(source) - - def open_download_stream( - self, file_id: Any, session: Optional[ClientSession] = None - ) -> GridOut: - """Opens a Stream from which the application can read the contents of - the stored file specified by file_id. - - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - # get _id of file to read. - file_id = fs.upload_from_stream("test_file", "data I want to store!") - grid_out = fs.open_download_stream(file_id) - contents = grid_out.read() - - Returns an instance of :class:`~gridfs.grid_file.GridOut`. - - Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. - - :Parameters: - - `file_id`: The _id of the file to be downloaded. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - gout = GridOut(self._collection, file_id, session=session) - - # Raise NoFile now, instead of on first attribute access. - gout._ensure_file() - return gout - - @_csot.apply - def download_to_stream( - self, file_id: Any, destination: Any, session: Optional[ClientSession] = None - ) -> None: - """Downloads the contents of the stored file specified by file_id and - writes the contents to `destination`. - - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - # Get _id of file to read - file_id = fs.upload_from_stream("test_file", "data I want to store!") - # Get file to write to - file = open('myfile','wb+') - fs.download_to_stream(file_id, file) - file.seek(0) - contents = file.read() - - Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. - - :Parameters: - - `file_id`: The _id of the file to be downloaded. - - `destination`: a file-like object implementing :meth:`write`. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - with self.open_download_stream(file_id, session=session) as gout: - for chunk in gout: - destination.write(chunk) - - @_csot.apply - def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: - """Given an file_id, delete this stored file's files collection document - and associated chunks from a GridFS bucket. - - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - # Get _id of file to delete - file_id = fs.upload_from_stream("test_file", "data I want to store!") - fs.delete(file_id) - - Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. - - :Parameters: - - `file_id`: The _id of the file to be deleted. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. 
- """ - _disallow_transactions(session) - res = self._files.delete_one({"_id": file_id}, session=session) - self._chunks.delete_many({"files_id": file_id}, session=session) - if not res.deleted_count: - raise NoFile("no file could be deleted because none matched %s" % file_id) - - def find(self, *args: Any, **kwargs: Any) -> GridOutCursor: - """Find and return the files collection documents that match ``filter`` - - Returns a cursor that iterates across files matching - arbitrary queries on the files collection. Can be combined - with other modifiers for additional control. - - For example:: - - for grid_data in fs.find({"filename": "lisa.txt"}, - no_cursor_timeout=True): - data = grid_data.read() - - would iterate through all versions of "lisa.txt" stored in GridFS. - Note that setting no_cursor_timeout to True may be important to - prevent the cursor from timing out during long multi-file processing - work. - - As another example, the call:: - - most_recent_three = fs.find().sort("uploadDate", -1).limit(3) - - would return a cursor to the three most recently uploaded files - in GridFS. - - Follows a similar interface to - :meth:`~pymongo.collection.Collection.find` - in :class:`~pymongo.collection.Collection`. - - If a :class:`~pymongo.client_session.ClientSession` is passed to - :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances - are associated with that session. - - :Parameters: - - `filter`: Search query. - - `batch_size` (optional): The number of documents to return per - batch. - - `limit` (optional): The maximum number of documents to return. - - `no_cursor_timeout` (optional): The server normally times out idle - cursors after an inactivity period (10 minutes) to prevent excess - memory use. Set this option to True prevent that. - - `skip` (optional): The number of documents to skip before - returning. - - `sort` (optional): The order by which to sort results. Defaults to - None. - """ - return GridOutCursor(self._collection, *args, **kwargs) - - def open_download_stream_by_name( - self, filename: str, revision: int = -1, session: Optional[ClientSession] = None - ) -> GridOut: - """Opens a Stream from which the application can read the contents of - `filename` and optional `revision`. - - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - grid_out = fs.open_download_stream_by_name("test_file") - contents = grid_out.read() - - Returns an instance of :class:`~gridfs.grid_file.GridOut`. - - Raises :exc:`~gridfs.errors.NoFile` if no such version of - that file exists. - - Raises :exc:`~ValueError` filename is not a string. - - :Parameters: - - `filename`: The name of the file to read from. - - `revision` (optional): Which revision (documents with the same - filename and different uploadDate) of the file to retrieve. - Defaults to -1 (the most recent revision). - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - :Note: Revision numbers are defined as follows: - - - 0 = the original stored file - - 1 = the first revision - - 2 = the second revision - - etc... - - -2 = the second most recent revision - - -1 = the most recent revision - - .. versionchanged:: 3.6 - Added ``session`` parameter. 
- """ - validate_string("filename", filename) - query = {"filename": filename} - _disallow_transactions(session) - cursor = self._files.find(query, session=session) - if revision < 0: - skip = abs(revision) - 1 - cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) - else: - cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING) - try: - grid_file = next(cursor) - return GridOut(self._collection, file_document=grid_file, session=session) - except StopIteration: - raise NoFile("no version %d for filename %r" % (revision, filename)) - - @_csot.apply - def download_to_stream_by_name( - self, - filename: str, - destination: Any, - revision: int = -1, - session: Optional[ClientSession] = None, - ) -> None: - """Write the contents of `filename` (with optional `revision`) to - `destination`. - - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - # Get file to write to - file = open('myfile','wb') - fs.download_to_stream_by_name("test_file", file) - - Raises :exc:`~gridfs.errors.NoFile` if no such version of - that file exists. - - Raises :exc:`~ValueError` if `filename` is not a string. - - :Parameters: - - `filename`: The name of the file to read from. - - `destination`: A file-like object that implements :meth:`write`. - - `revision` (optional): Which revision (documents with the same - filename and different uploadDate) of the file to retrieve. - Defaults to -1 (the most recent revision). - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - :Note: Revision numbers are defined as follows: - - - 0 = the original stored file - - 1 = the first revision - - 2 = the second revision - - etc... - - -2 = the second most recent revision - - -1 = the most recent revision - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - with self.open_download_stream_by_name(filename, revision, session=session) as gout: - for chunk in gout: - destination.write(chunk) - - def rename( - self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None - ) -> None: - """Renames the stored file with the specified file_id. - - For example:: - - my_db = MongoClient().test - fs = GridFSBucket(my_db) - # Get _id of file to rename - file_id = fs.upload_from_stream("test_file", "data I want to store!") - fs.rename(file_id, "new_test_name") - - Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists. - - :Parameters: - - `file_id`: The _id of the file to be renamed. - - `new_filename`: The new name of the file. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - _disallow_transactions(session) - result = self._files.update_one( - {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session - ) - if not result.matched_count: - raise NoFile( - "no files could be renamed %r because none " - "matched file_id %i" % (new_filename, file_id) - ) diff --git a/gridfs/asynchronous/__init__.py b/gridfs/asynchronous/__init__.py new file mode 100644 index 0000000000..0826145b11 --- /dev/null +++ b/gridfs/asynchronous/__init__.py @@ -0,0 +1,42 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GridFS is a specification for storing large objects in Mongo. + +The :mod:`gridfs` package is an implementation of GridFS on top of +:mod:`pymongo`, exposing a file-like interface. + +.. seealso:: The MongoDB documentation on `gridfs `_. +""" +from __future__ import annotations + +from gridfs.asynchronous.grid_file import ( + AsyncGridFS, + AsyncGridFSBucket, + AsyncGridIn, + AsyncGridOut, + AsyncGridOutCursor, +) +from gridfs.errors import NoFile +from gridfs.grid_file_shared import DEFAULT_CHUNK_SIZE + +__all__ = [ + "AsyncGridFS", + "AsyncGridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "AsyncGridIn", + "AsyncGridOut", + "AsyncGridOutCursor", +] diff --git a/gridfs/asynchronous/grid_file.py b/gridfs/asynchronous/grid_file.py new file mode 100644 index 0000000000..69a2200d3b --- /dev/null +++ b/gridfs/asynchronous/grid_file.py @@ -0,0 +1,2007 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tools for representing files stored in GridFS.""" +from __future__ import annotations + +import datetime +import inspect +import io +import math +from collections import abc +from typing import Any, Iterable, Mapping, NoReturn, Optional, cast + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.grid_file_shared import ( + _C_INDEX, + _CHUNK_OVERHEAD, + _F_INDEX, + _SEEK_CUR, + _SEEK_END, + _SEEK_SET, + _UPLOAD_BUFFER_CHUNKS, + _UPLOAD_BUFFER_SIZE, + DEFAULT_CHUNK_SIZE, + EMPTY, + NEWLN, + _a_grid_in_property, + _a_grid_out_property, + _clear_entity_type_registry, +) +from pymongo import ASCENDING, DESCENDING, WriteConcern, _csot +from pymongo.asynchronous.client_session import AsyncClientSession +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.common import validate_string +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + CursorNotFound, + DuplicateKeyError, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers_shared import _check_write_command_response +from pymongo.read_preferences import ReadPreference, _ServerMode + +_IS_SYNC = False + + +def _disallow_transactions(session: Optional[AsyncClientSession]) -> None: + if session and session.in_transaction: + raise InvalidOperation("GridFS does not support multi-document transactions") + + +class AsyncGridFS: + """An instance of GridFS on top of a single Database.""" + + def __init__(self, database: AsyncDatabase[Any], collection: str = "fs"): + """Create a new instance of :class:`GridFS`. + + Raises :class:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + :param database: database to use + :param collection: root collection to use + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFS operation in a transaction now always raises an + error. GridFS does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionchanged:: 3.1 + Indexes are only ensured on the first write to the DB. + + .. versionchanged:: 3.0 + `database` must use an acknowledged + :attr:`~pymongo.database.Database.write_concern` + + .. seealso:: The MongoDB documentation on `gridfs `_. + """ + if not isinstance(database, AsyncDatabase): + raise TypeError(f"database must be an instance of Database, not {type(database)}") + + database = _clear_entity_type_registry(database) + + if not database.write_concern.acknowledged: + raise ConfigurationError("database must use acknowledged write_concern") + + self._collection = database[collection] + self._files = self._collection.files + self._chunks = self._collection.chunks + + def new_file(self, **kwargs: Any) -> AsyncGridIn: + """Create a new file in GridFS. + + Returns a new :class:`~gridfs.grid_file.GridIn` instance to + which data can be written. Any keyword arguments will be + passed through to :meth:`~gridfs.grid_file.GridIn`. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. 
+ + :param kwargs: keyword arguments for file creation + """ + return AsyncGridIn(self._collection, **kwargs) + + async def put(self, data: Any, **kwargs: Any) -> Any: + """Put data in GridFS as a new file. + + Equivalent to doing:: + + with fs.new_file(**kwargs) as f: + f.write(data) + + `data` can be either an instance of :class:`bytes` or a file-like + object providing a :meth:`read` method. If an `encoding` keyword + argument is passed, `data` can also be a :class:`str` instance, which + will be encoded as `encoding` before being written. Any keyword + arguments will be passed through to the created file - see + :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the + ``"_id"`` of the created file. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. + + :param data: data to be written as a file. + :param kwargs: keyword arguments for file creation + + .. versionchanged:: 3.0 + w=0 writes to GridFS are now prohibited. + """ + async with AsyncGridIn(self._collection, **kwargs) as grid_file: + await grid_file.write(data) + return grid_file._id + + async def get(self, file_id: Any, session: Optional[AsyncClientSession] = None) -> AsyncGridOut: + """Get a file from GridFS by ``"_id"``. + + Returns an instance of :class:`~gridfs.grid_file.GridOut`, + which provides a file-like interface for reading. + + :param file_id: ``"_id"`` of the file to get + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + gout = AsyncGridOut(self._collection, file_id, session=session) + + # Raise NoFile now, instead of on first attribute access. + await gout.open() + return gout + + async def get_version( + self, + filename: Optional[str] = None, + version: Optional[int] = -1, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> AsyncGridOut: + """Get a file from GridFS by ``"filename"`` or metadata fields. + + Returns a version of the file in GridFS whose filename matches + `filename` and whose metadata fields match the supplied keyword + arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. + + Version numbering is a convenience atop the GridFS API provided + by MongoDB. If more than one file matches the query (either by + `filename` alone, by metadata fields, or by a combination of + both), then version ``-1`` will be the most recently uploaded + matching file, ``-2`` the second most recently + uploaded, etc. Version ``0`` will be the first version + uploaded, ``1`` the second version, etc. So if three versions + have been uploaded, then version ``0`` is the same as version + ``-3``, version ``1`` is the same as version ``-2``, and + version ``2`` is the same as version ``-1``. + + Raises :class:`~gridfs.errors.NoFile` if no such version of + that file exists. + + :param filename: ``"filename"`` of the file to get, or `None` + :param version: version of the file to get (defaults + to -1, the most recent version uploaded) + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``get_version`` no longer ensures indexes. 
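+
+        For example, a minimal sketch (an illustration only; it assumes
+        several revisions of ``"hello.txt"`` have already been uploaded to
+        ``fs``)::
+
+            newest = await fs.get_version("hello.txt")  # same as version=-1
+            first = await fs.get_version("hello.txt", version=0)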
+ """ + query = kwargs + if filename is not None: + query["filename"] = filename + + _disallow_transactions(session) + cursor = self._files.find(query, session=session) + if version is None: + version = -1 + if version < 0: + skip = abs(version) - 1 + cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) + else: + cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) + try: + doc = await anext(cursor) + return AsyncGridOut(self._collection, file_document=doc, session=session) + except StopAsyncIteration: + raise NoFile("no version %d for filename %r" % (version, filename)) from None + + async def get_last_version( + self, + filename: Optional[str] = None, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> AsyncGridOut: + """Get the most recent version of a file in GridFS by ``"filename"`` + or metadata fields. + + Equivalent to calling :meth:`get_version` with the default + `version` (``-1``). + + :param filename: ``"filename"`` of the file to get, or `None` + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return await self.get_version(filename=filename, session=session, **kwargs) + + # TODO add optional safe mode for chunk removal? + async def delete(self, file_id: Any, session: Optional[AsyncClientSession] = None) -> None: + """Delete a file from GridFS by ``"_id"``. + + Deletes all data belonging to the file with ``"_id"``: + `file_id`. + + .. warning:: Any processes/threads reading from the file while + this method is executing will likely see an invalid/corrupt + file. Care should be taken to avoid concurrent reads to a file + while it is being deleted. + + .. note:: Deletes of non-existent files are considered successful + since the end result is the same: no file with that _id remains. + + :param file_id: ``"_id"`` of the file to delete + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``delete`` no longer ensures indexes. + """ + _disallow_transactions(session) + await self._files.delete_one({"_id": file_id}, session=session) + await self._chunks.delete_many({"files_id": file_id}, session=session) + + async def list(self, session: Optional[AsyncClientSession] = None) -> list[str]: + """List the names of all files stored in this instance of + :class:`GridFS`. + + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``list`` no longer ensures indexes. + """ + _disallow_transactions(session) + # With an index, distinct includes documents with no filename + # as None. + return [ + name + for name in await self._files.distinct("filename", session=session) + if name is not None + ] + + async def find_one( + self, + filter: Optional[Any] = None, + session: Optional[AsyncClientSession] = None, + *args: Any, + **kwargs: Any, + ) -> Optional[AsyncGridOut]: + """Get a single file from gridfs. + + All arguments to :meth:`find` are also valid arguments for + :meth:`find_one`, although any `limit` argument will be + ignored. Returns a single :class:`~gridfs.grid_file.GridOut`, + or ``None`` if no matching file is found. For example: + + .. 
code-block:: python
+
+ file = fs.find_one({"filename": "lisa.txt"})
+
+ :param filter: a dictionary specifying
+ the query to be performed OR any other type to be used as
+ the value for a query for ``"_id"`` in the file collection.
+ :param args: any additional positional arguments are
+ the same as the arguments to :meth:`find`.
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+ :param kwargs: any additional keyword arguments
+ are the same as the arguments to :meth:`find`.
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ if filter is not None and not isinstance(filter, abc.Mapping):
+ filter = {"_id": filter}
+
+ _disallow_transactions(session)
+ async for f in self.find(filter, *args, session=session, **kwargs):
+ return f
+
+ return None
+
+ def find(self, *args: Any, **kwargs: Any) -> AsyncGridOutCursor:
+ """Query GridFS for files.
+
+ Returns a cursor that iterates across files matching
+ arbitrary queries on the files collection. Can be combined
+ with other modifiers for additional control. For example::
+
+ for grid_out in fs.find({"filename": "lisa.txt"},
+ no_cursor_timeout=True):
+ data = grid_out.read()
+
+ would iterate through all versions of "lisa.txt" stored in GridFS.
+ Note that setting no_cursor_timeout to True may be important to
+ prevent the cursor from timing out during long multi-file processing
+ work.
+
+ As another example, the call::
+
+ most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
+
+ would return a cursor to the three most recently uploaded files
+ in GridFS.
+
+ Follows a similar interface to
+ :meth:`~pymongo.collection.Collection.find`
+ in :class:`~pymongo.collection.Collection`.
+
+ If a :class:`~pymongo.client_session.AsyncClientSession` is passed to
+ :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances
+ are associated with that session.
+
+ :param filter: A query document that selects which files
+ to include in the result set. Can be an empty document to include
+ all files.
+ :param skip: the number of files to omit (from
+ the start of the result set) when returning the results
+ :param limit: the maximum number of results to
+ return
+ :param no_cursor_timeout: if False (the default), any
+ returned cursor is closed by the server after 10 minutes of
+ inactivity. If set to True, the returned cursor will never
+ time out on the server. Care should be taken to ensure that
+ cursors with no_cursor_timeout turned on are properly closed.
+ :param sort: a list of (key, direction) pairs
+ specifying the sort order for this query. See
+ :meth:`~pymongo.cursor.Cursor.sort` for details.
+
+ Raises :class:`TypeError` if any of the arguments are of
+ improper type. Returns an instance of
+ :class:`~gridfs.grid_file.GridOutCursor`
+ corresponding to this query.
+
+ .. versionchanged:: 3.0
+ Removed the read_preference, tag_sets, and
+ secondary_acceptable_latency_ms options.
+ .. versionadded:: 2.7
+ .. seealso:: The MongoDB documentation on `find `_.
+ """
+ return AsyncGridOutCursor(self._collection, *args, **kwargs)
+
+ async def exists(
+ self,
+ document_or_id: Optional[Any] = None,
+ session: Optional[AsyncClientSession] = None,
+ **kwargs: Any,
+ ) -> bool:
+ """Check if a file exists in this instance of :class:`GridFS`.
+
+ The file to check for can be specified by the value of its
+ ``_id`` key, or by passing in a query document. A query
+ document can be passed in as a dictionary, or by using keyword
+ arguments.
Thus, the following three calls are equivalent: + + >>> fs.exists(file_id) + >>> fs.exists({"_id": file_id}) + >>> fs.exists(_id=file_id) + + As are the following two calls: + + >>> fs.exists({"filename": "mike.txt"}) + >>> fs.exists(filename="mike.txt") + + And the following two: + + >>> fs.exists({"foo": {"$gt": 12}}) + >>> fs.exists(foo={"$gt": 12}) + + Returns ``True`` if a matching file exists, ``False`` + otherwise. Calls to :meth:`exists` will not automatically + create appropriate indexes; application developers should be + sure to create indexes if needed and as appropriate. + + :param document_or_id: query document, or _id of the + document to check for + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + :param kwargs: keyword arguments are used as a + query document, if they're present. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + _disallow_transactions(session) + if kwargs: + f = await self._files.find_one(kwargs, ["_id"], session=session) + else: + f = await self._files.find_one(document_or_id, ["_id"], session=session) + + return f is not None + + +class AsyncGridFSBucket: + """An instance of GridFS on top of a single Database.""" + + def __init__( + self, + db: AsyncDatabase[Any], + bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + ) -> None: + """Create a new instance of :class:`GridFSBucket`. + + Raises :exc:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` + is not acknowledged. + + :param database: database to use. + :param bucket_name: The name of the bucket. Defaults to 'fs'. + :param chunk_size_bytes: The chunk size in bytes. Defaults + to 255KB. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use. If ``None`` + (the default) db.write_concern is used. + :param read_preference: The read preference to use. If + ``None`` (the default) db.read_preference is used. + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFSBucket operation in a transaction now always raises + an error. GridFSBucket does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionadded:: 3.1 + + .. seealso:: The MongoDB documentation on `gridfs `_. 
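A construction sketch for the options documented above. This is hedged: the URI, bucket name, and chunk size are placeholders, not part of this patch, and the imports assume the async classes are exported from ``gridfs`` and ``pymongo`` as in PyMongo 4.9+::

    from gridfs import AsyncGridFSBucket
    from pymongo import AsyncMongoClient
    from pymongo.write_concern import WriteConcern

    db = AsyncMongoClient("mongodb://localhost:27017").test  # placeholder URI
    # Note: an unacknowledged write concern (w=0) raises ConfigurationError.
    bucket = AsyncGridFSBucket(
        db,
        bucket_name="images",          # collections become images.files / images.chunks
        chunk_size_bytes=1024 * 1024,  # override the 255KB default
        write_concern=WriteConcern(w="majority"),
    )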
+ """ + if not isinstance(db, AsyncDatabase): + raise TypeError(f"database must be an instance of AsyncDatabase, not {type(db)}") + + db = _clear_entity_type_registry(db) + + wtc = write_concern if write_concern is not None else db.write_concern + if not wtc.acknowledged: + raise ConfigurationError("write concern must be acknowledged") + + self._bucket_name = bucket_name + self._collection = db[bucket_name] + self._chunks: AsyncCollection[Any] = self._collection.chunks.with_options( + write_concern=write_concern, read_preference=read_preference + ) + + self._files: AsyncCollection[Any] = self._collection.files.with_options( + write_concern=write_concern, read_preference=read_preference + ) + + self._chunk_size_bytes = chunk_size_bytes + self._timeout = db.client.options.timeout + + def open_upload_stream( + self, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[AsyncClientSession] = None, + ) -> AsyncGridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the filename, and can choose to add any + additional information in the metadata field of the file document or + modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream( + "test_file", chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to upload. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + + opts = { + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } + if metadata is not None: + opts["metadata"] = metadata + + return AsyncGridIn(self._collection, session=session, **opts) + + def open_upload_stream_with_id( + self, + file_id: Any, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[AsyncClientSession] = None, + ) -> AsyncGridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the file id and filename, and can choose to add + any additional information in the metadata field of the file document + or modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream_with_id( + ObjectId(), + "test_file", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param file_id: The id to use for this file. 
The id must not have
+ already been used for another file.
+ :param filename: The name of the file to upload.
+ :param chunk_size_bytes: The number of bytes per chunk of this
+ file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`.
+ :param metadata: User data for the 'metadata' field of the
+ files collection document. If not provided the metadata field will
+ be omitted from the files collection document.
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ validate_string("filename", filename)
+
+ opts = {
+ "_id": file_id,
+ "filename": filename,
+ "chunk_size": (
+ chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes
+ ),
+ }
+ if metadata is not None:
+ opts["metadata"] = metadata
+
+ return AsyncGridIn(self._collection, session=session, **opts)
+
+ @_csot.apply
+ async def upload_from_stream(
+ self,
+ filename: str,
+ source: Any,
+ chunk_size_bytes: Optional[int] = None,
+ metadata: Optional[Mapping[str, Any]] = None,
+ session: Optional[AsyncClientSession] = None,
+ ) -> ObjectId:
+ """Uploads a user file to a GridFS bucket.
+
+ Reads the contents of the user file from `source` and uploads
+ it to the file `filename`. Source can be a string or file-like object.
+ For example::
+
+ my_db = MongoClient().test
+ fs = GridFSBucket(my_db)
+ file_id = fs.upload_from_stream(
+ "test_file",
+ "data I want to store!",
+ chunk_size_bytes=4,
+ metadata={"contentType": "text/plain"})
+
+ Returns the _id of the uploaded file.
+
+ Raises :exc:`~ValueError` if `filename` is not a string.
+
+ :param filename: The name of the file to upload.
+ :param source: The source stream of the content to be uploaded. Must be
+ a file-like object that implements :meth:`read` or a string.
+ :param chunk_size_bytes: The number of bytes per chunk of this
+ file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
+ :param metadata: User data for the 'metadata' field of the
+ files collection document. If not provided the metadata field will
+ be omitted from the files collection document.
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ async with self.open_upload_stream(
+ filename, chunk_size_bytes, metadata, session=session
+ ) as gin:
+ await gin.write(source)
+
+ return cast(ObjectId, gin._id)
+
+ @_csot.apply
+ async def upload_from_stream_with_id(
+ self,
+ file_id: Any,
+ filename: str,
+ source: Any,
+ chunk_size_bytes: Optional[int] = None,
+ metadata: Optional[Mapping[str, Any]] = None,
+ session: Optional[AsyncClientSession] = None,
+ ) -> None:
+ """Uploads a user file to a GridFS bucket with a custom file id.
+
+ Reads the contents of the user file from `source` and uploads
+ it to the file `filename`. Source can be a string or file-like object.
+ For example::
+
+ my_db = MongoClient().test
+ fs = GridFSBucket(my_db)
+ fs.upload_from_stream_with_id(
+ ObjectId(),
+ "test_file",
+ "data I want to store!",
+ chunk_size_bytes=4,
+ metadata={"contentType": "text/plain"})
+
+ Raises :exc:`~ValueError` if `filename` is not a string.
+
+ :param file_id: The id to use for this file. The id must not have
+ already been used for another file.
+ :param filename: The name of the file to upload.
+
+ :param source: The source stream of the content to be uploaded. Must be
+ a file-like object that implements :meth:`read` or a string.
+ :param chunk_size_bytes: The number of bytes per chunk of this
+ file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
+ :param metadata: User data for the 'metadata' field of the
+ files collection document. If not provided the metadata field will
+ be omitted from the files collection document.
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ async with self.open_upload_stream_with_id(
+ file_id, filename, chunk_size_bytes, metadata, session=session
+ ) as gin:
+ await gin.write(source)
+
+ async def open_download_stream(
+ self, file_id: Any, session: Optional[AsyncClientSession] = None
+ ) -> AsyncGridOut:
+ """Opens a Stream from which the application can read the contents of
+ the stored file specified by file_id.
+
+ For example::
+
+ my_db = MongoClient().test
+ fs = GridFSBucket(my_db)
+ # get _id of file to read.
+ file_id = fs.upload_from_stream("test_file", "data I want to store!")
+ grid_out = fs.open_download_stream(file_id)
+ contents = grid_out.read()
+
+ Returns an instance of :class:`~gridfs.grid_file.GridOut`.
+
+ Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+ :param file_id: The _id of the file to be downloaded.
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ gout = AsyncGridOut(self._collection, file_id, session=session)
+
+ # Raise NoFile now, instead of on first attribute access.
+ await gout.open()
+ return gout
+
+ @_csot.apply
+ async def download_to_stream(
+ self, file_id: Any, destination: Any, session: Optional[AsyncClientSession] = None
+ ) -> None:
+ """Downloads the contents of the stored file specified by file_id and
+ writes the contents to `destination`.
+
+ For example::
+
+ my_db = MongoClient().test
+ fs = GridFSBucket(my_db)
+ # Get _id of file to read
+ file_id = fs.upload_from_stream("test_file", "data I want to store!")
+ # Get file to write to
+ file = open('myfile','wb+')
+ fs.download_to_stream(file_id, file)
+ file.seek(0)
+ contents = file.read()
+
+ Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+ :param file_id: The _id of the file to be downloaded.
+ :param destination: a file-like object implementing :meth:`write`.
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ async with await self.open_download_stream(file_id, session=session) as gout:
+ while True:
+ chunk = await gout.readchunk()
+ if not len(chunk):
+ break
+ destination.write(chunk)
+
+ @_csot.apply
+ async def delete(self, file_id: Any, session: Optional[AsyncClientSession] = None) -> None:
+ """Given a file_id, delete this stored file's files collection document
+ and associated chunks from a GridFS bucket.
+
+ For example::
+
+ my_db = MongoClient().test
+ fs = GridFSBucket(my_db)
+ # Get _id of file to delete
+ file_id = fs.upload_from_stream("test_file", "data I want to store!")
+ fs.delete(file_id)
+
+ Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+ :param file_id: The _id of the file to be deleted.
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
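A round-trip sketch of the upload/download/delete flow just documented, under the same assumptions as the earlier examples (running local server, async classes exported from ``gridfs``); the filename and payload are placeholders::

    import asyncio
    import io

    from gridfs import AsyncGridFSBucket
    from pymongo import AsyncMongoClient

    async def round_trip() -> None:
        bucket = AsyncGridFSBucket(AsyncMongoClient().test)
        file_id = await bucket.upload_from_stream("test_file", b"data I want to store!")
        buf = io.BytesIO()
        await bucket.download_to_stream(file_id, buf)
        assert buf.getvalue() == b"data I want to store!"
        await bucket.delete(file_id)  # a second delete would raise NoFile

    asyncio.run(round_trip())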
+ """ + _disallow_transactions(session) + res = await self._files.delete_one({"_id": file_id}, session=session) + await self._chunks.delete_many({"files_id": file_id}, session=session) + if not res.deleted_count: + raise NoFile("no file could be deleted because none matched %s" % file_id) + + @_csot.apply + async def delete_by_name( + self, filename: str, session: Optional[AsyncClientSession] = None + ) -> None: + """Given a filename, delete this stored file's files collection document(s) + and associated chunks from a GridFS bucket. + + For example:: + + my_db = AsyncMongoClient().test + fs = AsyncGridFSBucket(my_db) + await fs.upload_from_stream("test_file", "data I want to store!") + await fs.delete_by_name("test_file") + + Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists. + + :param filename: The name of the file to be deleted. + :param session: a :class:`~pymongo.client_session.AsyncClientSession` + + .. versionadded:: 4.12 + """ + _disallow_transactions(session) + files = self._files.find({"filename": filename}, {"_id": 1}, session=session) + file_ids = [file["_id"] async for file in files] + res = await self._files.delete_many({"_id": {"$in": file_ids}}, session=session) + await self._chunks.delete_many({"files_id": {"$in": file_ids}}, session=session) + if not res.deleted_count: + raise NoFile(f"no file could be deleted because none matched filename {filename!r}") + + def find(self, *args: Any, **kwargs: Any) -> AsyncGridOutCursor: + """Find and return the files collection documents that match ``filter`` + + Returns a cursor that iterates across files matching + arbitrary queries on the files collection. Can be combined + with other modifiers for additional control. + + For example:: + + for grid_data in fs.find({"filename": "lisa.txt"}, + no_cursor_timeout=True): + data = grid_data.read() + + would iterate through all versions of "lisa.txt" stored in GridFS. + Note that setting no_cursor_timeout to True may be important to + prevent the cursor from timing out during long multi-file processing + work. + + As another example, the call:: + + most_recent_three = fs.find().sort("uploadDate", -1).limit(3) + + would return a cursor to the three most recently uploaded files + in GridFS. + + Follows a similar interface to + :meth:`~pymongo.collection.Collection.find` + in :class:`~pymongo.collection.Collection`. + + If a :class:`~pymongo.client_session.AsyncClientSession` is passed to + :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances + are associated with that session. + + :param filter: Search query. + :param batch_size: The number of documents to return per + batch. + :param limit: The maximum number of documents to return. + :param no_cursor_timeout: The server normally times out idle + cursors after an inactivity period (10 minutes) to prevent excess + memory use. Set this option to True prevent that. + :param skip: The number of documents to skip before + returning. + :param sort: The order by which to sort results. Defaults to + None. + """ + return AsyncGridOutCursor(self._collection, *args, **kwargs) + + async def open_download_stream_by_name( + self, filename: str, revision: int = -1, session: Optional[AsyncClientSession] = None + ) -> AsyncGridOut: + """Opens a Stream from which the application can read the contents of + `filename` and optional `revision`. 
+
+ For example::
+
+ my_db = MongoClient().test
+ fs = GridFSBucket(my_db)
+ grid_out = fs.open_download_stream_by_name("test_file")
+ contents = grid_out.read()
+
+ Returns an instance of :class:`~gridfs.grid_file.GridOut`.
+
+ Raises :exc:`~gridfs.errors.NoFile` if no such version of
+ that file exists.
+
+ Raises :exc:`~ValueError` if `filename` is not a string.
+
+ :param filename: The name of the file to read from.
+ :param revision: Which revision (documents with the same
+ filename and different uploadDate) of the file to retrieve.
+ Defaults to -1 (the most recent revision).
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ :Note: Revision numbers are defined as follows:
+
+ - 0 = the original stored file
+ - 1 = the first revision
+ - 2 = the second revision
+ - etc...
+ - -2 = the second most recent revision
+ - -1 = the most recent revision
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ validate_string("filename", filename)
+ query = {"filename": filename}
+ _disallow_transactions(session)
+ cursor = self._files.find(query, session=session)
+ if revision < 0:
+ skip = abs(revision) - 1
+ cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
+ else:
+ cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING)
+ try:
+ grid_file = await anext(cursor)
+ return AsyncGridOut(self._collection, file_document=grid_file, session=session)
+ except StopAsyncIteration:
+ raise NoFile("no version %d for filename %r" % (revision, filename)) from None
+
+ @_csot.apply
+ async def download_to_stream_by_name(
+ self,
+ filename: str,
+ destination: Any,
+ revision: int = -1,
+ session: Optional[AsyncClientSession] = None,
+ ) -> None:
+ """Write the contents of `filename` (with optional `revision`) to
+ `destination`.
+
+ For example::
+
+ my_db = MongoClient().test
+ fs = GridFSBucket(my_db)
+ # Get file to write to
+ file = open('myfile','wb')
+ fs.download_to_stream_by_name("test_file", file)
+
+ Raises :exc:`~gridfs.errors.NoFile` if no such version of
+ that file exists.
+
+ Raises :exc:`~ValueError` if `filename` is not a string.
+
+ :param filename: The name of the file to read from.
+ :param destination: A file-like object that implements :meth:`write`.
+ :param revision: Which revision (documents with the same
+ filename and different uploadDate) of the file to retrieve.
+ Defaults to -1 (the most recent revision).
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ :Note: Revision numbers are defined as follows:
+
+ - 0 = the original stored file
+ - 1 = the first revision
+ - 2 = the second revision
+ - etc...
+ - -2 = the second most recent revision
+ - -1 = the most recent revision
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ async with await self.open_download_stream_by_name(
+ filename, revision, session=session
+ ) as gout:
+ while True:
+ chunk = await gout.readchunk()
+ if not len(chunk):
+ break
+ destination.write(chunk)
+
+ async def rename(
+ self, file_id: Any, new_filename: str, session: Optional[AsyncClientSession] = None
+ ) -> None:
+ """Renames the stored file with the specified file_id.
+
+ For example::
+
+ my_db = MongoClient().test
+ fs = GridFSBucket(my_db)
+ # Get _id of file to rename
+ file_id = fs.upload_from_stream("test_file", "data I want to store!")
+ fs.rename(file_id, "new_test_name")
+
+ Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+ :param file_id: The _id of the file to be renamed.
+ :param new_filename: The new name of the file.
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession`
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+ """
+ _disallow_transactions(session)
+ result = await self._files.update_one(
+ {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session
+ )
+ if not result.matched_count:
+ raise NoFile(
+ "no files could be renamed %r because none "
+ "matched file_id %r" % (new_filename, file_id)
+ )
+
+ async def rename_by_name(
+ self, filename: str, new_filename: str, session: Optional[AsyncClientSession] = None
+ ) -> None:
+ """Renames the stored file with the specified filename.
+
+ For example::
+
+ my_db = AsyncMongoClient().test
+ fs = AsyncGridFSBucket(my_db)
+ await fs.upload_from_stream("test_file", "data I want to store!")
+ await fs.rename_by_name("test_file", "new_test_name")
+
+ Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists.
+
+ :param filename: The filename of the file to be renamed.
+ :param new_filename: The new name of the file.
+ :param session: a :class:`~pymongo.client_session.AsyncClientSession`
+
+ .. versionadded:: 4.12
+ """
+ _disallow_transactions(session)
+ result = await self._files.update_many(
+ {"filename": filename}, {"$set": {"filename": new_filename}}, session=session
+ )
+ if not result.matched_count:
+ raise NoFile(
+ f"no files could be renamed {new_filename!r} because none matched filename {filename!r}"
+ )
+
+
+class AsyncGridIn:
+ """Class to write data to GridFS."""
+
+ def __init__(
+ self,
+ root_collection: AsyncCollection[Any],
+ session: Optional[AsyncClientSession] = None,
+ **kwargs: Any,
+ ) -> None:
+ """Write a file to GridFS.
+
+ Application developers should generally not need to
+ instantiate this class directly - instead see the methods
+ provided by :class:`~gridfs.GridFS`.
+
+ Raises :class:`TypeError` if `root_collection` is not an
+ instance of :class:`~pymongo.collection.AsyncCollection`.
+
+ Any of the file level options specified in the `GridFS Spec
+ `_ may be passed as
+ keyword arguments. Any additional keyword arguments will be
+ set as additional fields on the file document. Valid keyword
+ arguments include:
+
+ - ``"_id"``: unique ID for this file (default:
+ :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
+ not have already been used for another file
+
+ - ``"filename"``: human name for the file
+
+ - ``"contentType"`` or ``"content_type"``: valid mime-type
+ for the file
+
+ - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
+ chunks, in bytes (default: 255 kb)
+
+ - ``"encoding"``: encoding used for this file. Any :class:`str`
+ that is written to the file will be converted to :class:`bytes`.
+
+ :param root_collection: root collection to write to
+ :param session: a
+ :class:`~pymongo.client_session.AsyncClientSession` to use for all
+ commands
+ :param kwargs: file level options (see above)
+
+ .. versionchanged:: 4.0
+ Removed the `disable_md5` parameter. See
+ :ref:`removed-gridfs-checksum` for details.
+
+ .. versionchanged:: 3.7
+ Added the `disable_md5` parameter.
+
+ .. versionchanged:: 3.6
+ Added ``session`` parameter.
+
+ ..
versionchanged:: 3.0 + `root_collection` must use an acknowledged + :attr:`~pymongo.collection.AsyncCollection.write_concern` + """ + if not isinstance(root_collection, AsyncCollection): + raise TypeError( + f"root_collection must be an instance of AsyncCollection, not {type(root_collection)}" + ) + + if not root_collection.write_concern.acknowledged: + raise ConfigurationError("root_collection must use acknowledged write_concern") + _disallow_transactions(session) + + # Handle alternative naming + if "content_type" in kwargs: + kwargs["contentType"] = kwargs.pop("content_type") + if "chunk_size" in kwargs: + kwargs["chunkSize"] = kwargs.pop("chunk_size") + + coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY) + + # Defaults + kwargs["_id"] = kwargs.get("_id", ObjectId()) + kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) + object.__setattr__(self, "_session", session) + object.__setattr__(self, "_coll", coll) + object.__setattr__(self, "_chunks", coll.chunks) + object.__setattr__(self, "_file", kwargs) + object.__setattr__(self, "_buffer", io.BytesIO()) + object.__setattr__(self, "_position", 0) + object.__setattr__(self, "_chunk_number", 0) + object.__setattr__(self, "_closed", False) + object.__setattr__(self, "_ensured_index", False) + object.__setattr__(self, "_buffered_docs", []) + object.__setattr__(self, "_buffered_docs_size", 0) + + async def _create_index( + self, collection: AsyncCollection[Any], index_key: Any, unique: bool + ) -> None: + doc = await collection.find_one(projection={"_id": 1}, session=self._session) + if doc is None: + try: + index_keys = [ + index_spec["key"] + async for index_spec in await collection.list_indexes(session=self._session) + ] + except OperationFailure: + index_keys = [] + if index_key not in index_keys: + await collection.create_index( + index_key.items(), unique=unique, session=self._session + ) + + async def _ensure_indexes(self) -> None: + if not object.__getattribute__(self, "_ensured_index"): + _disallow_transactions(self._session) + await self._create_index(self._coll.files, _F_INDEX, False) + await self._create_index(self._coll.chunks, _C_INDEX, True) + object.__setattr__(self, "_ensured_index", True) + + async def abort(self) -> None: + """Remove all chunks/files that may have been uploaded and close.""" + await self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session) + await self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session) + object.__setattr__(self, "_closed", True) + + @property + def closed(self) -> bool: + """Is this file closed?""" + return self._closed + + _id: Any = _a_grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) + filename: Optional[str] = _a_grid_in_property("filename", "Name of this file.") + name: Optional[str] = _a_grid_in_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _a_grid_in_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _a_grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) + chunk_size: int = _a_grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) + upload_date: datetime.datetime = _a_grid_in_property( + "uploadDate", "Date that this file was uploaded.", closed_only=True + ) + md5: Optional[str] = _a_grid_in_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. 
MD5 of the contents of this file if an md5 sum was created.", + closed_only=True, + ) + + _buffer: io.BytesIO + _closed: bool + _buffered_docs: list[dict[str, Any]] + _buffered_docs_size: int + + def __getattr__(self, name: str) -> Any: + if name == "_coll": + return object.__getattribute__(self, name) + elif name in self._file: + return self._file[name] + raise AttributeError("GridIn object has no attribute '%s'" % name) + + def __setattr__(self, name: str, value: Any) -> None: + # For properties of this instance like _buffer, or descriptors set on + # the class like filename, use regular __setattr__ + if name in self.__dict__ or name in self.__class__.__dict__: + object.__setattr__(self, name, value) + else: + # All other attributes are part of the document in db.fs.files. + # Store them to be sent to server on close() or if closed, send + # them now. + self._file[name] = value + if self._closed: + if _IS_SYNC: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + else: + raise AttributeError( + "AsyncGridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use AsyncGridIn.set() instead" + ) + + async def set(self, name: str, value: Any) -> None: + self._file[name] = value + if self._closed: + await self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + + async def _flush_data(self, data: Any, force: bool = False) -> None: + """Flush `data` to a chunk.""" + await self._ensure_indexes() + assert len(data) <= self.chunk_size + if data: + self._buffered_docs.append( + {"files_id": self._file["_id"], "n": self._chunk_number, "data": data} + ) + self._buffered_docs_size += len(data) + _CHUNK_OVERHEAD + if not self._buffered_docs: + return + # Limit to 100,000 chunks or 32MB (+1 chunk) of data. + if ( + force + or self._buffered_docs_size >= _UPLOAD_BUFFER_SIZE + or len(self._buffered_docs) >= _UPLOAD_BUFFER_CHUNKS + ): + try: + await self._chunks.insert_many(self._buffered_docs, session=self._session) + except BulkWriteError as exc: + # For backwards compatibility, raise an insert_one style exception. + write_errors = exc.details["writeErrors"] + for err in write_errors: + if err.get("code") in (11000, 11001, 12582): # Duplicate key errors + self._raise_file_exists(self._file["_id"]) + result = {"writeErrors": write_errors} + wces = exc.details["writeConcernErrors"] + if wces: + result["writeConcernError"] = wces[-1] + _check_write_command_response(result) + raise + self._buffered_docs = [] + self._buffered_docs_size = 0 + self._chunk_number += 1 + self._position += len(data) + + async def _flush_buffer(self, force: bool = False) -> None: + """Flush the buffer contents out to a chunk.""" + await self._flush_data(self._buffer.getvalue(), force=force) + self._buffer.close() + self._buffer = io.BytesIO() + + async def _flush(self) -> Any: + """Flush the file to the database.""" + try: + await self._flush_buffer(force=True) + # The GridFS spec says length SHOULD be an Int64. + self._file["length"] = Int64(self._position) + self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) + + return await self._coll.files.insert_one(self._file, session=self._session) + except DuplicateKeyError: + self._raise_file_exists(self._id) + + def _raise_file_exists(self, file_id: Any) -> NoReturn: + """Raise a FileExists exception for the given file_id.""" + raise FileExists("file with _id %r already exists" % file_id) + + async def close(self) -> None: + """Flush the file and close it. 
+ + A closed file cannot be written any more. Calling + :meth:`close` more than once is allowed. + """ + if not self._closed: + await self._flush() + object.__setattr__(self, "_closed", True) + + def read(self, size: int = -1) -> NoReturn: + raise io.UnsupportedOperation("read") + + def readable(self) -> bool: + return False + + def seekable(self) -> bool: + return False + + async def write(self, data: Any) -> None: + """Write data to the file. There is no return value. + + `data` can be either a string of bytes or a file-like object + (implementing :meth:`read`). If the file has an + :attr:`encoding` attribute, `data` can also be a + :class:`str` instance, which will be encoded as + :attr:`encoding` before being written. + + Due to buffering, the data may not actually be written to the + database until the :meth:`close` method is called. Raises + :class:`ValueError` if this file is already closed. Raises + :class:`TypeError` if `data` is not an instance of + :class:`bytes`, a file-like object, or an instance of :class:`str`. + Unicode data is only allowed if the file has an :attr:`encoding` + attribute. + + :param data: string of bytes or file-like object to be written + to the file + """ + if self._closed: + raise ValueError("cannot write to a closed file") + + try: + # file-like + read = data.read + except AttributeError: + # string + if not isinstance(data, (str, bytes)): + raise TypeError("can only write strings or file-like objects") from None + if isinstance(data, str): + try: + data = data.encode(self.encoding) + except AttributeError: + raise TypeError( + "must specify an encoding for file in order to write str" + ) from None + read = io.BytesIO(data).read + + if inspect.iscoroutinefunction(read): + await self._write_async(read) + else: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = read(space) + except BaseException: + await self.abort() + raise + self._buffer.write(to_write) + if len(to_write) < space: + return # EOF or incomplete + await self._flush_buffer() + to_write = read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: + await self._flush_data(to_write) + to_write = read(self.chunk_size) + self._buffer.write(to_write) + + async def _write_async(self, read: Any) -> None: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = await read(space) + except BaseException: + await self.abort() + raise + self._buffer.write(to_write) + if len(to_write) < space: + return # EOF or incomplete + await self._flush_buffer() + to_write = await read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: + await self._flush_data(to_write) + to_write = await read(self.chunk_size) + self._buffer.write(to_write) + + async def writelines(self, sequence: Iterable[Any]) -> None: + """Write a sequence of strings to the file. + + Does not add separators. + """ + for line in sequence: + await self.write(line) + + def writeable(self) -> bool: + return True + + async def __aenter__(self) -> AsyncGridIn: + """Support for the context manager protocol.""" + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: + """Support for the context manager protocol. + + Close the file if no exceptions occur and allow exceptions to propagate. + """ + if exc_type is None: + # No exceptions happened. 
+ await self.close() + else: + # Something happened, at minimum mark as closed. + object.__setattr__(self, "_closed", True) + + # propagate exceptions + return False + + +GRIDOUT_BASE_CLASS = io.IOBase if _IS_SYNC else object # type: Any + + +class AsyncGridOut(GRIDOUT_BASE_CLASS): # type: ignore + + """Class to read data out of GridFS.""" + + def __init__( + self, + root_collection: AsyncCollection[Any], + file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[AsyncClientSession] = None, + ) -> None: + """Read a file from GridFS + + Application developers should generally not need to + instantiate this class directly - instead see the methods + provided by :class:`~gridfs.GridFS`. + + Either `file_id` or `file_document` must be specified, + `file_document` will be given priority if present. Raises + :class:`TypeError` if `root_collection` is not an instance of + :class:`~pymongo.collection.AsyncCollection`. + + :param root_collection: root collection to read from + :param file_id: value of ``"_id"`` for the file to read + :param file_document: file document from + `root_collection.files` + :param session: a + :class:`~pymongo.client_session.AsyncClientSession` to use for all + commands + + .. versionchanged:: 3.8 + For better performance and to better follow the GridFS spec, + :class:`GridOut` now uses a single cursor to read all the chunks in + the file. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.0 + Creating a GridOut does not immediately retrieve the file metadata + from the server. Metadata is fetched when first needed. + """ + if not isinstance(root_collection, AsyncCollection): + raise TypeError( + f"root_collection must be an instance of AsyncCollection, not {type(root_collection)}" + ) + _disallow_transactions(session) + + root_collection = _clear_entity_type_registry(root_collection) + + super().__init__() + + self._chunks = root_collection.chunks + self._files = root_collection.files + self._file_id = file_id + self._buffer = EMPTY + # Start position within the current buffered chunk. + self._buffer_pos = 0 + self._chunk_iter = None + # Position within the total file. + self._position = 0 + self._file = file_document + self._session = session + if not _IS_SYNC: + self.closed = False + + _id: Any = _a_grid_out_property("_id", "The ``'_id'`` value for this file.") + filename: str = _a_grid_out_property("filename", "Name of this file.") + name: str = _a_grid_out_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _a_grid_out_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _a_grid_out_property("length", "Length (in bytes) of this file.") + chunk_size: int = _a_grid_out_property("chunkSize", "Chunk size for this file.") + upload_date: datetime.datetime = _a_grid_out_property( + "uploadDate", "Date that this file was first uploaded." + ) + aliases: Optional[list[str]] = _a_grid_out_property( + "aliases", "DEPRECATED, will be removed in PyMongo 5.0. List of aliases for this file." + ) + metadata: Optional[Mapping[str, Any]] = _a_grid_out_property( + "metadata", "Metadata attached to this file." + ) + md5: Optional[str] = _a_grid_out_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. 
MD5 of the contents of this file if an md5 sum was created.", + ) + + _file: Any + _chunk_iter: Any + + if not _IS_SYNC: + closed: bool + + async def __anext__(self) -> bytes: + line = await self.readline() + if line: + return line + raise StopAsyncIteration() + + async def to_list(self) -> list[bytes]: + return [x async for x in self] # noqa: C416, RUF100 + + async def readline(self, size: int = -1) -> bytes: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + return await self._read_size_or_line(size=size, line=True) + + async def readlines(self, size: int = -1) -> list[bytes]: + """Read one line or up to `size` bytes from the file. + + :param size: the maximum number of bytes to read + """ + await self.open() + lines = [] + remainder = int(self.length) - self._position + bytes_read = 0 + while remainder > 0: + line = await self._read_size_or_line(line=True) + bytes_read += len(line) + lines.append(line) + remainder = int(self.length) - self._position + if 0 < size < bytes_read: + break + + return lines + + async def open(self) -> None: + if not self._file: + _disallow_transactions(self._session) + self._file = await self._files.find_one({"_id": self._file_id}, session=self._session) + if not self._file: + raise NoFile( + f"no file in gridfs collection {self._files!r} with _id {self._file_id!r}" + ) + + def __getattr__(self, name: str) -> Any: + if _IS_SYNC: + self.open() # type: ignore[unused-coroutine] + elif not self._file: + raise InvalidOperation( + "You must call AsyncGridOut.open() before accessing the %s property" % name + ) + if name in self._file: + return self._file[name] + raise AttributeError("GridOut object has no attribute '%s'" % name) + + def readable(self) -> bool: + return True + + async def readchunk(self) -> bytes: + """Reads a chunk at a time. If the current position is within a + chunk the remainder of the chunk is returned. + """ + await self.open() + received = len(self._buffer) - self._buffer_pos + chunk_data = EMPTY + chunk_size = int(self.chunk_size) + + if received > 0: + chunk_data = self._buffer[self._buffer_pos :] + elif self._position < int(self.length): + chunk_number = int((received + self._position) / chunk_size) + if self._chunk_iter is None: + self._chunk_iter = _AsyncGridOutChunkIterator( + self, self._chunks, self._session, chunk_number + ) + + chunk = await self._chunk_iter.next() + chunk_data = chunk["data"][self._position % chunk_size :] + + if not chunk_data: + raise CorruptGridFile("truncated chunk") + + self._position += len(chunk_data) + self._buffer = EMPTY + self._buffer_pos = 0 + return chunk_data + + async def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes: + """Internal read() and readline() helper.""" + await self.open() + remainder = int(self.length) - self._position + if size < 0 or size > remainder: + size = remainder + + if size == 0: + return EMPTY + + received = 0 + data = [] + while received < size: + needed = size - received + if self._buffer: + # Optimization: Read the buffer with zero byte copies. + buf = self._buffer + chunk_start = self._buffer_pos + chunk_data = memoryview(buf)[self._buffer_pos :] + self._buffer = EMPTY + self._buffer_pos = 0 + self._position += len(chunk_data) + else: + buf = await self.readchunk() + chunk_start = 0 + chunk_data = memoryview(buf) + if line: + pos = buf.find(NEWLN, chunk_start, chunk_start + needed) - chunk_start + if pos >= 0: + # Decrease size to exit the loop. 
+ size = received + pos + 1 + needed = pos + 1 + if len(chunk_data) > needed: + data.append(chunk_data[:needed]) + # Optimization: Save the buffer with zero byte copies. + self._buffer = buf + self._buffer_pos = chunk_start + needed + self._position -= len(self._buffer) - self._buffer_pos + else: + data.append(chunk_data) + received += len(chunk_data) + + # Detect extra chunks after reading the entire file. + if size == remainder and self._chunk_iter: + try: + await self._chunk_iter.next() + except StopAsyncIteration: + pass + + return b"".join(data) + + async def read(self, size: int = -1) -> bytes: + """Read at most `size` bytes from the file (less if there + isn't enough data). + + The bytes are returned as an instance of :class:`bytes` + If `size` is negative or omitted all data is read. + + :param size: the number of bytes to read + + .. versionchanged:: 3.8 + This method now only checks for extra chunks after reading the + entire file. Previously, this method would check for extra chunks + on every call. + """ + return await self._read_size_or_line(size=size) + + def tell(self) -> int: + """Return the current position of this file.""" + return self._position + + async def seek(self, pos: int, whence: int = _SEEK_SET) -> int: + """Set the current position of this file. + + :param pos: the position (or offset if using relative + positioning) to seek to + :param whence: where to seek + from. :attr:`os.SEEK_SET` (``0``) for absolute file + positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative + to the current position, :attr:`os.SEEK_END` (``2``) to + seek relative to the file's end. + + .. versionchanged:: 4.1 + The method now returns the new position in the file, to + conform to the behavior of :meth:`io.IOBase.seek`. + """ + if whence == _SEEK_SET: + new_pos = pos + elif whence == _SEEK_CUR: + new_pos = self._position + pos + elif whence == _SEEK_END: + new_pos = int(self.length) + pos + else: + raise OSError(22, "Invalid value for `whence`") + + if new_pos < 0: + raise OSError(22, "Invalid value for `pos` - must be positive") + + # Optimization, continue using the same buffer and chunk iterator. + if new_pos == self._position: + return new_pos + + self._position = new_pos + self._buffer = EMPTY + self._buffer_pos = 0 + if self._chunk_iter: + await self._chunk_iter.close() + self._chunk_iter = None + return new_pos + + def seekable(self) -> bool: + return True + + def __aiter__(self) -> AsyncGridOut: + """Return an iterator over all of this file's data. + + The iterator will return lines (delimited by ``b'\\n'``) of + :class:`bytes`. This can be useful when serving files + using a webserver that handles such an iterator efficiently. + + .. versionchanged:: 3.8 + The iterator now raises :class:`CorruptGridFile` when encountering + any truncated, missing, or extra chunk in a file. The previous + behavior was to only raise :class:`CorruptGridFile` on a missing + chunk. + + .. versionchanged:: 4.0 + The iterator now iterates over *lines* in the file, instead + of chunks, to conform to the base class :py:class:`io.IOBase`. + Use :meth:`GridOut.readchunk` to read chunk by chunk instead + of line by line. 
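Since ``__aiter__`` yields lines rather than chunks (per the 4.0 note above), a small consumer sketch may help; it is a hypothetical helper, assuming an ``AsyncGridFSBucket`` opened as in the earlier examples::

    async def print_lines(bucket, file_id) -> None:
        grid_out = await bucket.open_download_stream(file_id)
        async for line in grid_out:  # lines are bytes, delimited by b"\n"
            print(line.decode("utf-8", errors="replace").rstrip("\n"))
        await grid_out.close()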
+ """ + return self + + async def close(self) -> None: + """Make GridOut more generically file-like.""" + if self._chunk_iter: + await self._chunk_iter.close() + self._chunk_iter = None + if _IS_SYNC: + super().close() + else: + self.closed = True + + def write(self, value: Any) -> NoReturn: + raise io.UnsupportedOperation("write") + + def writelines(self, lines: Any) -> NoReturn: + raise io.UnsupportedOperation("writelines") + + def writable(self) -> bool: + return False + + async def __aenter__(self) -> AsyncGridOut: + """Makes it possible to use :class:`AsyncGridOut` files + with the async context manager protocol. + """ + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: + """Makes it possible to use :class:`AsyncGridOut` files + with the async context manager protocol. + """ + await self.close() + return False + + def fileno(self) -> NoReturn: + raise io.UnsupportedOperation("fileno") + + def flush(self) -> None: + # GridOut is read-only, so flush does nothing. + pass + + def isatty(self) -> bool: + return False + + def truncate(self, size: Optional[int] = None) -> NoReturn: + # See https://docs.python.org/3/library/io.html#io.IOBase.writable + # for why truncate has to raise. + raise io.UnsupportedOperation("truncate") + + # Override IOBase.__del__ otherwise it will lead to __getattr__ on + # __IOBase_closed which calls _ensure_file and potentially performs I/O. + # We cannot do I/O in __del__ since it can lead to a deadlock. + def __del__(self) -> None: + pass + + +class _AsyncGridOutChunkIterator: + """Iterates over a file's chunks using a single cursor. + + Raises CorruptGridFile when encountering any truncated, missing, or extra + chunk in a file. + """ + + def __init__( + self, + grid_out: AsyncGridOut, + chunks: AsyncCollection[Any], + session: Optional[AsyncClientSession], + next_chunk: Any, + ) -> None: + self._id = grid_out._id + self._chunk_size = int(grid_out.chunk_size) + self._length = int(grid_out.length) + self._chunks = chunks + self._session = session + self._next_chunk = next_chunk + self._num_chunks = math.ceil(float(self._length) / self._chunk_size) + self._cursor = None + + _cursor: Optional[AsyncCursor[Any]] + + def expected_chunk_length(self, chunk_n: int) -> int: + if chunk_n < self._num_chunks - 1: + return self._chunk_size + return self._length - (self._chunk_size * (self._num_chunks - 1)) + + def __aiter__(self) -> _AsyncGridOutChunkIterator: + return self + + def _create_cursor(self) -> None: + filter = {"files_id": self._id} + if self._next_chunk > 0: + filter["n"] = {"$gte": self._next_chunk} + _disallow_transactions(self._session) + self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) + + async def _next_with_retry(self) -> Mapping[str, Any]: + """Return the next chunk and retry once on CursorNotFound. + + We retry on CursorNotFound to maintain backwards compatibility in + cases where two calls to read occur more than 10 minutes apart (the + server's default cursor timeout). 
+ """ + if self._cursor is None: + self._create_cursor() + assert self._cursor is not None + try: + return await self._cursor.next() + except CursorNotFound: + await self._cursor.close() + self._create_cursor() + return await self._cursor.next() + + async def next(self) -> Mapping[str, Any]: + try: + chunk = await self._next_with_retry() + except StopAsyncIteration: + if self._next_chunk >= self._num_chunks: + raise + raise CorruptGridFile("no chunk #%d" % self._next_chunk) from None + + if chunk["n"] != self._next_chunk: + await self.close() + raise CorruptGridFile( + "Missing chunk: expected chunk #%d but found " + "chunk with n=%d" % (self._next_chunk, chunk["n"]) + ) + + if chunk["n"] >= self._num_chunks: + # According to spec, ignore extra chunks if they are empty. + if len(chunk["data"]): + await self.close() + raise CorruptGridFile( + "Extra chunk found: expected %d chunks but found " + "chunk with n=%d" % (self._num_chunks, chunk["n"]) + ) + + expected_length = self.expected_chunk_length(chunk["n"]) + if len(chunk["data"]) != expected_length: + await self.close() + raise CorruptGridFile( + "truncated chunk #%d: expected chunk length to be %d but " + "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"])) + ) + + self._next_chunk += 1 + return chunk + + __anext__ = next + + async def close(self) -> None: + if self._cursor: + await self._cursor.close() + self._cursor = None + + +class AsyncGridOutIterator: + def __init__( + self, grid_out: AsyncGridOut, chunks: AsyncCollection[Any], session: AsyncClientSession + ): + self._chunk_iter = _AsyncGridOutChunkIterator(grid_out, chunks, session, 0) + + def __aiter__(self) -> AsyncGridOutIterator: + return self + + async def next(self) -> bytes: + chunk = await self._chunk_iter.next() + return bytes(chunk["data"]) + + __anext__ = next + + +class AsyncGridOutCursor(AsyncCursor): # type: ignore[type-arg] + """A cursor / iterator for returning GridOut objects as the result + of an arbitrary query against the GridFS files collection. + """ + + def __init__( + self, + collection: AsyncCollection[Any], + filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[AsyncClientSession] = None, + ) -> None: + """Create a new cursor, similar to the normal + :class:`~pymongo.cursor.Cursor`. + + Should not be called directly by application developers - see + the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead. + + .. versionadded 2.7 + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + _disallow_transactions(session) + collection = _clear_entity_type_registry(collection) + + # Hold on to the base "fs" collection to create GridOut objects later. 
+ self._root_collection = collection + + super().__init__( + collection.files, + filter, + skip=skip, + limit=limit, + no_cursor_timeout=no_cursor_timeout, + sort=sort, + batch_size=batch_size, + session=session, + ) + + async def next(self) -> AsyncGridOut: + """Get next GridOut object from cursor.""" + _disallow_transactions(self.session) + next_file = await super().next() + return AsyncGridOut(self._root_collection, file_document=next_file, session=self.session) + + async def to_list(self, length: Optional[int] = None) -> list[AsyncGridOut]: + """Convert the cursor to a list.""" + if length is None: + return [x async for x in self] # noqa: C416,RUF100 + if length < 1: + raise ValueError("to_list() length must be greater than 0") + ret = [] + for _ in range(length): + ret.append(await self.next()) + return ret + + __anext__ = next + + def add_option(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError("Method does not exist for GridOutCursor") + + def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError("Method does not exist for GridOutCursor") + + def _clone_base(self, session: Optional[AsyncClientSession]) -> AsyncGridOutCursor: + """Creates an empty GridOutCursor for information to be copied into.""" + return AsyncGridOutCursor(self._root_collection, session=session) diff --git a/gridfs/errors.py b/gridfs/errors.py index 39736d55b3..e8c02cef4f 100644 --- a/gridfs/errors.py +++ b/gridfs/errors.py @@ -13,6 +13,7 @@ # limitations under the License. """Exceptions raised by the :mod:`gridfs` package""" +from __future__ import annotations from pymongo.errors import PyMongoError diff --git a/gridfs/grid_file.py b/gridfs/grid_file.py index cec7d57a22..b2cab71515 100644 --- a/gridfs/grid_file.py +++ b/gridfs/grid_file.py @@ -1,4 +1,4 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,896 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for representing files stored in GridFS.""" -import datetime -import io -import math -import os -from typing import Any, Iterable, List, Mapping, NoReturn, Optional +"""Re-import of synchronous gridfs API for compatibility.""" +from __future__ import annotations -from bson.binary import Binary -from bson.int64 import Int64 -from bson.objectid import ObjectId -from bson.son import SON -from gridfs.errors import CorruptGridFile, FileExists, NoFile -from pymongo import ASCENDING -from pymongo.client_session import ClientSession -from pymongo.collection import Collection -from pymongo.cursor import Cursor -from pymongo.errors import ( - ConfigurationError, - CursorNotFound, - DuplicateKeyError, - InvalidOperation, - OperationFailure, -) -from pymongo.read_preferences import ReadPreference - -_SEEK_SET = os.SEEK_SET -_SEEK_CUR = os.SEEK_CUR -_SEEK_END = os.SEEK_END - -EMPTY = b"" -NEWLN = b"\n" - -"""Default chunk size, in bytes.""" -# Slightly under a power of 2, to work well with server's record allocations. 
-DEFAULT_CHUNK_SIZE = 255 * 1024 - -_C_INDEX: SON[str, Any] = SON([("files_id", ASCENDING), ("n", ASCENDING)]) -_F_INDEX: SON[str, Any] = SON([("filename", ASCENDING), ("uploadDate", ASCENDING)]) - - -def _grid_in_property( - field_name: str, - docstring: str, - read_only: Optional[bool] = False, - closed_only: Optional[bool] = False, -) -> Any: - """Create a GridIn property.""" - - def getter(self: Any) -> Any: - if closed_only and not self._closed: - raise AttributeError("can only get %r on a closed file" % field_name) - # Protect against PHP-237 - if field_name == "length": - return self._file.get(field_name, 0) - return self._file.get(field_name, None) - - def setter(self: Any, value: Any) -> Any: - if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) - self._file[field_name] = value - - if read_only: - docstring += "\n\nThis attribute is read-only." - elif closed_only: - docstring = "%s\n\n%s" % ( - docstring, - "This attribute is read-only and " - "can only be read after :meth:`close` " - "has been called.", - ) - - if not read_only and not closed_only: - return property(getter, setter, doc=docstring) - return property(getter, doc=docstring) - - -def _grid_out_property(field_name: str, docstring: str) -> Any: - """Create a GridOut property.""" - - def getter(self: Any) -> Any: - self._ensure_file() - - # Protect against PHP-237 - if field_name == "length": - return self._file.get(field_name, 0) - return self._file.get(field_name, None) - - docstring += "\n\nThis attribute is read-only." - return property(getter, doc=docstring) - - -def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: - """Clear the given database/collection object's type registry.""" - codecopts = entity.codec_options.with_options(type_registry=None) - return entity.with_options(codec_options=codecopts, **kwargs) - - -def _disallow_transactions(session: Optional[ClientSession]) -> None: - if session and session.in_transaction: - raise InvalidOperation("GridFS does not support multi-document transactions") - - -class GridIn(object): - """Class to write data to GridFS.""" - - def __init__( - self, root_collection: Collection, session: Optional[ClientSession] = None, **kwargs: Any - ) -> None: - """Write a file to GridFS - - Application developers should generally not need to - instantiate this class directly - instead see the methods - provided by :class:`~gridfs.GridFS`. - - Raises :class:`TypeError` if `root_collection` is not an - instance of :class:`~pymongo.collection.Collection`. - - Any of the file level options specified in the `GridFS Spec - `_ may be passed as - keyword arguments. Any additional keyword arguments will be - set as additional fields on the file document. Valid keyword - arguments include: - - - ``"_id"``: unique ID for this file (default: - :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must - not have already been used for another file - - - ``"filename"``: human name for the file - - - ``"contentType"`` or ``"content_type"``: valid mime-type - for the file - - - ``"chunkSize"`` or ``"chunk_size"``: size of each of the - chunks, in bytes (default: 255 kb) - - - ``"encoding"``: encoding used for this file. Any :class:`str` - that is written to the file will be converted to :class:`bytes`. 
- - :Parameters: - - `root_collection`: root collection to write to - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` to use for all - commands - - `**kwargs: Any` (optional): file level options (see above) - - .. versionchanged:: 4.0 - Removed the `disable_md5` parameter. See - :ref:`removed-gridfs-checksum` for details. - - .. versionchanged:: 3.7 - Added the `disable_md5` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.0 - `root_collection` must use an acknowledged - :attr:`~pymongo.collection.Collection.write_concern` - """ - if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an instance of Collection") - - if not root_collection.write_concern.acknowledged: - raise ConfigurationError("root_collection must use acknowledged write_concern") - _disallow_transactions(session) - - # Handle alternative naming - if "content_type" in kwargs: - kwargs["contentType"] = kwargs.pop("content_type") - if "chunk_size" in kwargs: - kwargs["chunkSize"] = kwargs.pop("chunk_size") - - coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY) - - # Defaults - kwargs["_id"] = kwargs.get("_id", ObjectId()) - kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) - object.__setattr__(self, "_session", session) - object.__setattr__(self, "_coll", coll) - object.__setattr__(self, "_chunks", coll.chunks) - object.__setattr__(self, "_file", kwargs) - object.__setattr__(self, "_buffer", io.BytesIO()) - object.__setattr__(self, "_position", 0) - object.__setattr__(self, "_chunk_number", 0) - object.__setattr__(self, "_closed", False) - object.__setattr__(self, "_ensured_index", False) - - def __create_index(self, collection: Collection, index_key: Any, unique: bool) -> None: - doc = collection.find_one(projection={"_id": 1}, session=self._session) - if doc is None: - try: - index_keys = [ - index_spec["key"] - for index_spec in collection.list_indexes(session=self._session) - ] - except OperationFailure: - index_keys = [] - if index_key not in index_keys: - collection.create_index(index_key.items(), unique=unique, session=self._session) - - def __ensure_indexes(self) -> None: - if not object.__getattribute__(self, "_ensured_index"): - _disallow_transactions(self._session) - self.__create_index(self._coll.files, _F_INDEX, False) - self.__create_index(self._coll.chunks, _C_INDEX, True) - object.__setattr__(self, "_ensured_index", True) - - def abort(self) -> None: - """Remove all chunks/files that may have been uploaded and close.""" - self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session) - self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session) - object.__setattr__(self, "_closed", True) - - @property - def closed(self) -> bool: - """Is this file closed?""" - return self._closed - - _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) - filename: Optional[str] = _grid_in_property("filename", "Name of this file.") - name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.") - content_type: Optional[str] = _grid_in_property("contentType", "Mime-type for this file.") - length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) - chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) - upload_date: datetime.datetime = _grid_in_property( - "uploadDate", "Date that this file was 
uploaded.", closed_only=True - ) - md5: Optional[str] = _grid_in_property( - "md5", "MD5 of the contents of this file if an md5 sum was created.", closed_only=True - ) - - _buffer: io.BytesIO - _closed: bool - - def __getattr__(self, name: str) -> Any: - if name in self._file: - return self._file[name] - raise AttributeError("GridIn object has no attribute '%s'" % name) - - def __setattr__(self, name: str, value: Any) -> None: - # For properties of this instance like _buffer, or descriptors set on - # the class like filename, use regular __setattr__ - if name in self.__dict__ or name in self.__class__.__dict__: - object.__setattr__(self, name, value) - else: - # All other attributes are part of the document in db.fs.files. - # Store them to be sent to server on close() or if closed, send - # them now. - self._file[name] = value - if self._closed: - self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) - - def __flush_data(self, data: Any) -> None: - """Flush `data` to a chunk.""" - self.__ensure_indexes() - if not data: - return - assert len(data) <= self.chunk_size - - chunk = {"files_id": self._file["_id"], "n": self._chunk_number, "data": Binary(data)} - - try: - self._chunks.insert_one(chunk, session=self._session) - except DuplicateKeyError: - self._raise_file_exists(self._file["_id"]) - self._chunk_number += 1 - self._position += len(data) - - def __flush_buffer(self) -> None: - """Flush the buffer contents out to a chunk.""" - self.__flush_data(self._buffer.getvalue()) - self._buffer.close() - self._buffer = io.BytesIO() - - def __flush(self) -> Any: - """Flush the file to the database.""" - try: - self.__flush_buffer() - # The GridFS spec says length SHOULD be an Int64. - self._file["length"] = Int64(self._position) - self._file["uploadDate"] = datetime.datetime.utcnow() - - return self._coll.files.insert_one(self._file, session=self._session) - except DuplicateKeyError: - self._raise_file_exists(self._id) - - def _raise_file_exists(self, file_id: Any) -> NoReturn: - """Raise a FileExists exception for the given file_id.""" - raise FileExists("file with _id %r already exists" % file_id) - - def close(self) -> None: - """Flush the file and close it. - - A closed file cannot be written any more. Calling - :meth:`close` more than once is allowed. - """ - if not self._closed: - self.__flush() - object.__setattr__(self, "_closed", True) - - def read(self, size: int = -1) -> NoReturn: - raise io.UnsupportedOperation("read") - - def readable(self) -> bool: - return False - - def seekable(self) -> bool: - return False - - def write(self, data: Any) -> None: - """Write data to the file. There is no return value. - - `data` can be either a string of bytes or a file-like object - (implementing :meth:`read`). If the file has an - :attr:`encoding` attribute, `data` can also be a - :class:`str` instance, which will be encoded as - :attr:`encoding` before being written. - - Due to buffering, the data may not actually be written to the - database until the :meth:`close` method is called. Raises - :class:`ValueError` if this file is already closed. Raises - :class:`TypeError` if `data` is not an instance of - :class:`bytes`, a file-like object, or an instance of :class:`str`. - Unicode data is only allowed if the file has an :attr:`encoding` - attribute. 
- - :Parameters: - - `data`: string of bytes or file-like object to be written - to the file - """ - if self._closed: - raise ValueError("cannot write to a closed file") - - try: - # file-like - read = data.read - except AttributeError: - # string - if not isinstance(data, (str, bytes)): - raise TypeError("can only write strings or file-like objects") - if isinstance(data, str): - try: - data = data.encode(self.encoding) - except AttributeError: - raise TypeError("must specify an encoding for file in order to write str") - read = io.BytesIO(data).read - - if self._buffer.tell() > 0: - # Make sure to flush only when _buffer is complete - space = self.chunk_size - self._buffer.tell() - if space: - try: - to_write = read(space) - except BaseException: - self.abort() - raise - self._buffer.write(to_write) - if len(to_write) < space: - return # EOF or incomplete - self.__flush_buffer() - to_write = read(self.chunk_size) - while to_write and len(to_write) == self.chunk_size: - self.__flush_data(to_write) - to_write = read(self.chunk_size) - self._buffer.write(to_write) - - def writelines(self, sequence: Iterable[Any]) -> None: - """Write a sequence of strings to the file. - - Does not add seperators. - """ - for line in sequence: - self.write(line) - - def writeable(self) -> bool: - return True - - def __enter__(self) -> "GridIn": - """Support for the context manager protocol.""" - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: - """Support for the context manager protocol. - - Close the file if no exceptions occur and allow exceptions to propagate. - """ - if exc_type is None: - # No exceptions happened. - self.close() - else: - # Something happened, at minimum mark as closed. - object.__setattr__(self, "_closed", True) - - # propagate exceptions - return False - - -class GridOut(io.IOBase): - """Class to read data out of GridFS.""" - - def __init__( - self, - root_collection: Collection, - file_id: Optional[int] = None, - file_document: Optional[Any] = None, - session: Optional[ClientSession] = None, - ) -> None: - """Read a file from GridFS - - Application developers should generally not need to - instantiate this class directly - instead see the methods - provided by :class:`~gridfs.GridFS`. - - Either `file_id` or `file_document` must be specified, - `file_document` will be given priority if present. Raises - :class:`TypeError` if `root_collection` is not an instance of - :class:`~pymongo.collection.Collection`. - - :Parameters: - - `root_collection`: root collection to read from - - `file_id` (optional): value of ``"_id"`` for the file to read - - `file_document` (optional): file document from - `root_collection.files` - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` to use for all - commands - - .. versionchanged:: 3.8 - For better performance and to better follow the GridFS spec, - :class:`GridOut` now uses a single cursor to read all the chunks in - the file. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.0 - Creating a GridOut does not immediately retrieve the file metadata - from the server. Metadata is fetched when first needed. 
- """ - if not isinstance(root_collection, Collection): - raise TypeError("root_collection must be an instance of Collection") - _disallow_transactions(session) - - root_collection = _clear_entity_type_registry(root_collection) - - super().__init__() - - self.__chunks = root_collection.chunks - self.__files = root_collection.files - self.__file_id = file_id - self.__buffer = EMPTY - self.__chunk_iter = None - self.__position = 0 - self._file = file_document - self._session = session - - _id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.") - filename: str = _grid_out_property("filename", "Name of this file.") - name: str = _grid_out_property("filename", "Alias for `filename`.") - content_type: Optional[str] = _grid_out_property("contentType", "Mime-type for this file.") - length: int = _grid_out_property("length", "Length (in bytes) of this file.") - chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.") - upload_date: datetime.datetime = _grid_out_property( - "uploadDate", "Date that this file was first uploaded." - ) - aliases: Optional[List[str]] = _grid_out_property("aliases", "List of aliases for this file.") - metadata: Optional[Mapping[str, Any]] = _grid_out_property( - "metadata", "Metadata attached to this file." - ) - md5: Optional[str] = _grid_out_property( - "md5", "MD5 of the contents of this file if an md5 sum was created." - ) - - _file: Any - __chunk_iter: Any - - def _ensure_file(self) -> None: - if not self._file: - _disallow_transactions(self._session) - self._file = self.__files.find_one({"_id": self.__file_id}, session=self._session) - if not self._file: - raise NoFile( - "no file in gridfs collection %r with _id %r" % (self.__files, self.__file_id) - ) - - def __getattr__(self, name: str) -> Any: - self._ensure_file() - if name in self._file: - return self._file[name] - raise AttributeError("GridOut object has no attribute '%s'" % name) - - def readable(self) -> bool: - return True - - def readchunk(self) -> bytes: - """Reads a chunk at a time. If the current position is within a - chunk the remainder of the chunk is returned. - """ - received = len(self.__buffer) - chunk_data = EMPTY - chunk_size = int(self.chunk_size) - - if received > 0: - chunk_data = self.__buffer - elif self.__position < int(self.length): - chunk_number = int((received + self.__position) / chunk_size) - if self.__chunk_iter is None: - self.__chunk_iter = _GridOutChunkIterator( - self, self.__chunks, self._session, chunk_number - ) - - chunk = self.__chunk_iter.next() - chunk_data = chunk["data"][self.__position % chunk_size :] - - if not chunk_data: - raise CorruptGridFile("truncated chunk") - - self.__position += len(chunk_data) - self.__buffer = EMPTY - return chunk_data - - def read(self, size: int = -1) -> bytes: - """Read at most `size` bytes from the file (less if there - isn't enough data). - - The bytes are returned as an instance of :class:`str` (:class:`bytes` - in python 3). If `size` is negative or omitted all data is read. - - :Parameters: - - `size` (optional): the number of bytes to read - - .. versionchanged:: 3.8 - This method now only checks for extra chunks after reading the - entire file. Previously, this method would check for extra chunks - on every call. 
- """ - self._ensure_file() - - remainder = int(self.length) - self.__position - if size < 0 or size > remainder: - size = remainder - - if size == 0: - return EMPTY - - received = 0 - data = io.BytesIO() - while received < size: - chunk_data = self.readchunk() - received += len(chunk_data) - data.write(chunk_data) - - # Detect extra chunks after reading the entire file. - if size == remainder and self.__chunk_iter: - try: - self.__chunk_iter.next() - except StopIteration: - pass - - self.__position -= received - size - - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) - - def readline(self, size: int = -1) -> bytes: # type: ignore[override] - """Read one line or up to `size` bytes from the file. - - :Parameters: - - `size` (optional): the maximum number of bytes to read - """ - remainder = int(self.length) - self.__position - if size < 0 or size > remainder: - size = remainder - - if size == 0: - return EMPTY - - received = 0 - data = io.BytesIO() - while received < size: - chunk_data = self.readchunk() - pos = chunk_data.find(NEWLN, 0, size) - if pos != -1: - size = received + pos + 1 - - received += len(chunk_data) - data.write(chunk_data) - if pos != -1: - break - - self.__position -= received - size - - # Return 'size' bytes and store the rest. - data.seek(size) - self.__buffer = data.read() - data.seek(0) - return data.read(size) - - def tell(self) -> int: - """Return the current position of this file.""" - return self.__position - - def seek(self, pos: int, whence: int = _SEEK_SET) -> int: - """Set the current position of this file. - - :Parameters: - - `pos`: the position (or offset if using relative - positioning) to seek to - - `whence` (optional): where to seek - from. :attr:`os.SEEK_SET` (``0``) for absolute file - positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative - to the current position, :attr:`os.SEEK_END` (``2``) to - seek relative to the file's end. - - .. versionchanged:: 4.1 - The method now returns the new position in the file, to - conform to the behavior of :meth:`io.IOBase.seek`. - """ - if whence == _SEEK_SET: - new_pos = pos - elif whence == _SEEK_CUR: - new_pos = self.__position + pos - elif whence == _SEEK_END: - new_pos = int(self.length) + pos - else: - raise IOError(22, "Invalid value for `whence`") - - if new_pos < 0: - raise IOError(22, "Invalid value for `pos` - must be positive") - - # Optimization, continue using the same buffer and chunk iterator. - if new_pos == self.__position: - return new_pos - - self.__position = new_pos - self.__buffer = EMPTY - if self.__chunk_iter: - self.__chunk_iter.close() - self.__chunk_iter = None - return new_pos - - def seekable(self) -> bool: - return True - - def __iter__(self) -> "GridOut": - """Return an iterator over all of this file's data. - - The iterator will return lines (delimited by ``b'\\n'``) of - :class:`bytes`. This can be useful when serving files - using a webserver that handles such an iterator efficiently. - - .. versionchanged:: 3.8 - The iterator now raises :class:`CorruptGridFile` when encountering - any truncated, missing, or extra chunk in a file. The previous - behavior was to only raise :class:`CorruptGridFile` on a missing - chunk. - - .. versionchanged:: 4.0 - The iterator now iterates over *lines* in the file, instead - of chunks, to conform to the base class :py:class:`io.IOBase`. - Use :meth:`GridOut.readchunk` to read chunk by chunk instead - of line by line. 
- """ - return self - - def close(self) -> None: - """Make GridOut more generically file-like.""" - if self.__chunk_iter: - self.__chunk_iter.close() - self.__chunk_iter = None - super().close() - - def write(self, value: Any) -> NoReturn: - raise io.UnsupportedOperation("write") - - def writelines(self, lines: Any) -> NoReturn: - raise io.UnsupportedOperation("writelines") - - def writable(self) -> bool: - return False - - def __enter__(self) -> "GridOut": - """Makes it possible to use :class:`GridOut` files - with the context manager protocol. - """ - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: - """Makes it possible to use :class:`GridOut` files - with the context manager protocol. - """ - self.close() - return False - - def fileno(self) -> NoReturn: - raise io.UnsupportedOperation("fileno") - - def flush(self) -> None: - # GridOut is read-only, so flush does nothing. - pass - - def isatty(self) -> bool: - return False - - def truncate(self, size: Optional[int] = None) -> NoReturn: - # See https://docs.python.org/3/library/io.html#io.IOBase.writable - # for why truncate has to raise. - raise io.UnsupportedOperation("truncate") - - # Override IOBase.__del__ otherwise it will lead to __getattr__ on - # __IOBase_closed which calls _ensure_file and potentially performs I/O. - # We cannot do I/O in __del__ since it can lead to a deadlock. - def __del__(self) -> None: - pass - - -class _GridOutChunkIterator(object): - """Iterates over a file's chunks using a single cursor. - - Raises CorruptGridFile when encountering any truncated, missing, or extra - chunk in a file. - """ - - def __init__( - self, - grid_out: GridOut, - chunks: Collection, - session: Optional[ClientSession], - next_chunk: Any, - ) -> None: - self._id = grid_out._id - self._chunk_size = int(grid_out.chunk_size) - self._length = int(grid_out.length) - self._chunks = chunks - self._session = session - self._next_chunk = next_chunk - self._num_chunks = math.ceil(float(self._length) / self._chunk_size) - self._cursor = None - - _cursor: Optional[Cursor] - - def expected_chunk_length(self, chunk_n: int) -> int: - if chunk_n < self._num_chunks - 1: - return self._chunk_size - return self._length - (self._chunk_size * (self._num_chunks - 1)) - - def __iter__(self) -> "_GridOutChunkIterator": - return self - - def _create_cursor(self) -> None: - filter = {"files_id": self._id} - if self._next_chunk > 0: - filter["n"] = {"$gte": self._next_chunk} - _disallow_transactions(self._session) - self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) - - def _next_with_retry(self) -> Mapping[str, Any]: - """Return the next chunk and retry once on CursorNotFound. - - We retry on CursorNotFound to maintain backwards compatibility in - cases where two calls to read occur more than 10 minutes apart (the - server's default cursor timeout). 
- """ - if self._cursor is None: - self._create_cursor() - assert self._cursor is not None - try: - return self._cursor.next() - except CursorNotFound: - self._cursor.close() - self._create_cursor() - return self._cursor.next() - - def next(self) -> Mapping[str, Any]: - try: - chunk = self._next_with_retry() - except StopIteration: - if self._next_chunk >= self._num_chunks: - raise - raise CorruptGridFile("no chunk #%d" % self._next_chunk) - - if chunk["n"] != self._next_chunk: - self.close() - raise CorruptGridFile( - "Missing chunk: expected chunk #%d but found " - "chunk with n=%d" % (self._next_chunk, chunk["n"]) - ) - - if chunk["n"] >= self._num_chunks: - # According to spec, ignore extra chunks if they are empty. - if len(chunk["data"]): - self.close() - raise CorruptGridFile( - "Extra chunk found: expected %d chunks but found " - "chunk with n=%d" % (self._num_chunks, chunk["n"]) - ) - - expected_length = self.expected_chunk_length(chunk["n"]) - if len(chunk["data"]) != expected_length: - self.close() - raise CorruptGridFile( - "truncated chunk #%d: expected chunk length to be %d but " - "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"])) - ) - - self._next_chunk += 1 - return chunk - - __next__ = next - - def close(self) -> None: - if self._cursor: - self._cursor.close() - self._cursor = None - - -class GridOutIterator(object): - def __init__(self, grid_out: GridOut, chunks: Collection, session: ClientSession): - self.__chunk_iter = _GridOutChunkIterator(grid_out, chunks, session, 0) - - def __iter__(self) -> "GridOutIterator": - return self - - def next(self) -> bytes: - chunk = self.__chunk_iter.next() - return bytes(chunk["data"]) - - __next__ = next - - -class GridOutCursor(Cursor): - """A cursor / iterator for returning GridOut objects as the result - of an arbitrary query against the GridFS files collection. - """ - - def __init__( - self, - collection: Collection, - filter: Optional[Mapping[str, Any]] = None, - skip: int = 0, - limit: int = 0, - no_cursor_timeout: bool = False, - sort: Optional[Any] = None, - batch_size: int = 0, - session: Optional[ClientSession] = None, - ) -> None: - """Create a new cursor, similar to the normal - :class:`~pymongo.cursor.Cursor`. - - Should not be called directly by application developers - see - the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead. - - .. versionadded 2.7 - - .. seealso:: The MongoDB documentation on `cursors `_. - """ - _disallow_transactions(session) - collection = _clear_entity_type_registry(collection) - - # Hold on to the base "fs" collection to create GridOut objects later. 
- self.__root_collection = collection - - super(GridOutCursor, self).__init__( - collection.files, - filter, - skip=skip, - limit=limit, - no_cursor_timeout=no_cursor_timeout, - sort=sort, - batch_size=batch_size, - session=session, - ) - - def next(self) -> GridOut: - """Get next GridOut object from cursor.""" - _disallow_transactions(self.session) - next_file = super(GridOutCursor, self).next() - return GridOut(self.__root_collection, file_document=next_file, session=self.session) - - __next__ = next - - def add_option(self, *args: Any, **kwargs: Any) -> NoReturn: - raise NotImplementedError("Method does not exist for GridOutCursor") - - def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: - raise NotImplementedError("Method does not exist for GridOutCursor") - - def _clone_base(self, session: ClientSession) -> "GridOutCursor": - """Creates an empty GridOutCursor for information to be copied into.""" - return GridOutCursor(self.__root_collection, session=session) +from gridfs.synchronous.grid_file import * # noqa: F403 diff --git a/gridfs/grid_file_shared.py b/gridfs/grid_file_shared.py new file mode 100644 index 0000000000..79a0ad7f8c --- /dev/null +++ b/gridfs/grid_file_shared.py @@ -0,0 +1,168 @@ +from __future__ import annotations + +import os +import warnings +from typing import Any, Optional + +from pymongo import ASCENDING +from pymongo.common import MAX_MESSAGE_SIZE +from pymongo.errors import InvalidOperation + +_SEEK_SET = os.SEEK_SET +_SEEK_CUR = os.SEEK_CUR +_SEEK_END = os.SEEK_END + +EMPTY = b"" +NEWLN = b"\n" + +"""Default chunk size, in bytes.""" +# Slightly under a power of 2, to work well with server's record allocations. +DEFAULT_CHUNK_SIZE = 255 * 1024 +# The number of chunked bytes to buffer before calling insert_many. +_UPLOAD_BUFFER_SIZE = MAX_MESSAGE_SIZE +# The number of chunk documents to buffer before calling insert_many. +_UPLOAD_BUFFER_CHUNKS = 100000 +# Rough BSON overhead of a chunk document not including the chunk data itself. +# Essentially len(encode({"_id": ObjectId(), "files_id": ObjectId(), "n": 1, "data": ""})) +_CHUNK_OVERHEAD = 60 + +_C_INDEX: dict[str, Any] = {"files_id": ASCENDING, "n": ASCENDING} +_F_INDEX: dict[str, Any] = {"filename": ASCENDING, "uploadDate": ASCENDING} + + +def _a_grid_in_property( + field_name: str, + docstring: str, + read_only: Optional[bool] = False, + closed_only: Optional[bool] = False, +) -> Any: + """Create a GridIn property.""" + + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridIn property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) + + def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if closed_only and not self._closed: + raise AttributeError("can only get %r on a closed file" % field_name) + # Protect against PHP-237 + if field_name == "length": + return self._file.get(field_name, 0) + return self._file.get(field_name, None) + + def setter(self: Any, value: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if self._closed: + raise InvalidOperation( + "AsyncGridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use AsyncGridIn.set() instead" + ) + self._file[field_name] = value + + if read_only: + docstring += "\n\nThis attribute is read-only." 
+ elif closed_only: + docstring = "{}\n\n{}".format( + docstring, + "This attribute is read-only and " + "can only be read after :meth:`close` " + "has been called.", + ) + + if not read_only and not closed_only: + return property(getter, setter, doc=docstring) + return property(getter, doc=docstring) + + +def _a_grid_out_property(field_name: str, docstring: str) -> Any: + """Create a GridOut property.""" + + def a_getter(self: Any) -> Any: + if not self._file: + raise InvalidOperation( + "You must call GridOut.open() before accessing " "the %s property" % field_name + ) + # Protect against PHP-237 + if field_name == "length": + return self._file.get(field_name, 0) + return self._file.get(field_name, None) + + docstring += "\n\nThis attribute is read-only." + return property(a_getter, doc=docstring) + + +def _grid_in_property( + field_name: str, + docstring: str, + read_only: Optional[bool] = False, + closed_only: Optional[bool] = False, +) -> Any: + """Create a GridIn property.""" + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridIn property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) + + def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if closed_only and not self._closed: + raise AttributeError("can only get %r on a closed file" % field_name) + # Protect against PHP-237 + if field_name == "length": + return self._file.get(field_name, 0) + return self._file.get(field_name, None) + + def setter(self: Any, value: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + if self._closed: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {field_name: value}}) + self._file[field_name] = value + + if read_only: + docstring += "\n\nThis attribute is read-only." + elif closed_only: + docstring = "{}\n\n{}".format( + docstring, + "This attribute is read-only and " + "can only be read after :meth:`close` " + "has been called.", + ) + + if not read_only and not closed_only: + return property(getter, setter, doc=docstring) + return property(getter, doc=docstring) + + +def _grid_out_property(field_name: str, docstring: str) -> Any: + """Create a GridOut property.""" + warn_str = "" + if docstring.startswith("DEPRECATED,"): + warn_str = ( + f"GridOut property '{field_name}' is deprecated and will be removed in PyMongo 5.0" + ) + + def getter(self: Any) -> Any: + if warn_str: + warnings.warn(warn_str, stacklevel=2, category=DeprecationWarning) + self.open() + + # Protect against PHP-237 + if field_name == "length": + return self._file.get(field_name, 0) + return self._file.get(field_name, None) + + docstring += "\n\nThis attribute is read-only." + return property(getter, doc=docstring) + + +def _clear_entity_type_registry(entity: Any, **kwargs: Any) -> Any: + """Clear the given database/collection object's type registry.""" + codecopts = entity.codec_options.with_options(type_registry=None) + return entity.with_options(codec_options=codecopts, **kwargs) diff --git a/gridfs/synchronous/__init__.py b/gridfs/synchronous/__init__.py new file mode 100644 index 0000000000..bc2704364b --- /dev/null +++ b/gridfs/synchronous/__init__.py @@ -0,0 +1,42 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GridFS is a specification for storing large objects in Mongo. + +The :mod:`gridfs` package is an implementation of GridFS on top of +:mod:`pymongo`, exposing a file-like interface. + +.. seealso:: The MongoDB documentation on `gridfs `_. +""" +from __future__ import annotations + +from gridfs.errors import NoFile +from gridfs.grid_file_shared import DEFAULT_CHUNK_SIZE +from gridfs.synchronous.grid_file import ( + GridFS, + GridFSBucket, + GridIn, + GridOut, + GridOutCursor, +) + +__all__ = [ + "GridFS", + "GridFSBucket", + "NoFile", + "DEFAULT_CHUNK_SIZE", + "GridIn", + "GridOut", + "GridOutCursor", +] diff --git a/gridfs/synchronous/grid_file.py b/gridfs/synchronous/grid_file.py new file mode 100644 index 0000000000..7364aedda3 --- /dev/null +++ b/gridfs/synchronous/grid_file.py @@ -0,0 +1,1993 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for representing files stored in GridFS.""" +from __future__ import annotations + +import datetime +import inspect +import io +import math +from collections import abc +from typing import Any, Iterable, Mapping, NoReturn, Optional, cast + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from gridfs.grid_file_shared import ( + _C_INDEX, + _CHUNK_OVERHEAD, + _F_INDEX, + _SEEK_CUR, + _SEEK_END, + _SEEK_SET, + _UPLOAD_BUFFER_CHUNKS, + _UPLOAD_BUFFER_SIZE, + DEFAULT_CHUNK_SIZE, + EMPTY, + NEWLN, + _clear_entity_type_registry, + _grid_in_property, + _grid_out_property, +) +from pymongo import ASCENDING, DESCENDING, WriteConcern, _csot +from pymongo.common import validate_string +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + CursorNotFound, + DuplicateKeyError, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers_shared import _check_write_command_response +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.client_session import ClientSession +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.database import Database + +_IS_SYNC = True + + +def _disallow_transactions(session: Optional[ClientSession]) -> None: + if session and session.in_transaction: + raise InvalidOperation("GridFS does not support multi-document transactions") + + +class GridFS: + """An instance of GridFS on top of a single Database.""" + + def __init__(self, database: Database[Any], collection: str = "fs"): + """Create a new instance of :class:`GridFS`. 
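+
+        A minimal usage sketch (the connection and the ``my_db`` database
+        name here are illustrative, not part of this API)::
+
+            import gridfs
+            from pymongo import MongoClient
+
+            fs = gridfs.GridFS(MongoClient().my_db)
+            file_id = fs.put(b"data I want to store!", filename="test_file")
+            assert fs.get(file_id).read() == b"data I want to store!"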
+ + Raises :class:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + :param database: database to use + :param collection: root collection to use + + .. versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFS operation in a transaction now always raises an + error. GridFS does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionchanged:: 3.1 + Indexes are only ensured on the first write to the DB. + + .. versionchanged:: 3.0 + `database` must use an acknowledged + :attr:`~pymongo.database.Database.write_concern` + + .. seealso:: The MongoDB documentation on `gridfs `_. + """ + if not isinstance(database, Database): + raise TypeError(f"database must be an instance of Database, not {type(database)}") + + database = _clear_entity_type_registry(database) + + if not database.write_concern.acknowledged: + raise ConfigurationError("database must use acknowledged write_concern") + + self._collection = database[collection] + self._files = self._collection.files + self._chunks = self._collection.chunks + + def new_file(self, **kwargs: Any) -> GridIn: + """Create a new file in GridFS. + + Returns a new :class:`~gridfs.grid_file.GridIn` instance to + which data can be written. Any keyword arguments will be + passed through to :meth:`~gridfs.grid_file.GridIn`. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. + + :param kwargs: keyword arguments for file creation + """ + return GridIn(self._collection, **kwargs) + + def put(self, data: Any, **kwargs: Any) -> Any: + """Put data in GridFS as a new file. + + Equivalent to doing:: + + with fs.new_file(**kwargs) as f: + f.write(data) + + `data` can be either an instance of :class:`bytes` or a file-like + object providing a :meth:`read` method. If an `encoding` keyword + argument is passed, `data` can also be a :class:`str` instance, which + will be encoded as `encoding` before being written. Any keyword + arguments will be passed through to the created file - see + :meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the + ``"_id"`` of the created file. + + If the ``"_id"`` of the file is manually specified, it must + not already exist in GridFS. Otherwise + :class:`~gridfs.errors.FileExists` is raised. + + :param data: data to be written as a file. + :param kwargs: keyword arguments for file creation + + .. versionchanged:: 3.0 + w=0 writes to GridFS are now prohibited. + """ + with GridIn(self._collection, **kwargs) as grid_file: + grid_file.write(data) + return grid_file._id + + def get(self, file_id: Any, session: Optional[ClientSession] = None) -> GridOut: + """Get a file from GridFS by ``"_id"``. + + Returns an instance of :class:`~gridfs.grid_file.GridOut`, + which provides a file-like interface for reading. + + :param file_id: ``"_id"`` of the file to get + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + gout = GridOut(self._collection, file_id, session=session) + + # Raise NoFile now, instead of on first attribute access. 
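+        # (A bare GridOut defers the files-collection query until open() is
+        # called, so calling it here surfaces NoFile for an unknown file_id
+        # immediately rather than on a later property access.)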
+ gout.open() + return gout + + def get_version( + self, + filename: Optional[str] = None, + version: Optional[int] = -1, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> GridOut: + """Get a file from GridFS by ``"filename"`` or metadata fields. + + Returns a version of the file in GridFS whose filename matches + `filename` and whose metadata fields match the supplied keyword + arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. + + Version numbering is a convenience atop the GridFS API provided + by MongoDB. If more than one file matches the query (either by + `filename` alone, by metadata fields, or by a combination of + both), then version ``-1`` will be the most recently uploaded + matching file, ``-2`` the second most recently + uploaded, etc. Version ``0`` will be the first version + uploaded, ``1`` the second version, etc. So if three versions + have been uploaded, then version ``0`` is the same as version + ``-3``, version ``1`` is the same as version ``-2``, and + version ``2`` is the same as version ``-1``. + + Raises :class:`~gridfs.errors.NoFile` if no such version of + that file exists. + + :param filename: ``"filename"`` of the file to get, or `None` + :param version: version of the file to get (defaults + to -1, the most recent version uploaded) + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.1 + ``get_version`` no longer ensures indexes. + """ + query = kwargs + if filename is not None: + query["filename"] = filename + + _disallow_transactions(session) + cursor = self._files.find(query, session=session) + if version is None: + version = -1 + if version < 0: + skip = abs(version) - 1 + cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) + else: + cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) + try: + doc = next(cursor) + return GridOut(self._collection, file_document=doc, session=session) + except StopIteration: + raise NoFile("no version %d for filename %r" % (version, filename)) from None + + def get_last_version( + self, + filename: Optional[str] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> GridOut: + """Get the most recent version of a file in GridFS by ``"filename"`` + or metadata fields. + + Equivalent to calling :meth:`get_version` with the default + `version` (``-1``). + + :param filename: ``"filename"`` of the file to get, or `None` + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: find files by custom metadata. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return self.get_version(filename=filename, session=session, **kwargs) + + # TODO add optional safe mode for chunk removal? + def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None: + """Delete a file from GridFS by ``"_id"``. + + Deletes all data belonging to the file with ``"_id"``: + `file_id`. + + .. warning:: Any processes/threads reading from the file while + this method is executing will likely see an invalid/corrupt + file. Care should be taken to avoid concurrent reads to a file + while it is being deleted. + + .. note:: Deletes of non-existent files are considered successful + since the end result is the same: no file with that _id remains. + + :param file_id: ``"_id"`` of the file to delete + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. 
versionchanged:: 3.6
+            Added ``session`` parameter.
+
+        .. versionchanged:: 3.1
+           ``delete`` no longer ensures indexes.
+        """
+        _disallow_transactions(session)
+        self._files.delete_one({"_id": file_id}, session=session)
+        self._chunks.delete_many({"files_id": file_id}, session=session)
+
+    def list(self, session: Optional[ClientSession] = None) -> list[str]:
+        """List the names of all files stored in this instance of
+        :class:`GridFS`.
+
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+            Added ``session`` parameter.
+
+        .. versionchanged:: 3.1
+           ``list`` no longer ensures indexes.
+        """
+        _disallow_transactions(session)
+        # With an index, distinct includes documents with no filename
+        # as None.
+        return [
+            name for name in self._files.distinct("filename", session=session) if name is not None
+        ]
+
+    def find_one(
+        self,
+        filter: Optional[Any] = None,
+        session: Optional[ClientSession] = None,
+        *args: Any,
+        **kwargs: Any,
+    ) -> Optional[GridOut]:
+        """Get a single file from gridfs.
+
+        All arguments to :meth:`find` are also valid arguments for
+        :meth:`find_one`, although any `limit` argument will be
+        ignored. Returns a single :class:`~gridfs.grid_file.GridOut`,
+        or ``None`` if no matching file is found. For example:
+
+        .. code-block:: python
+
+            file = fs.find_one({"filename": "lisa.txt"})
+
+        :param filter: a dictionary specifying
+            the query to be performed OR any other type to be used as
+            the value for a query for ``"_id"`` in the file collection.
+        :param args: any additional positional arguments are
+            the same as the arguments to :meth:`find`.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+        :param kwargs: any additional keyword arguments
+            are the same as the arguments to :meth:`find`.
+
+        .. versionchanged:: 3.6
+            Added ``session`` parameter.
+        """
+        if filter is not None and not isinstance(filter, abc.Mapping):
+            filter = {"_id": filter}
+
+        _disallow_transactions(session)
+        for f in self.find(filter, *args, session=session, **kwargs):
+            return f
+
+        return None
+
+    def find(self, *args: Any, **kwargs: Any) -> GridOutCursor:
+        """Query GridFS for files.
+
+        Returns a cursor that iterates across files matching
+        arbitrary queries on the files collection. Can be combined
+        with other modifiers for additional control. For example::
+
+            for grid_out in fs.find({"filename": "lisa.txt"},
+                                    no_cursor_timeout=True):
+                data = grid_out.read()
+
+        would iterate through all versions of "lisa.txt" stored in GridFS.
+        Note that setting no_cursor_timeout to True may be important to
+        prevent the cursor from timing out during long multi-file processing
+        work.
+
+        As another example, the call::
+
+            most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
+
+        would return a cursor to the three most recently uploaded files
+        in GridFS.
+
+        Follows a similar interface to
+        :meth:`~pymongo.collection.Collection.find`
+        in :class:`~pymongo.collection.Collection`.
+
+        If a :class:`~pymongo.client_session.ClientSession` is passed to
+        :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances
+        are associated with that session.
+
+        :param filter: A query document that selects which files
+            to include in the result set. Can be an empty document to include
+            all files.
+ :param skip: the number of files to omit (from + the start of the result set) when returning the results + :param limit: the maximum number of results to + return + :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.cursor.Cursor.sort` for details. + + Raises :class:`TypeError` if any of the arguments are of + improper type. Returns an instance of + :class:`~gridfs.grid_file.GridOutCursor` + corresponding to this query. + + .. versionchanged:: 3.0 + Removed the read_preference, tag_sets, and + secondary_acceptable_latency_ms options. + .. versionadded:: 2.7 + .. seealso:: The MongoDB documentation on `find `_. + """ + return GridOutCursor(self._collection, *args, **kwargs) + + def exists( + self, + document_or_id: Optional[Any] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> bool: + """Check if a file exists in this instance of :class:`GridFS`. + + The file to check for can be specified by the value of its + ``_id`` key, or by passing in a query document. A query + document can be passed in as dictionary, or by using keyword + arguments. Thus, the following three calls are equivalent: + + >>> fs.exists(file_id) + >>> fs.exists({"_id": file_id}) + >>> fs.exists(_id=file_id) + + As are the following two calls: + + >>> fs.exists({"filename": "mike.txt"}) + >>> fs.exists(filename="mike.txt") + + And the following two: + + >>> fs.exists({"foo": {"$gt": 12}}) + >>> fs.exists(foo={"$gt": 12}) + + Returns ``True`` if a matching file exists, ``False`` + otherwise. Calls to :meth:`exists` will not automatically + create appropriate indexes; application developers should be + sure to create indexes if needed and as appropriate. + + :param document_or_id: query document, or _id of the + document to check for + :param session: a + :class:`~pymongo.client_session.ClientSession` + :param kwargs: keyword arguments are used as a + query document, if they're present. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + _disallow_transactions(session) + if kwargs: + f = self._files.find_one(kwargs, ["_id"], session=session) + else: + f = self._files.find_one(document_or_id, ["_id"], session=session) + + return f is not None + + +class GridFSBucket: + """An instance of GridFS on top of a single Database.""" + + def __init__( + self, + db: Database[Any], + bucket_name: str = "fs", + chunk_size_bytes: int = DEFAULT_CHUNK_SIZE, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + ) -> None: + """Create a new instance of :class:`GridFSBucket`. + + Raises :exc:`TypeError` if `database` is not an instance of + :class:`~pymongo.database.Database`. + + Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern` + is not acknowledged. + + :param database: database to use. + :param bucket_name: The name of the bucket. Defaults to 'fs'. + :param chunk_size_bytes: The chunk size in bytes. Defaults + to 255KB. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use. If ``None`` + (the default) db.write_concern is used. + :param read_preference: The read preference to use. If + ``None`` (the default) db.read_preference is used. + + .. 
versionchanged:: 4.0 + Removed the `disable_md5` parameter. See + :ref:`removed-gridfs-checksum` for details. + + .. versionchanged:: 3.11 + Running a GridFSBucket operation in a transaction now always raises + an error. GridFSBucket does not support multi-document transactions. + + .. versionchanged:: 3.7 + Added the `disable_md5` parameter. + + .. versionadded:: 3.1 + + .. seealso:: The MongoDB documentation on `gridfs `_. + """ + if not isinstance(db, Database): + raise TypeError(f"database must be an instance of Database, not {type(db)}") + + db = _clear_entity_type_registry(db) + + wtc = write_concern if write_concern is not None else db.write_concern + if not wtc.acknowledged: + raise ConfigurationError("write concern must be acknowledged") + + self._bucket_name = bucket_name + self._collection = db[bucket_name] + self._chunks: Collection[Any] = self._collection.chunks.with_options( + write_concern=write_concern, read_preference=read_preference + ) + + self._files: Collection[Any] = self._collection.files.with_options( + write_concern=write_concern, read_preference=read_preference + ) + + self._chunk_size_bytes = chunk_size_bytes + self._timeout = db.client.options.timeout + + def open_upload_stream( + self, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the filename, and can choose to add any + additional information in the metadata field of the file document or + modify the chunk size. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream( + "test_file", chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to upload. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + + opts = { + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } + if metadata is not None: + opts["metadata"] = metadata + + return GridIn(self._collection, session=session, **opts) + + def open_upload_stream_with_id( + self, + file_id: Any, + filename: str, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> GridIn: + """Opens a Stream that the application can write the contents of the + file to. + + The user must specify the file id and filename, and can choose to add + any additional information in the metadata field of the file document + or modify the chunk size. 
+ For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + with fs.open_upload_stream_with_id( + ObjectId(), + "test_file", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) as grid_in: + grid_in.write("data I want to store!") + # uploaded on close + + Returns an instance of :class:`~gridfs.grid_file.GridIn`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param file_id: The id to use for this file. The id must not have + already been used for another file. + :param filename: The name of the file to upload. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes in :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + + opts = { + "_id": file_id, + "filename": filename, + "chunk_size": ( + chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes + ), + } + if metadata is not None: + opts["metadata"] = metadata + + return GridIn(self._collection, session=session, **opts) + + @_csot.apply + def upload_from_stream( + self, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> ObjectId: + """Uploads a user file to a GridFS bucket. + + Reads the contents of the user file from `source` and uploads + it to the file `filename`. Source can be a string or file-like object. + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + file_id = fs.upload_from_stream( + "test_file", + "data I want to store!", + chunk_size_bytes=4, + metadata={"contentType": "text/plain"}) + + Returns the _id of the uploaded file. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to upload. + :param source: The source stream of the content to be uploaded. Must be + a file-like object that implements :meth:`read` or a string. + :param chunk_size_bytes` (options): The number of bytes per chunk of this + file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`. + :param metadata: User data for the 'metadata' field of the + files collection document. If not provided the metadata field will + be omitted from the files collection document. + :param session: a + :class:`~pymongo.client_session.ClientSession` + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + with self.open_upload_stream(filename, chunk_size_bytes, metadata, session=session) as gin: + gin.write(source) + + return cast(ObjectId, gin._id) + + @_csot.apply + def upload_from_stream_with_id( + self, + file_id: Any, + filename: str, + source: Any, + chunk_size_bytes: Optional[int] = None, + metadata: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + ) -> None: + """Uploads a user file to a GridFS bucket with a custom file id. + + Reads the contents of the user file from `source` and uploads + it to the file `filename`. Source can be a string or file-like object. 
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            file_id = fs.upload_from_stream_with_id(
+                ObjectId(),
+                "test_file",
+                "data I want to store!",
+                chunk_size_bytes=4,
+                metadata={"contentType": "text/plain"})
+
+        Raises :exc:`~gridfs.errors.NoFile` if no such version of
+        that file exists.
+        Raises :exc:`~ValueError` if `filename` is not a string.
+
+        :param file_id: The id to use for this file. The id must not have
+            already been used for another file.
+        :param filename: The name of the file to upload.
+        :param source: The source stream of the content to be uploaded. Must be
+            a file-like object that implements :meth:`read` or a string.
+        :param chunk_size_bytes: The number of bytes per chunk of this
+            file. Defaults to the chunk_size_bytes of :class:`GridFSBucket`.
+        :param metadata: User data for the 'metadata' field of the
+            files collection document. If not provided the metadata field will
+            be omitted from the files collection document.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+            Added ``session`` parameter.
+        """
+        with self.open_upload_stream_with_id(
+            file_id, filename, chunk_size_bytes, metadata, session=session
+        ) as gin:
+            gin.write(source)
+
+    def open_download_stream(
+        self, file_id: Any, session: Optional[ClientSession] = None
+    ) -> GridOut:
+        """Opens a Stream from which the application can read the contents of
+        the stored file specified by file_id.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # get _id of file to read.
+            file_id = fs.upload_from_stream("test_file", "data I want to store!")
+            grid_out = fs.open_download_stream(file_id)
+            contents = grid_out.read()
+
+        Returns an instance of :class:`~gridfs.grid_file.GridOut`.
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be downloaded.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+            Added ``session`` parameter.
+        """
+        gout = GridOut(self._collection, file_id, session=session)
+
+        # Raise NoFile now, instead of on first attribute access.
+        gout.open()
+        return gout
+
+    @_csot.apply
+    def download_to_stream(
+        self, file_id: Any, destination: Any, session: Optional[ClientSession] = None
+    ) -> None:
+        """Downloads the contents of the stored file specified by file_id and
+        writes the contents to `destination`.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # Get _id of file to read
+            file_id = fs.upload_from_stream("test_file", "data I want to store!")
+            # Get file to write to
+            file = open('myfile','wb+')
+            fs.download_to_stream(file_id, file)
+            file.seek(0)
+            contents = file.read()
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be downloaded.
+        :param destination: a file-like object implementing :meth:`write`.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+            Added ``session`` parameter.
+        """
+        with self.open_download_stream(file_id, session=session) as gout:
+            while True:
+                chunk = gout.readchunk()
+                if not len(chunk):
+                    break
+                destination.write(chunk)
+
+    @_csot.apply
+    def delete(self, file_id: Any, session: Optional[ClientSession] = None) -> None:
+        """Given a file_id, delete this stored file's files collection document
+        and associated chunks from a GridFS bucket.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # Get _id of file to delete
+            file_id = fs.upload_from_stream("test_file", "data I want to store!")
+            fs.delete(file_id)
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be deleted.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+            Added ``session`` parameter.
+        """
+        _disallow_transactions(session)
+        res = self._files.delete_one({"_id": file_id}, session=session)
+        self._chunks.delete_many({"files_id": file_id}, session=session)
+        if not res.deleted_count:
+            raise NoFile("no file could be deleted because none matched %s" % file_id)
+
+    @_csot.apply
+    def delete_by_name(self, filename: str, session: Optional[ClientSession] = None) -> None:
+        """Given a filename, delete this stored file's files collection document(s)
+        and associated chunks from a GridFS bucket.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            fs.upload_from_stream("test_file", "data I want to store!")
+            fs.delete_by_name("test_file")
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists.
+
+        :param filename: The name of the file to be deleted.
+        :param session: a :class:`~pymongo.client_session.ClientSession`
+
+        .. versionadded:: 4.12
+        """
+        _disallow_transactions(session)
+        files = self._files.find({"filename": filename}, {"_id": 1}, session=session)
+        file_ids = [file["_id"] for file in files]
+        res = self._files.delete_many({"_id": {"$in": file_ids}}, session=session)
+        self._chunks.delete_many({"files_id": {"$in": file_ids}}, session=session)
+        if not res.deleted_count:
+            raise NoFile(f"no file could be deleted because none matched filename {filename!r}")
+
+    def find(self, *args: Any, **kwargs: Any) -> GridOutCursor:
+        """Find and return the files collection documents that match ``filter``
+
+        Returns a cursor that iterates across files matching
+        arbitrary queries on the files collection. Can be combined
+        with other modifiers for additional control.
+
+        For example::
+
+            for grid_data in fs.find({"filename": "lisa.txt"},
+                                     no_cursor_timeout=True):
+                data = grid_data.read()
+
+        would iterate through all versions of "lisa.txt" stored in GridFS.
+        Note that setting no_cursor_timeout to True may be important to
+        prevent the cursor from timing out during long multi-file processing
+        work.
+
+        As another example, the call::
+
+            most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
+
+        would return a cursor to the three most recently uploaded files
+        in GridFS.
+
+        Follows a similar interface to
+        :meth:`~pymongo.collection.Collection.find`
+        in :class:`~pymongo.collection.Collection`.
+
+        If a :class:`~pymongo.client_session.ClientSession` is passed to
+        :meth:`find`, all returned :class:`~gridfs.grid_file.GridOut` instances
+        are associated with that session.
+
+        :param filter: Search query.
+        :param batch_size: The number of documents to return per
+            batch.
+        :param limit: The maximum number of documents to return.
+        :param no_cursor_timeout: The server normally times out idle
+            cursors after an inactivity period (10 minutes) to prevent excess
+            memory use. Set this option to True to prevent that.
+        :param skip: The number of documents to skip before
+            returning.
+        :param sort: The order by which to sort results. Defaults to
+            None.
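+
+        As a sketch, the options above can be combined (the filename and
+        option values here are illustrative)::
+
+            cursor = fs.find({"filename": "lisa.txt"}, skip=1, limit=5,
+                             batch_size=5, sort=[("uploadDate", -1)])
+            for grid_out in cursor:
+                data = grid_out.read()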
+ """ + return GridOutCursor(self._collection, *args, **kwargs) + + def open_download_stream_by_name( + self, filename: str, revision: int = -1, session: Optional[ClientSession] = None + ) -> GridOut: + """Opens a Stream from which the application can read the contents of + `filename` and optional `revision`. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + grid_out = fs.open_download_stream_by_name("test_file") + contents = grid_out.read() + + Returns an instance of :class:`~gridfs.grid_file.GridOut`. + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + + Raises :exc:`~ValueError` filename is not a string. + + :param filename: The name of the file to read from. + :param revision: Which revision (documents with the same + filename and different uploadDate) of the file to retrieve. + Defaults to -1 (the most recent revision). + :param session: a + :class:`~pymongo.client_session.ClientSession` + + :Note: Revision numbers are defined as follows: + + - 0 = the original stored file + - 1 = the first revision + - 2 = the second revision + - etc... + - -2 = the second most recent revision + - -1 = the most recent revision + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + validate_string("filename", filename) + query = {"filename": filename} + _disallow_transactions(session) + cursor = self._files.find(query, session=session) + if revision < 0: + skip = abs(revision) - 1 + cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) + else: + cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING) + try: + grid_file = next(cursor) + return GridOut(self._collection, file_document=grid_file, session=session) + except StopIteration: + raise NoFile("no version %d for filename %r" % (revision, filename)) from None + + @_csot.apply + def download_to_stream_by_name( + self, + filename: str, + destination: Any, + revision: int = -1, + session: Optional[ClientSession] = None, + ) -> None: + """Write the contents of `filename` (with optional `revision`) to + `destination`. + + For example:: + + my_db = MongoClient().test + fs = GridFSBucket(my_db) + # Get file to write to + file = open('myfile','wb') + fs.download_to_stream_by_name("test_file", file) + + Raises :exc:`~gridfs.errors.NoFile` if no such version of + that file exists. + + Raises :exc:`~ValueError` if `filename` is not a string. + + :param filename: The name of the file to read from. + :param destination: A file-like object that implements :meth:`write`. + :param revision: Which revision (documents with the same + filename and different uploadDate) of the file to retrieve. + Defaults to -1 (the most recent revision). + :param session: a + :class:`~pymongo.client_session.ClientSession` + + :Note: Revision numbers are defined as follows: + + - 0 = the original stored file + - 1 = the first revision + - 2 = the second revision + - etc... + - -2 = the second most recent revision + - -1 = the most recent revision + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + with self.open_download_stream_by_name(filename, revision, session=session) as gout: + while True: + chunk = gout.readchunk() + if not len(chunk): + break + destination.write(chunk) + + def rename( + self, file_id: Any, new_filename: str, session: Optional[ClientSession] = None + ) -> None: + """Renames the stored file with the specified file_id. 
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            # Get _id of file to rename
+            file_id = fs.upload_from_stream("test_file", "data I want to store!")
+            fs.rename(file_id, "new_test_name")
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with file_id exists.
+
+        :param file_id: The _id of the file to be renamed.
+        :param new_filename: The new name of the file.
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+        """
+        _disallow_transactions(session)
+        result = self._files.update_one(
+            {"_id": file_id}, {"$set": {"filename": new_filename}}, session=session
+        )
+        if not result.matched_count:
+            raise NoFile(
+                "no files could be renamed %r because none "
+                "matched file_id %r" % (new_filename, file_id)
+            )
+
+    def rename_by_name(
+        self, filename: str, new_filename: str, session: Optional[ClientSession] = None
+    ) -> None:
+        """Renames the stored file with the specified filename.
+
+        For example::
+
+            my_db = MongoClient().test
+            fs = GridFSBucket(my_db)
+            fs.upload_from_stream("test_file", "data I want to store!")
+            fs.rename_by_name("test_file", "new_test_name")
+
+        Raises :exc:`~gridfs.errors.NoFile` if no file with the given filename exists.
+
+        :param filename: The filename of the file to be renamed.
+        :param new_filename: The new name of the file.
+        :param session: a :class:`~pymongo.client_session.ClientSession`
+
+        .. versionadded:: 4.12
+        """
+        _disallow_transactions(session)
+        result = self._files.update_many(
+            {"filename": filename}, {"$set": {"filename": new_filename}}, session=session
+        )
+        if not result.matched_count:
+            raise NoFile(
+                f"no files could be renamed {new_filename!r} because none matched filename {filename!r}"
+            )
+
+
+class GridIn:
+    """Class to write data to GridFS."""
+
+    def __init__(
+        self,
+        root_collection: Collection[Any],
+        session: Optional[ClientSession] = None,
+        **kwargs: Any,
+    ) -> None:
+        """Write a file to GridFS
+
+        Application developers should generally not need to
+        instantiate this class directly - instead see the methods
+        provided by :class:`~gridfs.GridFS`.
+
+        Raises :class:`TypeError` if `root_collection` is not an
+        instance of :class:`~pymongo.collection.Collection`.
+
+        Any of the file level options specified in the `GridFS Spec
+        `_ may be passed as
+        keyword arguments. Any additional keyword arguments will be
+        set as additional fields on the file document. Valid keyword
+        arguments include:
+
+        - ``"_id"``: unique ID for this file (default:
+          :class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
+          not have already been used for another file
+
+        - ``"filename"``: human name for the file
+
+        - ``"contentType"`` or ``"content_type"``: valid mime-type
+          for the file
+
+        - ``"chunkSize"`` or ``"chunk_size"``: size of each of the
+          chunks, in bytes (default: 255 kb)
+
+        - ``"encoding"``: encoding used for this file. Any :class:`str`
+          that is written to the file will be converted to :class:`bytes`.
+
+        :param root_collection: root collection to write to
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession` to use for all
+            commands
+        :param kwargs: file level options (see above)
+
+        .. versionchanged:: 4.0
+           Removed the `disable_md5` parameter. See
+           :ref:`removed-gridfs-checksum` for details.
+
+        .. versionchanged:: 3.7
+           Added the `disable_md5` parameter.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+
+        ..
versionchanged:: 3.0 + `root_collection` must use an acknowledged + :attr:`~pymongo.collection.Collection.write_concern` + """ + if not isinstance(root_collection, Collection): + raise TypeError( + f"root_collection must be an instance of Collection, not {type(root_collection)}" + ) + + if not root_collection.write_concern.acknowledged: + raise ConfigurationError("root_collection must use acknowledged write_concern") + _disallow_transactions(session) + + # Handle alternative naming + if "content_type" in kwargs: + kwargs["contentType"] = kwargs.pop("content_type") + if "chunk_size" in kwargs: + kwargs["chunkSize"] = kwargs.pop("chunk_size") + + coll = _clear_entity_type_registry(root_collection, read_preference=ReadPreference.PRIMARY) + + # Defaults + kwargs["_id"] = kwargs.get("_id", ObjectId()) + kwargs["chunkSize"] = kwargs.get("chunkSize", DEFAULT_CHUNK_SIZE) + object.__setattr__(self, "_session", session) + object.__setattr__(self, "_coll", coll) + object.__setattr__(self, "_chunks", coll.chunks) + object.__setattr__(self, "_file", kwargs) + object.__setattr__(self, "_buffer", io.BytesIO()) + object.__setattr__(self, "_position", 0) + object.__setattr__(self, "_chunk_number", 0) + object.__setattr__(self, "_closed", False) + object.__setattr__(self, "_ensured_index", False) + object.__setattr__(self, "_buffered_docs", []) + object.__setattr__(self, "_buffered_docs_size", 0) + + def _create_index(self, collection: Collection[Any], index_key: Any, unique: bool) -> None: + doc = collection.find_one(projection={"_id": 1}, session=self._session) + if doc is None: + try: + index_keys = [ + index_spec["key"] + for index_spec in collection.list_indexes(session=self._session) + ] + except OperationFailure: + index_keys = [] + if index_key not in index_keys: + collection.create_index(index_key.items(), unique=unique, session=self._session) + + def _ensure_indexes(self) -> None: + if not object.__getattribute__(self, "_ensured_index"): + _disallow_transactions(self._session) + self._create_index(self._coll.files, _F_INDEX, False) + self._create_index(self._coll.chunks, _C_INDEX, True) + object.__setattr__(self, "_ensured_index", True) + + def abort(self) -> None: + """Remove all chunks/files that may have been uploaded and close.""" + self._coll.chunks.delete_many({"files_id": self._file["_id"]}, session=self._session) + self._coll.files.delete_one({"_id": self._file["_id"]}, session=self._session) + object.__setattr__(self, "_closed", True) + + @property + def closed(self) -> bool: + """Is this file closed?""" + return self._closed + + _id: Any = _grid_in_property("_id", "The ``'_id'`` value for this file.", read_only=True) + filename: Optional[str] = _grid_in_property("filename", "Name of this file.") + name: Optional[str] = _grid_in_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_in_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _grid_in_property("length", "Length (in bytes) of this file.", closed_only=True) + chunk_size: int = _grid_in_property("chunkSize", "Chunk size for this file.", read_only=True) + upload_date: datetime.datetime = _grid_in_property( + "uploadDate", "Date that this file was uploaded.", closed_only=True + ) + md5: Optional[str] = _grid_in_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. 
MD5 of the contents of this file if an md5 sum was created.", + closed_only=True, + ) + + _buffer: io.BytesIO + _closed: bool + _buffered_docs: list[dict[str, Any]] + _buffered_docs_size: int + + def __getattr__(self, name: str) -> Any: + if name == "_coll": + return object.__getattribute__(self, name) + elif name in self._file: + return self._file[name] + raise AttributeError("GridIn object has no attribute '%s'" % name) + + def __setattr__(self, name: str, value: Any) -> None: + # For properties of this instance like _buffer, or descriptors set on + # the class like filename, use regular __setattr__ + if name in self.__dict__ or name in self.__class__.__dict__: + object.__setattr__(self, name, value) + else: + # All other attributes are part of the document in db.fs.files. + # Store them to be sent to server on close() or if closed, send + # them now. + self._file[name] = value + if self._closed: + if _IS_SYNC: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + else: + raise AttributeError( + "GridIn does not support __setattr__ after being closed(). Set the attribute before closing the file or use GridIn.set() instead" + ) + + def set(self, name: str, value: Any) -> None: + self._file[name] = value + if self._closed: + self._coll.files.update_one({"_id": self._file["_id"]}, {"$set": {name: value}}) + + def _flush_data(self, data: Any, force: bool = False) -> None: + """Flush `data` to a chunk.""" + self._ensure_indexes() + assert len(data) <= self.chunk_size + if data: + self._buffered_docs.append( + {"files_id": self._file["_id"], "n": self._chunk_number, "data": data} + ) + self._buffered_docs_size += len(data) + _CHUNK_OVERHEAD + if not self._buffered_docs: + return + # Limit to 100,000 chunks or 32MB (+1 chunk) of data. + if ( + force + or self._buffered_docs_size >= _UPLOAD_BUFFER_SIZE + or len(self._buffered_docs) >= _UPLOAD_BUFFER_CHUNKS + ): + try: + self._chunks.insert_many(self._buffered_docs, session=self._session) + except BulkWriteError as exc: + # For backwards compatibility, raise an insert_one style exception. + write_errors = exc.details["writeErrors"] + for err in write_errors: + if err.get("code") in (11000, 11001, 12582): # Duplicate key errors + self._raise_file_exists(self._file["_id"]) + result = {"writeErrors": write_errors} + wces = exc.details["writeConcernErrors"] + if wces: + result["writeConcernError"] = wces[-1] + _check_write_command_response(result) + raise + self._buffered_docs = [] + self._buffered_docs_size = 0 + self._chunk_number += 1 + self._position += len(data) + + def _flush_buffer(self, force: bool = False) -> None: + """Flush the buffer contents out to a chunk.""" + self._flush_data(self._buffer.getvalue(), force=force) + self._buffer.close() + self._buffer = io.BytesIO() + + def _flush(self) -> Any: + """Flush the file to the database.""" + try: + self._flush_buffer(force=True) + # The GridFS spec says length SHOULD be an Int64. + self._file["length"] = Int64(self._position) + self._file["uploadDate"] = datetime.datetime.now(tz=datetime.timezone.utc) + + return self._coll.files.insert_one(self._file, session=self._session) + except DuplicateKeyError: + self._raise_file_exists(self._id) + + def _raise_file_exists(self, file_id: Any) -> NoReturn: + """Raise a FileExists exception for the given file_id.""" + raise FileExists("file with _id %r already exists" % file_id) + + def close(self) -> None: + """Flush the file and close it. + + A closed file cannot be written any more. 
Calling + :meth:`close` more than once is allowed. + """ + if not self._closed: + self._flush() + object.__setattr__(self, "_closed", True) + + def read(self, size: int = -1) -> NoReturn: + raise io.UnsupportedOperation("read") + + def readable(self) -> bool: + return False + + def seekable(self) -> bool: + return False + + def write(self, data: Any) -> None: + """Write data to the file. There is no return value. + + `data` can be either a string of bytes or a file-like object + (implementing :meth:`read`). If the file has an + :attr:`encoding` attribute, `data` can also be a + :class:`str` instance, which will be encoded as + :attr:`encoding` before being written. + + Due to buffering, the data may not actually be written to the + database until the :meth:`close` method is called. Raises + :class:`ValueError` if this file is already closed. Raises + :class:`TypeError` if `data` is not an instance of + :class:`bytes`, a file-like object, or an instance of :class:`str`. + Unicode data is only allowed if the file has an :attr:`encoding` + attribute. + + :param data: string of bytes or file-like object to be written + to the file + """ + if self._closed: + raise ValueError("cannot write to a closed file") + + try: + # file-like + read = data.read + except AttributeError: + # string + if not isinstance(data, (str, bytes)): + raise TypeError("can only write strings or file-like objects") from None + if isinstance(data, str): + try: + data = data.encode(self.encoding) + except AttributeError: + raise TypeError( + "must specify an encoding for file in order to write str" + ) from None + read = io.BytesIO(data).read + + if inspect.iscoroutinefunction(read): + self._write_async(read) + else: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = read(space) + except BaseException: + self.abort() + raise + self._buffer.write(to_write) + if len(to_write) < space: + return # EOF or incomplete + self._flush_buffer() + to_write = read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: + self._flush_data(to_write) + to_write = read(self.chunk_size) + self._buffer.write(to_write) + + def _write_async(self, read: Any) -> None: + if self._buffer.tell() > 0: + # Make sure to flush only when _buffer is complete + space = self.chunk_size - self._buffer.tell() + if space: + try: + to_write = read(space) + except BaseException: + self.abort() + raise + self._buffer.write(to_write) + if len(to_write) < space: + return # EOF or incomplete + self._flush_buffer() + to_write = read(self.chunk_size) + while to_write and len(to_write) == self.chunk_size: + self._flush_data(to_write) + to_write = read(self.chunk_size) + self._buffer.write(to_write) + + def writelines(self, sequence: Iterable[Any]) -> None: + """Write a sequence of strings to the file. + + Does not add separators. + """ + for line in sequence: + self.write(line) + + def writeable(self) -> bool: + return True + + def __enter__(self) -> GridIn: + """Support for the context manager protocol.""" + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: + """Support for the context manager protocol. + + Close the file if no exceptions occur and allow exceptions to propagate. + """ + if exc_type is None: + # No exceptions happened. + self.close() + else: + # Something happened, at minimum mark as closed. 
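+            # (Chunks already flushed to the server are left in place; use
+            # abort() to remove them if a clean failure is required.)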
+ object.__setattr__(self, "_closed", True) + + # propagate exceptions + return False + + +GRIDOUT_BASE_CLASS = io.IOBase if _IS_SYNC else object # type: Any + + +class GridOut(GRIDOUT_BASE_CLASS): # type: ignore + + """Class to read data out of GridFS.""" + + def __init__( + self, + root_collection: Collection[Any], + file_id: Optional[int] = None, + file_document: Optional[Any] = None, + session: Optional[ClientSession] = None, + ) -> None: + """Read a file from GridFS + + Application developers should generally not need to + instantiate this class directly - instead see the methods + provided by :class:`~gridfs.GridFS`. + + Either `file_id` or `file_document` must be specified, + `file_document` will be given priority if present. Raises + :class:`TypeError` if `root_collection` is not an instance of + :class:`~pymongo.collection.Collection`. + + :param root_collection: root collection to read from + :param file_id: value of ``"_id"`` for the file to read + :param file_document: file document from + `root_collection.files` + :param session: a + :class:`~pymongo.client_session.ClientSession` to use for all + commands + + .. versionchanged:: 3.8 + For better performance and to better follow the GridFS spec, + :class:`GridOut` now uses a single cursor to read all the chunks in + the file. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.0 + Creating a GridOut does not immediately retrieve the file metadata + from the server. Metadata is fetched when first needed. + """ + if not isinstance(root_collection, Collection): + raise TypeError( + f"root_collection must be an instance of Collection, not {type(root_collection)}" + ) + _disallow_transactions(session) + + root_collection = _clear_entity_type_registry(root_collection) + + super().__init__() + + self._chunks = root_collection.chunks + self._files = root_collection.files + self._file_id = file_id + self._buffer = EMPTY + # Start position within the current buffered chunk. + self._buffer_pos = 0 + self._chunk_iter = None + # Position within the total file. + self._position = 0 + self._file = file_document + self._session = session + if not _IS_SYNC: + self.closed = False + + _id: Any = _grid_out_property("_id", "The ``'_id'`` value for this file.") + filename: str = _grid_out_property("filename", "Name of this file.") + name: str = _grid_out_property("filename", "Alias for `filename`.") + content_type: Optional[str] = _grid_out_property( + "contentType", "DEPRECATED, will be removed in PyMongo 5.0. Mime-type for this file." + ) + length: int = _grid_out_property("length", "Length (in bytes) of this file.") + chunk_size: int = _grid_out_property("chunkSize", "Chunk size for this file.") + upload_date: datetime.datetime = _grid_out_property( + "uploadDate", "Date that this file was first uploaded." + ) + aliases: Optional[list[str]] = _grid_out_property( + "aliases", "DEPRECATED, will be removed in PyMongo 5.0. List of aliases for this file." + ) + metadata: Optional[Mapping[str, Any]] = _grid_out_property( + "metadata", "Metadata attached to this file." + ) + md5: Optional[str] = _grid_out_property( + "md5", + "DEPRECATED, will be removed in PyMongo 5.0. 
MD5 of the contents of this file if an md5 sum was created.",
+    )
+
+    _file: Any
+    _chunk_iter: Any
+
+    if not _IS_SYNC:
+        closed: bool
+
+    def __next__(self) -> bytes:
+        line = self.readline()
+        if line:
+            return line
+        raise StopIteration()
+
+    def to_list(self) -> list[bytes]:
+        return [x for x in self]  # noqa: C416, RUF100
+
+    def readline(self, size: int = -1) -> bytes:
+        """Read one line or up to `size` bytes from the file.
+
+        :param size: the maximum number of bytes to read
+        """
+        return self._read_size_or_line(size=size, line=True)
+
+    def readlines(self, size: int = -1) -> list[bytes]:
+        """Read lines from the file, up to a total of `size` bytes.
+
+        :param size: the maximum number of bytes to read
+        """
+        self.open()
+        lines = []
+        remainder = int(self.length) - self._position
+        bytes_read = 0
+        while remainder > 0:
+            line = self._read_size_or_line(line=True)
+            bytes_read += len(line)
+            lines.append(line)
+            remainder = int(self.length) - self._position
+            if 0 < size < bytes_read:
+                break
+
+        return lines
+
+    def open(self) -> None:
+        if not self._file:
+            _disallow_transactions(self._session)
+            self._file = self._files.find_one({"_id": self._file_id}, session=self._session)
+            if not self._file:
+                raise NoFile(
+                    f"no file in gridfs collection {self._files!r} with _id {self._file_id!r}"
+                )
+
+    def __getattr__(self, name: str) -> Any:
+        if _IS_SYNC:
+            self.open()  # type: ignore[unused-coroutine]
+        elif not self._file:
+            raise InvalidOperation(
+                "You must call GridOut.open() before accessing the %s property" % name
+            )
+        if name in self._file:
+            return self._file[name]
+        raise AttributeError("GridOut object has no attribute '%s'" % name)
+
+    def readable(self) -> bool:
+        return True
+
+    def readchunk(self) -> bytes:
+        """Reads a chunk at a time. If the current position is within a
+        chunk the remainder of the chunk is returned.
+        """
+        self.open()
+        received = len(self._buffer) - self._buffer_pos
+        chunk_data = EMPTY
+        chunk_size = int(self.chunk_size)
+
+        if received > 0:
+            chunk_data = self._buffer[self._buffer_pos :]
+        elif self._position < int(self.length):
+            chunk_number = int((received + self._position) / chunk_size)
+            if self._chunk_iter is None:
+                self._chunk_iter = GridOutChunkIterator(
+                    self, self._chunks, self._session, chunk_number
+                )
+
+            chunk = self._chunk_iter.next()
+            chunk_data = chunk["data"][self._position % chunk_size :]
+
+            if not chunk_data:
+                raise CorruptGridFile("truncated chunk")
+
+        self._position += len(chunk_data)
+        self._buffer = EMPTY
+        self._buffer_pos = 0
+        return chunk_data
+
+    def _read_size_or_line(self, size: int = -1, line: bool = False) -> bytes:
+        """Internal read() and readline() helper."""
+        self.open()
+        remainder = int(self.length) - self._position
+        if size < 0 or size > remainder:
+            size = remainder
+
+        if size == 0:
+            return EMPTY
+
+        received = 0
+        data = []
+        while received < size:
+            needed = size - received
+            if self._buffer:
+                # Optimization: Read the buffer with zero byte copies.
+                buf = self._buffer
+                chunk_start = self._buffer_pos
+                chunk_data = memoryview(buf)[self._buffer_pos :]
+                self._buffer = EMPTY
+                self._buffer_pos = 0
+                self._position += len(chunk_data)
+            else:
+                buf = self.readchunk()
+                chunk_start = 0
+                chunk_data = memoryview(buf)
+            if line:
+                pos = buf.find(NEWLN, chunk_start, chunk_start + needed) - chunk_start
+                if pos >= 0:
+                    # Decrease size to exit the loop.
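+                    # (``pos`` is the newline's offset within the unread part
+                    # of this chunk, so ``needed = pos + 1`` also consumes the
+                    # delimiter.)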
+                    size = received + pos + 1
+                    needed = pos + 1
+            if len(chunk_data) > needed:
+                data.append(chunk_data[:needed])
+                # Optimization: Save the buffer with zero byte copies.
+                self._buffer = buf
+                self._buffer_pos = chunk_start + needed
+                self._position -= len(self._buffer) - self._buffer_pos
+            else:
+                data.append(chunk_data)
+            received += len(chunk_data)
+
+        # Detect extra chunks after reading the entire file.
+        if size == remainder and self._chunk_iter:
+            try:
+                self._chunk_iter.next()
+            except StopIteration:
+                pass
+
+        return b"".join(data)
+
+    def read(self, size: int = -1) -> bytes:
+        """Read at most `size` bytes from the file (less if there
+        isn't enough data).
+
+        The bytes are returned as an instance of :class:`bytes`.
+        If `size` is negative or omitted, all data is read.
+
+        :param size: the number of bytes to read
+
+        .. versionchanged:: 3.8
+           This method now only checks for extra chunks after reading the
+           entire file. Previously, this method would check for extra chunks
+           on every call.
+        """
+        return self._read_size_or_line(size=size)
+
+    def tell(self) -> int:
+        """Return the current position of this file."""
+        return self._position
+
+    def seek(self, pos: int, whence: int = _SEEK_SET) -> int:
+        """Set the current position of this file.
+
+        :param pos: the position (or offset if using relative
+            positioning) to seek to
+        :param whence: where to seek
+            from. :attr:`os.SEEK_SET` (``0``) for absolute file
+            positioning, :attr:`os.SEEK_CUR` (``1``) to seek relative
+            to the current position, :attr:`os.SEEK_END` (``2``) to
+            seek relative to the file's end.
+
+        .. versionchanged:: 4.1
+           The method now returns the new position in the file, to
+           conform to the behavior of :meth:`io.IOBase.seek`.
+        """
+        if whence == _SEEK_SET:
+            new_pos = pos
+        elif whence == _SEEK_CUR:
+            new_pos = self._position + pos
+        elif whence == _SEEK_END:
+            new_pos = int(self.length) + pos
+        else:
+            raise OSError(22, "Invalid value for `whence`")
+
+        if new_pos < 0:
+            raise OSError(22, "Invalid value for `pos` - must be non-negative")
+
+        # Optimization, continue using the same buffer and chunk iterator.
+        if new_pos == self._position:
+            return new_pos
+
+        self._position = new_pos
+        self._buffer = EMPTY
+        self._buffer_pos = 0
+        if self._chunk_iter:
+            self._chunk_iter.close()
+            self._chunk_iter = None
+        return new_pos
+
+    def seekable(self) -> bool:
+        return True
+
+    def __iter__(self) -> GridOut:
+        """Return an iterator over all of this file's data.
+
+        The iterator will return lines (delimited by ``b'\\n'``) of
+        :class:`bytes`. This can be useful when serving files
+        using a webserver that handles such an iterator efficiently.
+
+        .. versionchanged:: 3.8
+           The iterator now raises :class:`CorruptGridFile` when encountering
+           any truncated, missing, or extra chunk in a file. The previous
+           behavior was to only raise :class:`CorruptGridFile` on a missing
+           chunk.
+
+        .. versionchanged:: 4.0
+           The iterator now iterates over *lines* in the file, instead
+           of chunks, to conform to the base class :py:class:`io.IOBase`.
+           Use :meth:`GridOut.readchunk` to read chunk by chunk instead
+           of line by line.
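+
+        A minimal sketch (assumes ``fs`` is a :class:`~gridfs.GridFSBucket`
+        and ``file_id`` refers to a previously uploaded file)::
+
+            with fs.open_download_stream(file_id) as grid_out:
+                for line in grid_out:
+                    print(line)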
+ """ + return self + + def close(self) -> None: + """Make GridOut more generically file-like.""" + if self._chunk_iter: + self._chunk_iter.close() + self._chunk_iter = None + if _IS_SYNC: + super().close() + else: + self.closed = True + + def write(self, value: Any) -> NoReturn: + raise io.UnsupportedOperation("write") + + def writelines(self, lines: Any) -> NoReturn: + raise io.UnsupportedOperation("writelines") + + def writable(self) -> bool: + return False + + def __enter__(self) -> GridOut: + """Makes it possible to use :class:`GridOut` files + with the async context manager protocol. + """ + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: + """Makes it possible to use :class:`GridOut` files + with the async context manager protocol. + """ + self.close() + return False + + def fileno(self) -> NoReturn: + raise io.UnsupportedOperation("fileno") + + def flush(self) -> None: + # GridOut is read-only, so flush does nothing. + pass + + def isatty(self) -> bool: + return False + + def truncate(self, size: Optional[int] = None) -> NoReturn: + # See https://docs.python.org/3/library/io.html#io.IOBase.writable + # for why truncate has to raise. + raise io.UnsupportedOperation("truncate") + + # Override IOBase.__del__ otherwise it will lead to __getattr__ on + # __IOBase_closed which calls _ensure_file and potentially performs I/O. + # We cannot do I/O in __del__ since it can lead to a deadlock. + def __del__(self) -> None: + pass + + +class GridOutChunkIterator: + """Iterates over a file's chunks using a single cursor. + + Raises CorruptGridFile when encountering any truncated, missing, or extra + chunk in a file. + """ + + def __init__( + self, + grid_out: GridOut, + chunks: Collection[Any], + session: Optional[ClientSession], + next_chunk: Any, + ) -> None: + self._id = grid_out._id + self._chunk_size = int(grid_out.chunk_size) + self._length = int(grid_out.length) + self._chunks = chunks + self._session = session + self._next_chunk = next_chunk + self._num_chunks = math.ceil(float(self._length) / self._chunk_size) + self._cursor = None + + _cursor: Optional[Cursor[Any]] + + def expected_chunk_length(self, chunk_n: int) -> int: + if chunk_n < self._num_chunks - 1: + return self._chunk_size + return self._length - (self._chunk_size * (self._num_chunks - 1)) + + def __iter__(self) -> GridOutChunkIterator: + return self + + def _create_cursor(self) -> None: + filter = {"files_id": self._id} + if self._next_chunk > 0: + filter["n"] = {"$gte": self._next_chunk} + _disallow_transactions(self._session) + self._cursor = self._chunks.find(filter, sort=[("n", 1)], session=self._session) + + def _next_with_retry(self) -> Mapping[str, Any]: + """Return the next chunk and retry once on CursorNotFound. + + We retry on CursorNotFound to maintain backwards compatibility in + cases where two calls to read occur more than 10 minutes apart (the + server's default cursor timeout). 
+ """ + if self._cursor is None: + self._create_cursor() + assert self._cursor is not None + try: + return self._cursor.next() + except CursorNotFound: + self._cursor.close() + self._create_cursor() + return self._cursor.next() + + def next(self) -> Mapping[str, Any]: + try: + chunk = self._next_with_retry() + except StopIteration: + if self._next_chunk >= self._num_chunks: + raise + raise CorruptGridFile("no chunk #%d" % self._next_chunk) from None + + if chunk["n"] != self._next_chunk: + self.close() + raise CorruptGridFile( + "Missing chunk: expected chunk #%d but found " + "chunk with n=%d" % (self._next_chunk, chunk["n"]) + ) + + if chunk["n"] >= self._num_chunks: + # According to spec, ignore extra chunks if they are empty. + if len(chunk["data"]): + self.close() + raise CorruptGridFile( + "Extra chunk found: expected %d chunks but found " + "chunk with n=%d" % (self._num_chunks, chunk["n"]) + ) + + expected_length = self.expected_chunk_length(chunk["n"]) + if len(chunk["data"]) != expected_length: + self.close() + raise CorruptGridFile( + "truncated chunk #%d: expected chunk length to be %d but " + "found chunk with length %d" % (chunk["n"], expected_length, len(chunk["data"])) + ) + + self._next_chunk += 1 + return chunk + + __next__ = next + + def close(self) -> None: + if self._cursor: + self._cursor.close() + self._cursor = None + + +class GridOutIterator: + def __init__(self, grid_out: GridOut, chunks: Collection[Any], session: ClientSession): + self._chunk_iter = GridOutChunkIterator(grid_out, chunks, session, 0) + + def __iter__(self) -> GridOutIterator: + return self + + def next(self) -> bytes: + chunk = self._chunk_iter.next() + return bytes(chunk["data"]) + + __next__ = next + + +class GridOutCursor(Cursor): # type: ignore[type-arg] + """A cursor / iterator for returning GridOut objects as the result + of an arbitrary query against the GridFS files collection. + """ + + def __init__( + self, + collection: Collection[Any], + filter: Optional[Mapping[str, Any]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + sort: Optional[Any] = None, + batch_size: int = 0, + session: Optional[ClientSession] = None, + ) -> None: + """Create a new cursor, similar to the normal + :class:`~pymongo.cursor.Cursor`. + + Should not be called directly by application developers - see + the :class:`~gridfs.GridFS` method :meth:`~gridfs.GridFS.find` instead. + + .. versionadded 2.7 + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + _disallow_transactions(session) + collection = _clear_entity_type_registry(collection) + + # Hold on to the base "fs" collection to create GridOut objects later. 
+ self._root_collection = collection + + super().__init__( + collection.files, + filter, + skip=skip, + limit=limit, + no_cursor_timeout=no_cursor_timeout, + sort=sort, + batch_size=batch_size, + session=session, + ) + + def next(self) -> GridOut: + """Get next GridOut object from cursor.""" + _disallow_transactions(self.session) + next_file = super().next() + return GridOut(self._root_collection, file_document=next_file, session=self.session) + + def to_list(self, length: Optional[int] = None) -> list[GridOut]: + """Convert the cursor to a list.""" + if length is None: + return [x for x in self] # noqa: C416,RUF100 + if length < 1: + raise ValueError("to_list() length must be greater than 0") + ret = [] + for _ in range(length): + ret.append(self.next()) + return ret + + __next__ = next + + def add_option(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError("Method does not exist for GridOutCursor") + + def remove_option(self, *args: Any, **kwargs: Any) -> NoReturn: + raise NotImplementedError("Method does not exist for GridOutCursor") + + def _clone_base(self, session: Optional[ClientSession]) -> GridOutCursor: + """Creates an empty GridOutCursor for information to be copied into.""" + return GridOutCursor(self._root_collection, session=session) diff --git a/hatch_build.py b/hatch_build.py new file mode 100644 index 0000000000..40271972dd --- /dev/null +++ b/hatch_build.py @@ -0,0 +1,36 @@ +"""A custom hatch build hook for pymongo.""" +from __future__ import annotations + +import os +import subprocess +import sys +from pathlib import Path + +from hatchling.builders.hooks.plugin.interface import BuildHookInterface + + +class CustomHook(BuildHookInterface): + """The pymongo build hook.""" + + def initialize(self, version, build_data): + """Initialize the hook.""" + if self.target_name == "sdist": + return + here = Path(__file__).parent.resolve() + sys.path.insert(0, str(here)) + + subprocess.run([sys.executable, "_setup.py", "build_ext", "-i"], check=True) + + # Ensure wheel is marked as binary and contains the binary files. + build_data["infer_tag"] = True + build_data["pure_python"] = False + if os.name == "nt": + patt = ".pyd" + else: + patt = ".so" + for pkg in ["bson", "pymongo"]: + dpath = here / pkg + for fpath in dpath.glob(f"*{patt}"): + relpath = os.path.relpath(fpath, here) + build_data["artifacts"].append(relpath) + build_data["force_include"][relpath] = relpath diff --git a/integration_tests/README.md b/integration_tests/README.md new file mode 100644 index 0000000000..fb64a9066f --- /dev/null +++ b/integration_tests/README.md @@ -0,0 +1,42 @@ +# Integration Tests + +A set of tests that verify the usage of PyMongo with downstream packages or frameworks. + +Each test uses [PEP 723 inline metadata](https://packaging.python.org/en/latest/specifications/inline-script-metadata/) and can be run using `pipx` or `uv`. + +The `run.sh` convenience script can be used to run all of the files using `uv`. + +Here is an example header for the script with the inline dependencies: + +```python +# /// script +# dependencies = [ +# "uvloop>=0.18" +# ] +# requires-python = ">=3.10" +# /// +``` + +Here is an example of using the test helper function to create a configured client for the test: + + +```python +import asyncio +import sys +from pathlib import Path + +# Use pymongo from parent directory. 
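+# (This puts the repository root on sys.path so the checked-out pymongo,
+# bson, and gridfs packages are imported instead of any installed copy.)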
+root = Path(__file__).parent.parent +sys.path.insert(0, str(root)) + +from test.asynchronous import async_simple_test_client # noqa: E402 + + +async def main(): + async with async_simple_test_client() as client: + result = await client.admin.command("ping") + assert result["ok"] + + +asyncio.run(main()) +``` diff --git a/integration_tests/run.sh b/integration_tests/run.sh new file mode 100755 index 0000000000..051e2b8a75 --- /dev/null +++ b/integration_tests/run.sh @@ -0,0 +1,11 @@ +#!/bin/bash +# Run all of the integration test files using `uv run`. +set -eu + +for file in integration_tests/test_*.py ; do + echo "-----------------" + echo "Running $file..." + uv run $file + echo "Running $file...done." + echo "-----------------" +done diff --git a/integration_tests/test_uv_loop.py b/integration_tests/test_uv_loop.py new file mode 100644 index 0000000000..88a3ad73ab --- /dev/null +++ b/integration_tests/test_uv_loop.py @@ -0,0 +1,27 @@ +# /// script +# dependencies = [ +# "uvloop>=0.18" +# ] +# requires-python = ">=3.10" +# /// +from __future__ import annotations + +import sys +from pathlib import Path + +import uvloop + +# Use pymongo from parent directory. +root = Path(__file__).parent.parent +sys.path.insert(0, str(root)) + +from test.asynchronous import async_simple_test_client # noqa: E402 + + +async def main(): + async with async_simple_test_client() as client: + result = await client.admin.command("ping") + assert result["ok"] + + +uvloop.run(main()) diff --git a/justfile b/justfile new file mode 100644 index 0000000000..17b95e87b7 --- /dev/null +++ b/justfile @@ -0,0 +1,85 @@ +# See https://just.systems/man/en/ for instructions +set shell := ["bash", "-c"] + +# Commonly used command segments. +typing_run := "uv run --group typing --extra aws --extra encryption --extra ocsp --extra snappy --extra test --extra zstd" +docs_run := "uv run --extra docs" +doc_build := "./doc/_build" +mypy_args := "--install-types --non-interactive" + +# Make the default recipe private so it doesn't show up in the list. 
+[private] +default: + @just --list + +[private] +resync: + @uv sync --quiet + +install: + bash .evergreen/scripts/setup-dev-env.sh + uvx pre-commit install + +[group('docs')] +docs: && resync + {{docs_run}} sphinx-build -W -b html doc {{doc_build}}/html + +[group('docs')] +docs-serve: && resync + {{docs_run}} sphinx-autobuild -W -b html doc --watch ./pymongo --watch ./bson --watch ./gridfs {{doc_build}}/serve + +[group('docs')] +docs-linkcheck: && resync + {{docs_run}} sphinx-build -E -b linkcheck doc {{doc_build}}/linkcheck + +[group('typing')] +typing: && resync + just typing-mypy + just typing-pyright + +[group('typing')] +typing-mypy: && resync + {{typing_run}} mypy {{mypy_args}} bson gridfs tools pymongo + {{typing_run}} mypy {{mypy_args}} --config-file mypy_test.ini test + {{typing_run}} mypy {{mypy_args}} test/test_typing.py test/test_typing_strict.py + +[group('typing')] +typing-pyright: && resync + {{typing_run}} pyright test/test_typing.py test/test_typing_strict.py + {{typing_run}} pyright -p strict_pyrightconfig.json test/test_typing_strict.py + +[group('lint')] +lint *args="": && resync + uvx pre-commit run --all-files {{args}} + +[group('lint')] +lint-manual *args="": && resync + uvx pre-commit run --all-files --hook-stage manual {{args}} + +[group('test')] +test *args="-v --durations=5 --maxfail=10": && resync + uv run --extra test pytest {{args}} + +[group('test')] +run-tests *args: && resync + bash ./.evergreen/run-tests.sh {{args}} + +[group('test')] +setup-tests *args="": + bash .evergreen/scripts/setup-tests.sh {{args}} + +[group('test')] +teardown-tests: + bash .evergreen/scripts/teardown-tests.sh + +[group('test')] +integration-tests: + bash integration_tests/run.sh + +[group('server')] +run-server *args="": + bash .evergreen/scripts/run-server.sh {{args}} + +[group('server')] +stop-server: + bash .evergreen/scripts/stop-server.sh diff --git a/mypy.ini b/mypy.ini deleted file mode 100644 index 9b1348472c..0000000000 --- a/mypy.ini +++ /dev/null @@ -1,42 +0,0 @@ -[mypy] -check_untyped_defs = true -disallow_subclassing_any = true -disallow_incomplete_defs = true -no_implicit_optional = true -pretty = true -show_error_context = true -show_error_codes = true -strict_equality = true -warn_unused_configs = true -warn_unused_ignores = true -warn_redundant_casts = true - -[mypy-gevent.*] -ignore_missing_imports = True - -[mypy-kerberos.*] -ignore_missing_imports = True - -[mypy-mockupdb] -ignore_missing_imports = True - -[mypy-pymongo_auth_aws.*] -ignore_missing_imports = True - -[mypy-pymongocrypt.*] -ignore_missing_imports = True - -[mypy-service_identity.*] -ignore_missing_imports = True - -[mypy-snappy.*] -ignore_missing_imports = True - -[mypy-test.test_mypy] -warn_unused_ignores = false - -[mypy-winkerberos.*] -ignore_missing_imports = True - -[mypy-xmlrunner.*] -ignore_missing_imports = True diff --git a/mypy_test.ini b/mypy_test.ini new file mode 100644 index 0000000000..9fdc664e32 --- /dev/null +++ b/mypy_test.ini @@ -0,0 +1,15 @@ +[mypy] +strict = true +show_error_codes = true +disable_error_code = attr-defined, union-attr, var-annotated, assignment, no-redef, type-arg, import, no-untyped-call, no-untyped-def, index, no-any-return, misc +exclude = (?x)( + ^test/mypy_fails/*.*$ + | ^test/conftest.py$ + ) + +[mypy-pymongo.synchronous.*,gridfs.synchronous.*,test.*] +warn_unused_ignores = false +disable_error_code = unused-coroutine + +[mypy-pymongo.asynchronous.*,test.asynchronous.*] +warn_unused_ignores = false diff --git a/pymongo/__init__.py b/pymongo/__init__.py 
index 7eaa793648..ac540d94db 100644 --- a/pymongo/__init__.py +++ b/pymongo/__init__.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,8 +13,9 @@ # limitations under the License. """Python driver for MongoDB.""" +from __future__ import annotations -from typing import ContextManager, Optional, Tuple, Union +from typing import ContextManager, Optional __all__ = [ "ASCENDING", @@ -32,6 +33,7 @@ "MIN_SUPPORTED_WIRE_VERSION", "CursorType", "MongoClient", + "AsyncMongoClient", "DeleteMany", "DeleteOne", "IndexModel", @@ -53,7 +55,7 @@ GEO2D = "2d" """Index specifier for a 2-dimensional `geospatial index`_. -.. _geospatial index: http://mongodb.com/docs/manual/core/2d/ +.. _geospatial index: https://mongodb.com/docs/manual/core/2d/ """ GEOSPHERE = "2dsphere" @@ -61,7 +63,7 @@ .. versionadded:: 2.5 -.. _spherical geospatial index: http://mongodb.com/docs/manual/core/2dsphere/ +.. _spherical geospatial index: https://mongodb.com/docs/manual/core/2dsphere/ """ HASHED = "hashed" @@ -69,7 +71,7 @@ .. versionadded:: 2.5 -.. _hashed index: http://mongodb.com/docs/manual/core/index-hashed/ +.. _hashed index: https://mongodb.com/docs/manual/core/index-hashed/ """ TEXT = "text" @@ -81,28 +83,14 @@ .. versionadded:: 2.7.1 -.. _text index: http://mongodb.com/docs/manual/core/index-text/ +.. _text index: https://mongodb.com/docs/manual/core/index-text/ """ -version_tuple: Tuple[Union[int, str], ...] = (4, 2, 0, ".dev2") - - -def get_version_string() -> str: - if isinstance(version_tuple[-1], str): - return ".".join(map(str, version_tuple[:-1])) + version_tuple[-1] - return ".".join(map(str, version_tuple)) - - -__version__: str = get_version_string() -version = __version__ - -"""Current version of PyMongo.""" - from pymongo import _csot -from pymongo.collection import ReturnDocument -from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION +from pymongo._version import __version__, get_version_string, version_tuple +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.common import MAX_SUPPORTED_WIRE_VERSION, MIN_SUPPORTED_WIRE_VERSION, has_c from pymongo.cursor import CursorType -from pymongo.mongo_client import MongoClient from pymongo.operations import ( DeleteMany, DeleteOne, @@ -113,20 +101,25 @@ def get_version_string() -> str: UpdateOne, ) from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.collection import ReturnDocument +from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +# Public module compatibility imports +# isort: off +from pymongo import uri_parser # noqa: F401 +from pymongo import change_stream # noqa: F401 +from pymongo import client_session # noqa: F401 +from pymongo import collection # noqa: F401 +from pymongo import command_cursor # noqa: F401 +from pymongo import database # noqa: F401 +# isort: on -def has_c() -> bool: - """Is the C extension installed?""" - try: - from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 - - return True - except ImportError: - return False +version = __version__ +"""Current version of PyMongo.""" -def timeout(seconds: Optional[float]) -> ContextManager: +def timeout(seconds: Optional[float]) -> 
ContextManager[None]: """**(Provisional)** Apply the given timeout for a block of operations. .. note:: :func:`~pymongo.timeout` is currently provisional. Backwards @@ -155,8 +148,8 @@ def timeout(seconds: Optional[float]) -> ContextManager: else: print(f"failed with non-timeout error: {exc!r}") - When nesting :func:`~pymongo.timeout`, the newly computed deadline is capped to at most - the existing deadline. The deadline can only be shortened, not extended. + When nesting :func:`~pymongo.timeout`, the nested deadline is capped by + the outer deadline. The deadline can only be shortened, not extended. When exiting the block, the previous deadline is restored:: with pymongo.timeout(5): @@ -168,16 +161,16 @@ def timeout(seconds: Optional[float]) -> ContextManager: coll.find_one() # Still uses the original 5 second deadline. coll.find_one() # Uses the original 5 second deadline. - :Parameters: - - `seconds`: A non-negative floating point number expressing seconds, or None. + :param seconds: A non-negative floating point number expressing seconds, or None. + + :raises: :py:class:`ValueError`: When `seconds` is negative. - :Raises: - - :py:class:`ValueError`: When `seconds` is negative. + See `Limit Server Execution Time `_ for more examples. .. versionadded:: 4.2 """ if not isinstance(seconds, (int, float, type(None))): - raise TypeError("timeout must be None, an int, or a float") + raise TypeError(f"timeout must be None, an int, or a float, not {type(seconds)}") if seconds and seconds < 0: raise ValueError("timeout cannot be negative") if seconds is not None: diff --git a/pymongo/_asyncio_lock.py b/pymongo/_asyncio_lock.py new file mode 100644 index 0000000000..5ca09982fa --- /dev/null +++ b/pymongo/_asyncio_lock.py @@ -0,0 +1,309 @@ +# Copyright (c) 2001-2024 Python Software Foundation; All Rights Reserved + +"""Lock and Condition classes vendored from https://github.com/python/cpython/blob/main/Lib/asyncio/locks.py +to port 3.13 fixes to older versions of Python. +Can be removed once we drop Python 3.12 support.""" + +from __future__ import annotations + +import collections +import threading +from asyncio import events, exceptions +from typing import Any, Coroutine, Optional + +_global_lock = threading.Lock() + + +class _LoopBoundMixin: + _loop = None + + def _get_loop(self) -> Any: + loop = events._get_running_loop() + + if self._loop is None: + with _global_lock: + if self._loop is None: + self._loop = loop + if loop is not self._loop: + raise RuntimeError(f"{self!r} is bound to a different event loop") + return loop + + +class _ContextManagerMixin: + async def __aenter__(self) -> None: + await self.acquire() # type: ignore[attr-defined] + # We have no use for the "as ..." clause in the with + # statement for locks. + return + + async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None: + self.release() # type: ignore[attr-defined] + + +class Lock(_ContextManagerMixin, _LoopBoundMixin): + """Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular task when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another task changes it to unlocked, then the acquire() call + resets it to locked and returns. 
The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one task is blocked in acquire() waiting for + the state to turn to unlocked, only one task proceeds when a + release() call resets the state to unlocked; successive release() + calls will unblock tasks in FIFO order. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... + + """ + + def __init__(self) -> None: + self._waiters: Optional[collections.deque[Any]] = None + self._locked = False + + def __repr__(self) -> str: + res = super().__repr__() + extra = "locked" if self._locked else "unlocked" + if self._waiters: + extra = f"{extra}, waiters:{len(self._waiters)}" + return f"<{res[1:-1]} [{extra}]>" + + def locked(self) -> bool: + """Return True if lock is acquired.""" + return self._locked + + async def acquire(self) -> bool: + """Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + """ + # Implement fair scheduling, where thread always waits + # its turn. Jumping the queue if all are cancelled is an optimization. + if not self._locked and ( + self._waiters is None or all(w.cancelled() for w in self._waiters) + ): + self._locked = True + return True + + if self._waiters is None: + self._waiters = collections.deque() + fut = self._get_loop().create_future() + self._waiters.append(fut) + + try: + try: + await fut + finally: + self._waiters.remove(fut) + except exceptions.CancelledError: + # Currently the only exception designed be able to occur here. + + # Ensure the lock invariant: If lock is not claimed (or about + # to be claimed by us) and there is a Task in waiters, + # ensure that the Task at the head will run. + if not self._locked: + self._wake_up_first() + raise + + # assert self._locked is False + self._locked = True + return True + + def release(self) -> None: + """Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other tasks are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + """ + if self._locked: + self._locked = False + self._wake_up_first() + else: + raise RuntimeError("Lock is not acquired") + + def _wake_up_first(self) -> None: + """Ensure that the first waiter will wake up.""" + if not self._waiters: + return + try: + fut = next(iter(self._waiters)) + except StopIteration: + return + + # .done() means that the waiter is already set to wake up. + if not fut.done(): + fut.set_result(True) + + +class Condition(_ContextManagerMixin, _LoopBoundMixin): + """Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more tasks to wait until they are notified by another + task. + + A new Lock object is created and used as the underlying lock. 
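+
+    Usage (a minimal sketch; ``predicate`` is any zero-argument callable):
+
+        cond = Condition()
+        ...
+        async with cond:
+            await cond.wait_for(predicate)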
+ """ + + def __init__(self, lock: Optional[Lock] = None) -> None: + if lock is None: + lock = Lock() + + self._lock = lock + # Export the lock's locked(), acquire() and release() methods. + self.locked = lock.locked + self.acquire = lock.acquire + self.release = lock.release + + self._waiters: collections.deque[Any] = collections.deque() + + def __repr__(self) -> str: + res = super().__repr__() + extra = "locked" if self.locked() else "unlocked" + if self._waiters: + extra = f"{extra}, waiters:{len(self._waiters)}" + return f"<{res[1:-1]} [{extra}]>" + + async def wait(self) -> bool: + """Wait until notified. + + If the calling task has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another task. Once + awakened, it re-acquires the lock and returns True. + + This method may return spuriously, + which is why the caller should always + re-check the state and be prepared to wait() again. + """ + if not self.locked(): + raise RuntimeError("cannot wait on un-acquired lock") + + fut = self._get_loop().create_future() + self.release() + try: + try: + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + finally: + # Must re-acquire lock even if wait is cancelled. + # We only catch CancelledError here, since we don't want any + # other (fatal) errors with the future to cause us to spin. + err = None + while True: + try: + await self.acquire() + break + except exceptions.CancelledError as e: + err = e + + if err is not None: + try: + raise err # Re-raise most recent exception instance. + finally: + err = None # Break reference cycles. + except BaseException: + # Any error raised out of here _may_ have occurred after this Task + # believed to have been successfully notified. + # Make sure to notify another Task instead. This may result + # in a "spurious wakeup", which is allowed as part of the + # Condition Variable protocol. + self._notify(1) + raise + + async def wait_for(self, predicate: Any) -> Coroutine[Any, Any, Any]: + """Wait until a predicate becomes true. + + The predicate should be a callable whose result will be + interpreted as a boolean value. The method will repeatedly + wait() until it evaluates to true. The final predicate value is + the return value. + """ + result = predicate() + while not result: + await self.wait() + result = predicate() + return result + + def notify(self, n: int = 1) -> None: + """By default, wake up one task waiting on this condition, if any. + If the calling task has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up n of the tasks waiting for the condition + variable; if fewer than n are waiting, they are all awoken. + + Note: an awakened task does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. + """ + if not self.locked(): + raise RuntimeError("cannot notify on un-acquired lock") + self._notify(n) + + def _notify(self, n: int) -> None: + idx = 0 + for fut in self._waiters: + if idx >= n: + break + + if not fut.done(): + idx += 1 + fut.set_result(False) + + def notify_all(self) -> None: + """Wake up all tasks waiting on this condition. This method acts + like notify(), but wakes up all waiting tasks instead of one. 
If the + calling task has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) diff --git a/pymongo/_asyncio_task.py b/pymongo/_asyncio_task.py new file mode 100644 index 0000000000..118471963a --- /dev/null +++ b/pymongo/_asyncio_task.py @@ -0,0 +1,49 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A custom asyncio.Task that allows checking if a task has been sent a cancellation request. +Can be removed once we drop Python 3.10 support in favor of asyncio.Task.cancelling.""" + + +from __future__ import annotations + +import asyncio +import sys +from typing import Any, Coroutine, Optional + + +# TODO (https://jira.mongodb.org/browse/PYTHON-4981): Revisit once the underlying cause of the swallowed cancellations is uncovered +class _Task(asyncio.Task[Any]): + def __init__(self, coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> None: + super().__init__(coro, name=name) + self._cancel_requests = 0 + asyncio._register_task(self) + + def cancel(self, msg: Optional[str] = None) -> bool: + self._cancel_requests += 1 + return super().cancel(msg=msg) + + def uncancel(self) -> int: + if self._cancel_requests > 0: + self._cancel_requests -= 1 + return self._cancel_requests + + def cancelling(self) -> int: + return self._cancel_requests + + +def create_task(coro: Coroutine[Any, Any, Any], *, name: Optional[str] = None) -> asyncio.Task[Any]: + if sys.version_info >= (3, 11): + return asyncio.create_task(coro, name=name) + return _Task(coro, name=name) diff --git a/pymongo/_azure_helpers.py b/pymongo/_azure_helpers.py new file mode 100644 index 0000000000..8a7af0b407 --- /dev/null +++ b/pymongo/_azure_helpers.py @@ -0,0 +1,57 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Azure helpers.""" +from __future__ import annotations + +import json +from typing import Any, Optional + + +def _get_azure_response( + resource: str, client_id: Optional[str] = None, timeout: float = 5 +) -> dict[str, Any]: + # Deferred import to save overall import time. 
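+    # (169.254.169.254 is the link-local address of the Azure Instance
+    # Metadata Service (IMDS); the "Metadata: true" header sent below is
+    # required by that service.)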
+ from urllib.request import Request, urlopen + + url = "http://169.254.169.254/metadata/identity/oauth2/token" + url += "?api-version=2018-02-01" + url += f"&resource={resource}" + if client_id: + url += f"&client_id={client_id}" + headers = {"Metadata": "true", "Accept": "application/json"} + request = Request(url, headers=headers) # noqa: S310 + try: + with urlopen(request, timeout=timeout) as response: # noqa: S310 + status = response.status + body = response.read().decode("utf8") + except Exception as e: + msg = "Failed to acquire IMDS access token: %s" % e + raise ValueError(msg) from None + + if status != 200: + msg = "Failed to acquire IMDS access token." + raise ValueError(msg) + try: + data = json.loads(body) + except Exception: + raise ValueError("Azure IMDS response must be in JSON format") from None + + for key in ["access_token", "expires_in"]: + if not data.get(key): + msg = "Azure IMDS response must contain %s, but was %s." + msg = msg % (key, body) + raise ValueError(msg) + + return data diff --git a/pymongo/_client_bulk_shared.py b/pymongo/_client_bulk_shared.py new file mode 100644 index 0000000000..5814025566 --- /dev/null +++ b/pymongo/_client_bulk_shared.py @@ -0,0 +1,79 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, types, and classes shared across Client Bulk Write API implementations.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, NoReturn + +from pymongo.errors import ClientBulkWriteException, OperationFailure +from pymongo.helpers_shared import _get_wce_doc + +if TYPE_CHECKING: + from pymongo.typings import _DocumentOut + + +def _merge_command( + ops: list[tuple[str, Mapping[str, Any]]], + offset: int, + full_result: MutableMapping[str, Any], + result: Mapping[str, Any], +) -> None: + """Merge result of a single bulk write batch into the full result.""" + if result.get("error"): + full_result["error"] = result["error"] + + full_result["nInserted"] += result.get("nInserted", 0) + full_result["nDeleted"] += result.get("nDeleted", 0) + full_result["nMatched"] += result.get("nMatched", 0) + full_result["nModified"] += result.get("nModified", 0) + full_result["nUpserted"] += result.get("nUpserted", 0) + + write_errors = result.get("writeErrors") + if write_errors: + for doc in write_errors: + # Leave the server response intact for APM. + replacement = doc.copy() + original_index = doc["idx"] + offset + replacement["idx"] = original_index + # Add the failed operation to the error document. + replacement["op"] = ops[original_index][1] + full_result["writeErrors"].append(replacement) + + wce = _get_wce_doc(result) + if wce: + full_result["writeConcernErrors"].append(wce) + + +def _throw_client_bulk_write_exception( + full_result: _DocumentOut, verbose_results: bool +) -> NoReturn: + """Raise a ClientBulkWriteException from the full result.""" + # retryWrites on MMAPv1 should raise an actionable error. 
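+    # Editor's sketch (values assumed for illustration): _merge_command above
+    # rebases each write error's "idx" into the whole client bulk write and
+    # attaches the failing operation, e.g.
+    #   {"idx": 3, "code": 11000, "errmsg": "E11000 duplicate key error ...",
+    #    "op": {"insert": 0, "document": {"_id": 1}}}
+    # so the sort below surfaces the first failure in submission order.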
+ if full_result["writeErrors"]: + full_result["writeErrors"].sort(key=lambda error: error["idx"]) + err = full_result["writeErrors"][0] + code = err["code"] + msg = err["errmsg"] + if code == 20 and msg.startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, full_result) + if isinstance(full_result["error"], BaseException): + raise ClientBulkWriteException(full_result, verbose_results) from full_result["error"] + raise ClientBulkWriteException(full_result, verbose_results) diff --git a/pymongo/_cmessagemodule.c b/pymongo/_cmessagemodule.c index 2f03ce73e0..a506863737 100644 --- a/pymongo/_cmessagemodule.c +++ b/pymongo/_cmessagemodule.c @@ -5,7 +5,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -28,6 +28,10 @@ struct module_state { PyObject* _cbson; + PyObject* _max_bson_size_str; + PyObject* _max_message_size_str; + PyObject* _max_write_batch_size_str; + PyObject* _max_split_size_str; }; /* See comments about module initialization in _cbsonmodule.c */ @@ -41,7 +45,7 @@ struct module_state { * * Returns a new ref */ static PyObject* _error(char* name) { - PyObject* error; + PyObject* error = NULL; PyObject* errors = PyImport_ImportModule("pymongo.errors"); if (!errors) { return NULL; @@ -64,8 +68,6 @@ static int buffer_write_bytes_ssize_t(buffer_t buffer, const char* data, Py_ssiz static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { /* NOTE just using a random number as the request_id */ - struct module_state *state = GETSTATE(self); - int request_id = rand(); unsigned int flags; char* collection_name = NULL; @@ -73,21 +75,27 @@ static PyObject* _cbson_query_message(PyObject* self, PyObject* args) { int begin, cur_size, max_size = 0; int num_to_skip; int num_to_return; - PyObject* query; - PyObject* field_selector; + PyObject* query = NULL; + PyObject* field_selector = NULL; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; PyObject* result = NULL; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } - if (!PyArg_ParseTuple(args, "Iet#iiOOO&", + if (!(PyArg_ParseTuple(args, "Iet#iiOOO", &flags, "utf-8", &collection_name, &collection_name_length, &num_to_skip, &num_to_return, &query, &field_selector, - convert_codec_options, &options)) { + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } buffer = pymongo_buffer_new(); @@ -210,16 +218,15 @@ static PyObject* _cbson_get_more_message(PyObject* self, PyObject* args) { * only checked *after* generating the entire message. 
*/ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { - struct module_state *state = GETSTATE(self); - /* NOTE just using a random number as the request_id */ int request_id = rand(); unsigned int flags; - PyObject* command; + PyObject* command = NULL; char* identifier = NULL; Py_ssize_t identifier_length = 0; - PyObject* docs; - PyObject* doc; + PyObject* docs = NULL; + PyObject* doc = NULL; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer = NULL; int length_location, message_length; @@ -227,16 +234,21 @@ static PyObject* _cbson_op_msg(PyObject* self, PyObject* args) { int max_doc_size = 0; PyObject* result = NULL; PyObject* iterator = NULL; + struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } /*flags, command, identifier, docs, opts*/ - if (!PyArg_ParseTuple(args, "IOet#OO&", + if (!(PyArg_ParseTuple(args, "IOet#OO", &flags, &command, "utf-8", &identifier, &identifier_length, &docs, - convert_codec_options, &options)) { + &options_obj) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } buffer = pymongo_buffer_new(); @@ -362,21 +374,21 @@ _batched_op_msg( PyObject* iterator = NULL; char* flags = ack ? "\x00\x00\x00\x00" : "\x02\x00\x00\x00"; - max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); + max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str); max_bson_size = PyLong_AsLong(max_bson_size_obj); Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { return 0; } - max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size"); + max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str); max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { return 0; } - max_message_size_obj = PyObject_GetAttrString(ctx, "max_message_size"); + max_message_size_obj = PyObject_GetAttr(ctx, state->_max_message_size_str); max_message_size = PyLong_AsLong(max_message_size_obj); Py_XDECREF(max_message_size_obj); if (max_message_size == -1) { @@ -523,19 +535,23 @@ static PyObject* _cbson_encode_batched_op_msg(PyObject* self, PyObject* args) { unsigned char op; unsigned char ack; - PyObject* command; - PyObject* docs; + PyObject* command = NULL; + PyObject* docs = NULL; PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } - if (!PyArg_ParseTuple(args, "bOObO&O", + if (!(PyArg_ParseTuple(args, "bOObOO", &op, &command, &docs, &ack, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { @@ -576,19 +592,23 @@ _cbson_batched_op_msg(PyObject* self, PyObject* args) { unsigned char ack; int request_id; int position; - PyObject* command; - PyObject* docs; + PyObject* command = NULL; + PyObject* docs = NULL; PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } - if (!PyArg_ParseTuple(args, "bOObO&O", + if (!(PyArg_ParseTuple(args, "bOObOO", &op, &command, &docs, &ack, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, 
&options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { @@ -661,7 +681,7 @@ _batched_write_command( PyObject* doc = NULL; PyObject* iterator = NULL; - max_bson_size_obj = PyObject_GetAttrString(ctx, "max_bson_size"); + max_bson_size_obj = PyObject_GetAttr(ctx, state->_max_bson_size_str); max_bson_size = PyLong_AsLong(max_bson_size_obj); Py_XDECREF(max_bson_size_obj); if (max_bson_size == -1) { @@ -673,7 +693,7 @@ _batched_write_command( */ max_cmd_size = max_bson_size + 16382; - max_write_batch_size_obj = PyObject_GetAttrString(ctx, "max_write_batch_size"); + max_write_batch_size_obj = PyObject_GetAttr(ctx, state->_max_write_batch_size_str); max_write_batch_size = PyLong_AsLong(max_write_batch_size_obj); Py_XDECREF(max_write_batch_size_obj); if (max_write_batch_size == -1) { @@ -683,7 +703,7 @@ _batched_write_command( // max_split_size is the size at which to perform a batch split. // Normally this this value is equal to max_bson_size (16MiB). However, // when auto encryption is enabled max_split_size is reduced to 2MiB. - max_split_size_obj = PyObject_GetAttrString(ctx, "max_split_size"); + max_split_size_obj = PyObject_GetAttr(ctx, state->_max_split_size_str); max_split_size = PyLong_AsLong(max_split_size_obj); Py_XDECREF(max_split_size_obj); if (max_split_size == -1) { @@ -761,8 +781,11 @@ _batched_write_command( int cur_doc_begin; int cur_size; int enough_data = 0; - char key[16]; - INT2STRING(key, idx); + char key[BUF_SIZE]; + int res = LL2STR(key, (long long)idx); + if (res == -1) { + return 0; + } if (!buffer_write_bytes(buffer, "\x03", 1) || !buffer_write_bytes(buffer, key, (int)strlen(key) + 1)) { goto fail; @@ -845,19 +868,23 @@ _cbson_encode_batched_write_command(PyObject* self, PyObject* args) { char *ns = NULL; unsigned char op; Py_ssize_t ns_len; - PyObject* command; - PyObject* docs; + PyObject* command = NULL; + PyObject* docs = NULL; PyObject* ctx = NULL; PyObject* to_publish = NULL; PyObject* result = NULL; + PyObject* options_obj = NULL; codec_options_t options; buffer_t buffer; struct module_state *state = GETSTATE(self); + if (!state) { + return NULL; + } - if (!PyArg_ParseTuple(args, "et#bOOO&O", "utf-8", + if (!(PyArg_ParseTuple(args, "et#bOOOO", "utf-8", &ns, &ns_len, &op, &command, &docs, - convert_codec_options, &options, - &ctx)) { + &options_obj, &ctx) && + convert_codec_options(state->_cbson, options_obj, &options))) { return NULL; } if (!(buffer = pymongo_buffer_new())) { @@ -911,35 +938,42 @@ static PyMethodDef _CMessageMethods[] = { {NULL, NULL, 0, NULL} }; -#define INITERROR return NULL +#define INITERROR return -1; static int _cmessage_traverse(PyObject *m, visitproc visit, void *arg) { - Py_VISIT(GETSTATE(m)->_cbson); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_VISIT(state->_cbson); + Py_VISIT(state->_max_bson_size_str); + Py_VISIT(state->_max_message_size_str); + Py_VISIT(state->_max_split_size_str); + Py_VISIT(state->_max_write_batch_size_str); return 0; } static int _cmessage_clear(PyObject *m) { - Py_CLEAR(GETSTATE(m)->_cbson); + struct module_state *state = GETSTATE(m); + if (!state) { + return 0; + } + Py_CLEAR(state->_cbson); + Py_CLEAR(state->_max_bson_size_str); + Py_CLEAR(state->_max_message_size_str); + Py_CLEAR(state->_max_split_size_str); + Py_CLEAR(state->_max_write_batch_size_str); return 0; } -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "_cmessage", - NULL, - sizeof(struct module_state), - _CMessageMethods, - NULL, - _cmessage_traverse, - _cmessage_clear, - NULL -}; - 
-PyMODINIT_FUNC -PyInit__cmessage(void) +/* Multi-phase extension module initialization code. + * See https://peps.python.org/pep-0489/. +*/ +static int +_cmessage_exec(PyObject *m) { PyObject *_cbson = NULL; PyObject *c_api_object = NULL; - PyObject *m = NULL; + struct module_state* state = NULL; /* Store a reference to the _cbson module since it's needed to call some * of its functions @@ -961,17 +995,20 @@ PyInit__cmessage(void) goto fail; } - /* Returns a new reference. */ - m = PyModule_Create(&moduledef); - if (m == NULL) { + state = GETSTATE(m); + if (state == NULL) { goto fail; } - - GETSTATE(m)->_cbson = _cbson; + state->_cbson = _cbson; + if (!((state->_max_bson_size_str = PyUnicode_FromString("max_bson_size")) && + (state->_max_message_size_str = PyUnicode_FromString("max_message_size")) && + (state->_max_write_batch_size_str = PyUnicode_FromString("max_write_batch_size")) && + (state->_max_split_size_str = PyUnicode_FromString("max_split_size")))) { + goto fail; + } Py_DECREF(c_api_object); - - return m; + return 0; fail: Py_XDECREF(m); @@ -979,3 +1016,34 @@ PyInit__cmessage(void) Py_XDECREF(_cbson); INITERROR; } + + +static PyModuleDef_Slot _cmessage_slots[] = { + {Py_mod_exec, _cmessage_exec}, +#ifdef Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED + {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_SUPPORTED}, +#endif +#if PY_VERSION_HEX >= 0x030D0000 + {Py_mod_gil, Py_MOD_GIL_NOT_USED}, +#endif + {0, NULL}, +}; + + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_cmessage", + NULL, + sizeof(struct module_state), + _CMessageMethods, + _cmessage_slots, + _cmessage_traverse, + _cmessage_clear, + NULL +}; + +PyMODINIT_FUNC +PyInit__cmessage(void) +{ + return PyModuleDef_Init(&moduledef); +} diff --git a/pymongo/_csot.py b/pymongo/_csot.py index 5170c0d8ca..ce72a66486 100644 --- a/pymongo/_csot.py +++ b/pymongo/_csot.py @@ -14,18 +14,30 @@ """Internal helpers for CSOT.""" +from __future__ import annotations + import functools +import inspect import time +from collections import deque +from contextlib import AbstractContextManager from contextvars import ContextVar, Token -from typing import Any, Callable, MutableMapping, Optional, Tuple, TypeVar, cast +from typing import TYPE_CHECKING, Any, Callable, Deque, MutableMapping, Optional, TypeVar, cast -from pymongo.write_concern import WriteConcern +if TYPE_CHECKING: + from pymongo.write_concern import WriteConcern TIMEOUT: ContextVar[Optional[float]] = ContextVar("TIMEOUT", default=None) RTT: ContextVar[float] = ContextVar("RTT", default=0.0) DEADLINE: ContextVar[float] = ContextVar("DEADLINE", default=float("inf")) +def reset_all() -> None: + TIMEOUT.set(None) + RTT.set(0.0) + DEADLINE.set(float("inf")) + + def get_timeout() -> Optional[float]: return TIMEOUT.get(None) @@ -56,7 +68,7 @@ def clamp_remaining(max_timeout: float) -> float: return min(timeout, max_timeout) -class _TimeoutContext(object): +class _TimeoutContext(AbstractContextManager[Any]): """Internal timeout context manager. 
Use :func:`pymongo.timeout` instead:: @@ -65,22 +77,19 @@ class _TimeoutContext(object): client.test.test.insert_one({}) """ - __slots__ = ("_timeout", "_tokens") - def __init__(self, timeout: Optional[float]): self._timeout = timeout - self._tokens: Optional[Tuple[Token, Token, Token]] = None + self._tokens: Optional[tuple[Token[Optional[float]], Token[float], Token[float]]] = None - def __enter__(self): + def __enter__(self) -> None: timeout_token = TIMEOUT.set(self._timeout) prev_deadline = DEADLINE.get() next_deadline = time.monotonic() + self._timeout if self._timeout else float("inf") deadline_token = DEADLINE.set(min(prev_deadline, next_deadline)) rtt_token = RTT.set(0.0) self._tokens = (timeout_token, deadline_token, rtt_token) - return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: if self._tokens: timeout_token, deadline_token, rtt_token = self._tokens TIMEOUT.reset(timeout_token) @@ -93,21 +102,34 @@ def __exit__(self, exc_type, exc_val, exc_tb): def apply(func: F) -> F: - """Apply the client's timeoutMS to this operation.""" - - @functools.wraps(func) - def csot_wrapper(self, *args, **kwargs): - if get_timeout() is None: - timeout = self._timeout - if timeout is not None: - with _TimeoutContext(timeout): - return func(self, *args, **kwargs) - return func(self, *args, **kwargs) + """Apply the client's timeoutMS to this operation. Can wrap both asynchronous and synchronous methods""" + if inspect.iscoroutinefunction(func): + + @functools.wraps(func) + async def csot_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: + if get_timeout() is None: + timeout = self._timeout + if timeout is not None: + with _TimeoutContext(timeout): + return await func(self, *args, **kwargs) + return await func(self, *args, **kwargs) + else: + + @functools.wraps(func) + def csot_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any: + if get_timeout() is None: + timeout = self._timeout + if timeout is not None: + with _TimeoutContext(timeout): + return func(self, *args, **kwargs) + return func(self, *args, **kwargs) return cast(F, csot_wrapper) -def apply_write_concern(cmd: MutableMapping, write_concern: Optional[WriteConcern]) -> None: +def apply_write_concern( + cmd: MutableMapping[str, Any], write_concern: Optional[WriteConcern] +) -> None: """Apply the given write concern to a command.""" if not write_concern or write_concern.is_server_default: return @@ -116,3 +138,30 @@ def apply_write_concern(cmd: MutableMapping, write_concern: Optional[WriteConcer wc.pop("wtimeout", None) if wc: cmd["writeConcern"] = wc + + +_MAX_RTT_SAMPLES: int = 10 +_MIN_RTT_SAMPLES: int = 2 + + +class MovingMinimum: + """Tracks a minimum RTT within the last 10 RTT samples.""" + + samples: Deque[float] + + def __init__(self) -> None: + self.samples = deque(maxlen=_MAX_RTT_SAMPLES) + + def add_sample(self, sample: float) -> None: + if sample < 0: + raise ValueError(f"duration cannot be negative {sample}") + self.samples.append(sample) + + def get(self) -> float: + """Get the min, or 0.0 if there aren't enough samples yet.""" + if len(self.samples) >= _MIN_RTT_SAMPLES: + return min(self.samples) + return 0.0 + + def reset(self) -> None: + self.samples.clear() diff --git a/pymongo/_gcp_helpers.py b/pymongo/_gcp_helpers.py new file mode 100644 index 0000000000..7979d1e807 --- /dev/null +++ b/pymongo/_gcp_helpers.py @@ -0,0 +1,40 @@ +# Copyright 2024-present MongoDB, Inc. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""GCP helpers."""
+from __future__ import annotations
+
+from typing import Any
+
+
+def _get_gcp_response(resource: str, timeout: float = 5) -> dict[str, Any]:
+    from urllib.request import Request, urlopen
+
+    url = "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity"
+    url += f"?audience={resource}"
+    headers = {"Metadata-Flavor": "Google"}
+    request = Request(url, headers=headers)  # noqa: S310
+    try:
+        with urlopen(request, timeout=timeout) as response:  # noqa: S310
+            status = response.status
+            body = response.read().decode("utf8")
+    except Exception as e:
+        msg = "Failed to acquire IMDS access token: %s" % e
+        raise ValueError(msg) from None
+
+    if status != 200:
+        msg = "Failed to acquire IMDS access token."
+        raise ValueError(msg)
+
+    return dict(access_token=body)
diff --git a/pymongo/_version.py b/pymongo/_version.py
new file mode 100644
index 0000000000..c6ba82ab13
--- /dev/null
+++ b/pymongo/_version.py
@@ -0,0 +1,43 @@
+# Copyright 2022-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Current version of PyMongo."""
+from __future__ import annotations
+
+import re
+from typing import List, Tuple, Union
+
+__version__ = "4.16.0.dev0"
+
+
+def get_version_tuple(version: str) -> Tuple[Union[int, str], ...]:
+    pattern = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<rest>.*)"
+    match = re.match(pattern, version)
+    if match:
+        parts: List[Union[int, str]] = [int(match[part]) for part in ["major", "minor", "patch"]]
+        if match["rest"]:
+            parts.append(match["rest"])
+    elif re.match(r"\d+\.\d+", version):
+        parts = [int(part) for part in version.split(".")]
+    else:
+        raise ValueError("Could not parse version")
+    return tuple(parts)
+
+
+version_tuple = get_version_tuple(__version__)
+version = __version__
+
+
+def get_version_string() -> str:
+    return __version__
diff --git a/pymongo/aggregation.py b/pymongo/aggregation.py
deleted file mode 100644
index 62fe4bd055..0000000000
--- a/pymongo/aggregation.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# Copyright 2019-present MongoDB, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License.
You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Perform aggregation operations on a collection or database.""" - -from bson.son import SON -from pymongo import common -from pymongo.collation import validate_collation_or_none -from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference, _AggWritePref - - -class _AggregationCommand(object): - """The internal abstract base class for aggregation cursors. - - Should not be called directly by application developers. Use - :meth:`pymongo.collection.Collection.aggregate`, or - :meth:`pymongo.database.Database.aggregate` instead. - """ - - def __init__( - self, - target, - cursor_class, - pipeline, - options, - explicit_session, - let=None, - user_fields=None, - result_processor=None, - comment=None, - ): - if "explain" in options: - raise ConfigurationError( - "The explain option is not supported. Use Database.command instead." - ) - - self._target = target - - pipeline = common.validate_list("pipeline", pipeline) - self._pipeline = pipeline - self._performs_write = False - if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): - self._performs_write = True - - common.validate_is_mapping("options", options) - if let is not None: - common.validate_is_mapping("let", let) - options["let"] = let - if comment is not None: - options["comment"] = comment - self._options = options - - # This is the batchSize that will be used for setting the initial - # batchSize for the cursor, as well as the subsequent getMores. - self._batch_size = common.validate_non_negative_integer_or_none( - "batchSize", self._options.pop("batchSize", None) - ) - - # If the cursor option is already specified, avoid overriding it. - self._options.setdefault("cursor", {}) - # If the pipeline performs a write, we ignore the initial batchSize - # since the server doesn't return results in this case. 
- if self._batch_size is not None and not self._performs_write: - self._options["cursor"]["batchSize"] = self._batch_size - - self._cursor_class = cursor_class - self._explicit_session = explicit_session - self._user_fields = user_fields - self._result_processor = result_processor - - self._collation = validate_collation_or_none(options.pop("collation", None)) - - self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) - self._write_preference = None - - @property - def _aggregation_target(self): - """The argument to pass to the aggregate command.""" - raise NotImplementedError - - @property - def _cursor_namespace(self): - """The namespace in which the aggregate command is run.""" - raise NotImplementedError - - def _cursor_collection(self, cursor_doc): - """The Collection used for the aggregate command cursor.""" - raise NotImplementedError - - @property - def _database(self): - """The database against which the aggregation command is run.""" - raise NotImplementedError - - def get_read_preference(self, session): - if self._write_preference: - return self._write_preference - pref = self._target._read_preference_for(session) - if self._performs_write and pref != ReadPreference.PRIMARY: - self._write_preference = pref = _AggWritePref(pref) - return pref - - def get_cursor(self, session, server, sock_info, read_preference): - # Serialize command. - cmd = SON([("aggregate", self._aggregation_target), ("pipeline", self._pipeline)]) - cmd.update(self._options) - - # Apply this target's read concern if: - # readConcern has not been specified as a kwarg and either - # - server version is >= 4.2 or - # - server version is >= 3.2 and pipeline doesn't use $out - if ("readConcern" not in cmd) and ( - not self._performs_write or (sock_info.max_wire_version >= 8) - ): - read_concern = self._target.read_concern - else: - read_concern = None - - # Apply this target's write concern if: - # writeConcern has not been specified as a kwarg and pipeline doesn't - # perform a write operation - if "writeConcern" not in cmd and self._performs_write: - write_concern = self._target._write_concern_for(session) - else: - write_concern = None - - # Run command. - result = sock_info.command( - self._database.name, - cmd, - read_preference, - self._target.codec_options, - parse_write_concern_error=True, - read_concern=read_concern, - write_concern=write_concern, - collation=self._collation, - session=session, - client=self._database.client, - user_fields=self._user_fields, - ) - - if self._result_processor: - self._result_processor(result, sock_info) - - # Extract cursor from result or mock/fake one if necessary. - if "cursor" in result: - cursor = result["cursor"] - else: - # Unacknowledged $out/$merge write. Fake a cursor. - cursor = { - "id": 0, - "firstBatch": result.get("result", []), - "ns": self._cursor_namespace, - } - - # Create and return cursor instance. 
- cmd_cursor = self._cursor_class( - self._cursor_collection(cursor), - cursor, - sock_info.address, - batch_size=self._batch_size or 0, - max_await_time_ms=self._max_await_time_ms, - session=session, - explicit_session=self._explicit_session, - comment=self._options.get("comment"), - ) - cmd_cursor._maybe_pin_connection(sock_info) - return cmd_cursor - - -class _CollectionAggregationCommand(_AggregationCommand): - @property - def _aggregation_target(self): - return self._target.name - - @property - def _cursor_namespace(self): - return self._target.full_name - - def _cursor_collection(self, cursor): - """The Collection used for the aggregate command cursor.""" - return self._target - - @property - def _database(self): - return self._target.database - - -class _CollectionRawAggregationCommand(_CollectionAggregationCommand): - def __init__(self, *args, **kwargs): - super(_CollectionRawAggregationCommand, self).__init__(*args, **kwargs) - - # For raw-batches, we set the initial batchSize for the cursor to 0. - if not self._performs_write: - self._options["cursor"]["batchSize"] = 0 - - -class _DatabaseAggregationCommand(_AggregationCommand): - @property - def _aggregation_target(self): - return 1 - - @property - def _cursor_namespace(self): - return "%s.$cmd.aggregate" % (self._target.name,) - - @property - def _database(self): - return self._target - - def _cursor_collection(self, cursor): - """The Collection used for the aggregate command cursor.""" - # Collection level aggregate may not always return the "ns" field - # according to our MockupDB tests. Let's handle that case for db level - # aggregate too by defaulting to the .$cmd.aggregate namespace. - _, collname = cursor.get("ns", self._cursor_namespace).split(".", 1) - return self._database[collname] diff --git a/pymongo/asynchronous/__init__.py b/pymongo/asynchronous/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pymongo/asynchronous/aggregation.py b/pymongo/asynchronous/aggregation.py new file mode 100644 index 0000000000..6ca60ad9c3 --- /dev/null +++ b/pymongo/asynchronous/aggregation.py @@ -0,0 +1,254 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Perform aggregation operations on a collection or database.""" +from __future__ import annotations + +from collections.abc import Callable, Mapping, MutableMapping +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo import common +from pymongo.collation import validate_collation_or_none +from pymongo.errors import ConfigurationError +from pymongo.read_preferences import ReadPreference, _AggWritePref + +if TYPE_CHECKING: + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.command_cursor import AsyncCommandCursor + from pymongo.asynchronous.database import AsyncDatabase + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.read_preferences import _ServerMode + from pymongo.typings import _DocumentType, _Pipeline + +_IS_SYNC = False + + +class _AggregationCommand: + """The internal abstract base class for aggregation cursors. + + Should not be called directly by application developers. Use + :meth:`pymongo.asynchronous.collection.AsyncCollection.aggregate`, or + :meth:`pymongo.asynchronous.database.AsyncDatabase.aggregate` instead. + """ + + def __init__( + self, + target: Union[AsyncDatabase[Any], AsyncCollection[Any]], + cursor_class: type[AsyncCommandCursor[Any]], + pipeline: _Pipeline, + options: MutableMapping[str, Any], + let: Optional[Mapping[str, Any]] = None, + user_fields: Optional[MutableMapping[str, Any]] = None, + result_processor: Optional[Callable[[Mapping[str, Any], AsyncConnection], None]] = None, + comment: Any = None, + ) -> None: + if "explain" in options: + raise ConfigurationError( + "The explain option is not supported. Use AsyncDatabase.command instead." + ) + + self._target = target + + pipeline = common.validate_list("pipeline", pipeline) + self._pipeline = pipeline + self._performs_write = False + if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): + self._performs_write = True + + common.validate_is_mapping("options", options) + if let is not None: + common.validate_is_mapping("let", let) + options["let"] = let + if comment is not None: + options["comment"] = comment + + self._options = options + + # This is the batchSize that will be used for setting the initial + # batchSize for the cursor, as well as the subsequent getMores. + self._batch_size = common.validate_non_negative_integer_or_none( + "batchSize", self._options.pop("batchSize", None) + ) + + # If the cursor option is already specified, avoid overriding it. + self._options.setdefault("cursor", {}) + # If the pipeline performs a write, we ignore the initial batchSize + # since the server doesn't return results in this case. 
+ if self._batch_size is not None and not self._performs_write: + self._options["cursor"]["batchSize"] = self._batch_size + + self._cursor_class = cursor_class + self._user_fields = user_fields + self._result_processor = result_processor + + self._collation = validate_collation_or_none(options.pop("collation", None)) + + self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) + self._write_preference: Optional[_AggWritePref] = None + + @property + def _aggregation_target(self) -> Union[str, int]: + """The argument to pass to the aggregate command.""" + raise NotImplementedError + + @property + def _cursor_namespace(self) -> str: + """The namespace in which the aggregate command is run.""" + raise NotImplementedError + + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> AsyncCollection[Any]: + """The AsyncCollection used for the aggregate command cursor.""" + raise NotImplementedError + + @property + def _database(self) -> AsyncDatabase[Any]: + """The database against which the aggregation command is run.""" + raise NotImplementedError + + def get_read_preference( + self, session: Optional[AsyncClientSession] + ) -> Union[_AggWritePref, _ServerMode]: + if self._write_preference: + return self._write_preference + pref = self._target._read_preference_for(session) + if self._performs_write and pref != ReadPreference.PRIMARY: + self._write_preference = pref = _AggWritePref(pref) # type: ignore[assignment] + return pref + + async def get_cursor( + self, + session: Optional[AsyncClientSession], + server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> AsyncCommandCursor[_DocumentType]: + # Serialize command. + cmd = {"aggregate": self._aggregation_target, "pipeline": self._pipeline} + cmd.update(self._options) + + # Apply this target's read concern if: + # readConcern has not been specified as a kwarg and either + # - server version is >= 4.2 or + # - server version is >= 3.2 and pipeline doesn't use $out + if ("readConcern" not in cmd) and ( + not self._performs_write or (conn.max_wire_version >= 8) + ): + read_concern = self._target.read_concern + else: + read_concern = None + + # Apply this target's write concern if: + # writeConcern has not been specified as a kwarg and pipeline doesn't + # perform a write operation + if "writeConcern" not in cmd and self._performs_write: + write_concern = self._target._write_concern_for(session) + else: + write_concern = None + + # Run command. + result = await conn.command( + self._database.name, + cmd, + read_preference, + self._target.codec_options, + parse_write_concern_error=True, + read_concern=read_concern, + write_concern=write_concern, + collation=self._collation, + session=session, + client=self._database.client, + user_fields=self._user_fields, + ) + + if self._result_processor: + self._result_processor(result, conn) + + # Extract cursor from result or mock/fake one if necessary. + if "cursor" in result: + cursor = result["cursor"] + else: + # Unacknowledged $out/$merge write. Fake a cursor. + cursor = { + "id": 0, + "firstBatch": result.get("result", []), + "ns": self._cursor_namespace, + } + + # Create and return cursor instance. 
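+        # Editor's note (illustrative): for an unacknowledged $out/$merge the
+        # faked cursor above, e.g. {"id": 0, "firstBatch": [], "ns": "db.coll"},
+        # yields a command cursor that is already exhausted.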
+ cmd_cursor = self._cursor_class( + self._cursor_collection(cursor), + cursor, + conn.address, + batch_size=self._batch_size or 0, + max_await_time_ms=self._max_await_time_ms, + session=session, + comment=self._options.get("comment"), + ) + await cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + +class _CollectionAggregationCommand(_AggregationCommand): + _target: AsyncCollection[Any] + + @property + def _aggregation_target(self) -> str: + return self._target.name + + @property + def _cursor_namespace(self) -> str: + return self._target.full_name + + def _cursor_collection(self, cursor: Mapping[str, Any]) -> AsyncCollection[Any]: + """The AsyncCollection used for the aggregate command cursor.""" + return self._target + + @property + def _database(self) -> AsyncDatabase[Any]: + return self._target.database + + +class _CollectionRawAggregationCommand(_CollectionAggregationCommand): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + # For raw-batches, we set the initial batchSize for the cursor to 0. + if not self._performs_write: + self._options["cursor"]["batchSize"] = 0 + + +class _DatabaseAggregationCommand(_AggregationCommand): + _target: AsyncDatabase[Any] + + @property + def _aggregation_target(self) -> int: + return 1 + + @property + def _cursor_namespace(self) -> str: + return f"{self._target.name}.$cmd.aggregate" + + @property + def _database(self) -> AsyncDatabase[Any]: + return self._target + + def _cursor_collection(self, cursor: Mapping[str, Any]) -> AsyncCollection[Any]: + """The AsyncCollection used for the aggregate command cursor.""" + # AsyncCollection level aggregate may not always return the "ns" field + # according to our MockupDB tests. Let's handle that case for db level + # aggregate too by defaulting to the .$cmd.aggregate namespace. + _, collname = cursor.get("ns", self._cursor_namespace).split(".", 1) + return self._database[collname] diff --git a/pymongo/asynchronous/auth.py b/pymongo/asynchronous/auth.py new file mode 100644 index 0000000000..c1321f1d90 --- /dev/null +++ b/pymongo/asynchronous/auth.py @@ -0,0 +1,455 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Authentication helpers.""" +from __future__ import annotations + +import functools +import hashlib +import hmac +import socket +from base64 import standard_b64decode, standard_b64encode +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Coroutine, + Mapping, + MutableMapping, + Optional, + cast, +) +from urllib.parse import quote + +from bson.binary import Binary +from pymongo.asynchronous.auth_aws import _authenticate_aws +from pymongo.asynchronous.auth_oidc import ( + _authenticate_oidc, + _get_authenticator, +) +from pymongo.asynchronous.helpers import _getaddrinfo +from pymongo.auth_shared import ( + MongoCredential, + _authenticate_scram_start, + _parse_scram_response, + _xor, +) +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.saslprep import saslprep + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.hello import Hello + +HAVE_KERBEROS = True +_USE_PRINCIPAL = False +try: + import winkerberos as kerberos # type:ignore[import] + + if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): + _USE_PRINCIPAL = True +except ImportError: + try: + import kerberos # type:ignore[import] + except ImportError: + HAVE_KERBEROS = False + + +_IS_SYNC = False + + +async def _authenticate_scram( + credentials: MongoCredential, conn: AsyncConnection, mechanism: str +) -> None: + """Authenticate using SCRAM.""" + username = credentials.username + if mechanism == "SCRAM-SHA-256": + digest = "sha256" + digestmod = hashlib.sha256 + data = saslprep(credentials.password).encode("utf-8") + else: + digest = "sha1" + digestmod = hashlib.sha1 + data = _password_digest(username, credentials.password).encode("utf-8") + source = credentials.source + cache = credentials.cache + + # Make local + _hmac = hmac.HMAC + + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + assert isinstance(ctx, _ScramContext) + assert ctx.scram_data is not None + nonce, first_bare = ctx.scram_data + res = ctx.speculative_authenticate + else: + nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) + res = await conn.command(source, cmd) + + assert res is not None + server_first = res["payload"] + parsed = _parse_scram_response(server_first) + iterations = int(parsed[b"i"]) + if iterations < 4096: + raise OperationFailure("Server returned an invalid iteration count.") + salt = parsed[b"s"] + rnonce = parsed[b"r"] + if not rnonce.startswith(nonce): + raise OperationFailure("Server returned an invalid nonce.") + + without_proof = b"c=biws,r=" + rnonce + if cache.data: + client_key, server_key, csalt, citerations = cache.data + else: + client_key, server_key, csalt, citerations = None, None, None, None + + # Salt and / or iterations could change for a number of different + # reasons. Either changing invalidates the cache. 
+ if not client_key or salt != csalt or iterations != citerations: + salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) + client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() + server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() + cache.data = (client_key, server_key, salt, iterations) + stored_key = digestmod(client_key).digest() + auth_msg = b",".join((first_bare, server_first, without_proof)) + client_sig = _hmac(stored_key, auth_msg, digestmod).digest() + client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) + client_final = b",".join((without_proof, client_proof)) + + server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) + + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(client_final), + } + res = await conn.command(source, cmd) + + parsed = _parse_scram_response(res["payload"]) + if not hmac.compare_digest(parsed[b"v"], server_sig): + raise OperationFailure("Server returned an invalid signature.") + + # A third empty challenge may be required if the server does not support + # skipEmptyExchange: SERVER-44857. + if not res["done"]: + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(b""), + } + res = await conn.command(source, cmd) + if not res["done"]: + raise OperationFailure("SASL conversation failed to complete.") + + +def _password_digest(username: str, password: str) -> str: + """Get a password digest to use for authentication.""" + if not isinstance(password, str): + raise TypeError("password must be an instance of str") + if len(password) == 0: + raise ValueError("password can't be empty") + if not isinstance(username, str): + raise TypeError(f"username must be an instance of str, not {type(username)}") + + md5hash = hashlib.md5() # noqa: S324 + data = f"{username}:mongo:{password}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _auth_key(nonce: str, username: str, password: str) -> str: + """Get an auth key to use for authentication.""" + digest = _password_digest(username, password) + md5hash = hashlib.md5() # noqa: S324 + data = f"{nonce}{username}{digest}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +async def _canonicalize_hostname(hostname: str, option: str | bool) -> str: + """Canonicalize hostname following MIT-krb5 behavior.""" + # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + if option in [False, "none"]: + return hostname + + af, socktype, proto, canonname, sockaddr = ( + await _getaddrinfo( + hostname, + None, + family=0, + type=0, + proto=socket.IPPROTO_TCP, + flags=socket.AI_CANONNAME, + ) + )[0] # type: ignore[index] + + # For forward just to resolve the cname as dns.lookup() will not return it. + if option == "forward": + return canonname.lower() + + try: + name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) + except socket.gaierror: + return canonname.lower() + + return name[0].lower() + + +async def _authenticate_gssapi(credentials: MongoCredential, conn: AsyncConnection) -> None: + """Authenticate using GSSAPI.""" + if not HAVE_KERBEROS: + raise ConfigurationError( + 'The "kerberos" module must be installed to use GSSAPI authentication.' 
+ ) + + try: + username = credentials.username + password = credentials.password + props = credentials.mechanism_properties + # Starting here and continuing through the while loop below - establish + # the security context. See RFC 4752, Section 3.1, first paragraph. + host = props.service_host or conn.address[0] + host = await _canonicalize_hostname(host, props.canonicalize_host_name) + service = props.service_name + "@" + host + if props.service_realm is not None: + service = service + "@" + props.service_realm + + if password is not None: + if _USE_PRINCIPAL: + # Note that, though we use unquote_plus for unquoting URI + # options, we use quote here. Microsoft's UrlUnescape (used + # by WinKerberos) doesn't support +. + principal = ":".join((quote(username), quote(password))) + result, ctx = kerberos.authGSSClientInit( + service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG + ) + else: + if "@" in username: + user, domain = username.split("@", 1) + else: + user, domain = username, None + result, ctx = kerberos.authGSSClientInit( + service, + gssflags=kerberos.GSS_C_MUTUAL_FLAG, + user=user, + domain=domain, + password=password, + ) + else: + result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + + if result != kerberos.AUTH_GSS_COMPLETE: + raise OperationFailure("Kerberos context failed to initialize.") + + try: + # pykerberos uses a weird mix of exceptions and return values + # to indicate errors. + # 0 == continue, 1 == complete, -1 == error + # Only authGSSClientStep can return 0. + if kerberos.authGSSClientStep(ctx, "") != 0: + raise OperationFailure("Unknown kerberos failure in step function.") + + # Start a SASL conversation with mongod/s + # Note: pykerberos deals with base64 encoded byte strings. + # Since mongo accepts base64 strings as the payload we don't + # have to use bson.binary.Binary. + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslStart": 1, + "mechanism": "GSSAPI", + "payload": payload, + "autoAuthorize": 1, + } + response = await conn.command("$external", cmd) + + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + result = kerberos.authGSSClientStep(ctx, str(response["payload"])) + if result == -1: + raise OperationFailure("Unknown kerberos failure in step function.") + + payload = kerberos.authGSSClientResponse(ctx) or "" + + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + response = await conn.command("$external", cmd) + + if result == kerberos.AUTH_GSS_COMPLETE: + break + else: + raise OperationFailure("Kerberos authentication failed to complete.") + + # Once the security context is established actually authenticate. + # See RFC 4752, Section 3.1, last two paragraphs. 
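+            # Editor's note: the GSS_Unwrap/GSS_Wrap exchange below is the
+            # mutual-authentication step; a failure in either call aborts
+            # authentication instead of proceeding with an unverified server.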
+ if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") + + if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") + + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + await conn.command("$external", cmd) + + finally: + kerberos.authGSSClientClean(ctx) + + except kerberos.KrbError as exc: + raise OperationFailure(str(exc)) from None + + +async def _authenticate_plain(credentials: MongoCredential, conn: AsyncConnection) -> None: + """Authenticate using SASL PLAIN (RFC 4616)""" + source = credentials.source + username = credentials.username + password = credentials.password + payload = (f"\x00{username}\x00{password}").encode() + cmd = { + "saslStart": 1, + "mechanism": "PLAIN", + "payload": Binary(payload), + "autoAuthorize": 1, + } + await conn.command(source, cmd) + + +async def _authenticate_x509(credentials: MongoCredential, conn: AsyncConnection) -> None: + """Authenticate using MONGODB-X509.""" + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + # MONGODB-X509 is done after the speculative auth step. + return + + cmd = _X509Context(credentials, conn.address).speculate_command() + await conn.command("$external", cmd) + + +async def _authenticate_default(credentials: MongoCredential, conn: AsyncConnection) -> None: + if conn.max_wire_version >= 7: + if conn.negotiated_mechs: + mechs = conn.negotiated_mechs + else: + source = credentials.source + cmd = conn.hello_cmd() + cmd["saslSupportedMechs"] = source + "." + credentials.username + mechs = (await conn.command(source, cmd, publish_events=False)).get( + "saslSupportedMechs", [] + ) + if "SCRAM-SHA-256" in mechs: + return await _authenticate_scram(credentials, conn, "SCRAM-SHA-256") + else: + return await _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + else: + return await _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + + +_AUTH_MAP: Mapping[str, Callable[..., Coroutine[Any, Any, None]]] = { + "GSSAPI": _authenticate_gssapi, + "MONGODB-X509": _authenticate_x509, + "MONGODB-AWS": _authenticate_aws, + "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] + "PLAIN": _authenticate_plain, + "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), + "DEFAULT": _authenticate_default, +} + + +class _AuthContext: + def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: + self.credentials = credentials + self.speculative_authenticate: Optional[Mapping[str, Any]] = None + self.address = address + + @staticmethod + def from_credentials( + creds: MongoCredential, address: tuple[str, int] + ) -> Optional[_AuthContext]: + spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) + if spec_cls: + return cast(_AuthContext, spec_cls(creds, address)) + return None + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + raise NotImplementedError + + def parse_response(self, hello: Hello[Mapping[str, Any]]) -> None: + self.speculative_authenticate = hello.speculative_authenticate + + def speculate_succeeded(self) -> bool: + return bool(self.speculative_authenticate) + + +class _ScramContext(_AuthContext): + def __init__( + self, credentials: MongoCredential, address: 
tuple[str, int], mechanism: str + ) -> None: + super().__init__(credentials, address) + self.scram_data: Optional[tuple[bytes, bytes]] = None + self.mechanism = mechanism + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) + # The 'db' field is included only on the speculative command. + cmd["db"] = self.credentials.source + # Save for later use. + self.scram_data = (nonce, first_bare) + return cmd + + +class _X509Context(_AuthContext): + def speculate_command(self) -> MutableMapping[str, Any]: + cmd = {"authenticate": 1, "mechanism": "MONGODB-X509"} + if self.credentials.username is not None: + cmd["user"] = self.credentials.username + return cmd + + +class _OIDCContext(_AuthContext): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + authenticator = _get_authenticator(self.credentials, self.address) + cmd = authenticator.get_spec_auth_cmd() + if cmd is None: + return None + cmd["db"] = self.credentials.source + return cmd + + +_SPECULATIVE_AUTH_MAP: Mapping[str, Any] = { + "MONGODB-X509": _X509Context, + "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "MONGODB-OIDC": _OIDCContext, + "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), +} + + +async def authenticate( + credentials: MongoCredential, conn: AsyncConnection, reauthenticate: bool = False +) -> None: + """Authenticate connection.""" + mechanism = credentials.mechanism + auth_func = _AUTH_MAP[mechanism] + if mechanism == "MONGODB-OIDC": + await _authenticate_oidc(credentials, conn, reauthenticate) + else: + await auth_func(credentials, conn) diff --git a/pymongo/asynchronous/auth_aws.py b/pymongo/asynchronous/auth_aws.py new file mode 100644 index 0000000000..210d306046 --- /dev/null +++ b/pymongo/asynchronous/auth_aws.py @@ -0,0 +1,100 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-AWS Authentication helpers.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Type + +import bson +from bson.binary import Binary +from pymongo.errors import ConfigurationError, OperationFailure + +if TYPE_CHECKING: + from bson.typings import _ReadableBuffer + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.auth_shared import MongoCredential + +_IS_SYNC = False + + +async def _authenticate_aws(credentials: MongoCredential, conn: AsyncConnection) -> None: + """Authenticate using MONGODB-AWS.""" + try: + import pymongo_auth_aws # type:ignore[import] + except ImportError as e: + raise ConfigurationError( + "MONGODB-AWS authentication requires pymongo-auth-aws: " + "install with: python -m pip install 'pymongo[aws]'" + ) from e + # Delayed import. 
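+    # Editor's note (behavior summarized, not verified here): pymongo-auth-aws
+    # resolves credentials from the usual AWS sources (environment variables,
+    # assumed roles, ECS/EC2 instance metadata); caching is switched on below
+    # so later authentications can reuse them until an auth failure clears it.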
+ from pymongo_auth_aws.auth import ( # type:ignore[import] + set_cached_credentials, + set_use_cached_credentials, + ) + + set_use_cached_credentials(True) + + if conn.max_wire_version < 9: + raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") + + class AwsSaslContext(pymongo_auth_aws.AwsSaslContext): # type: ignore + # Dependency injection: + def binary_type(self) -> Type[Binary]: + """Return the bson.binary.Binary type.""" + return Binary + + def bson_encode(self, doc: Mapping[str, Any]) -> bytes: + """Encode a dictionary to BSON.""" + return bson.encode(doc) + + def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: + """Decode BSON to a dictionary.""" + return bson.decode(data) + + try: + ctx = AwsSaslContext( + pymongo_auth_aws.AwsCredential( + credentials.username, + credentials.password, + credentials.mechanism_properties.aws_session_token, + ) + ) + client_payload = ctx.step(None) + client_first = {"saslStart": 1, "mechanism": "MONGODB-AWS", "payload": client_payload} + server_first = await conn.command("$external", client_first) + res = server_first + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + client_payload = ctx.step(res["payload"]) + cmd = { + "saslContinue": 1, + "conversationId": server_first["conversationId"], + "payload": client_payload, + } + res = await conn.command("$external", cmd) + if res["done"]: + # SASL complete. + break + except pymongo_auth_aws.PyMongoAuthAwsError as exc: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + # Convert to OperationFailure and include pymongo-auth-aws version. + raise OperationFailure( + f"{exc} (pymongo-auth-aws version {pymongo_auth_aws.__version__})" + ) from None + except Exception: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + raise diff --git a/pymongo/asynchronous/auth_oidc.py b/pymongo/asynchronous/auth_oidc.py new file mode 100644 index 0000000000..f8f046bd94 --- /dev/null +++ b/pymongo/asynchronous/auth_oidc.py @@ -0,0 +1,305 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""MONGODB-OIDC Authentication helpers.""" +from __future__ import annotations + +import asyncio +import threading +import time +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, Optional, Union + +import bson +from bson.binary import Binary +from pymongo._csot import remaining +from pymongo.auth_oidc_shared import ( + CALLBACK_VERSION, + HUMAN_CALLBACK_TIMEOUT_SECONDS, + MACHINE_CALLBACK_TIMEOUT_SECONDS, + TIME_BETWEEN_CALLS_SECONDS, + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + OIDCIdPInfo, + _OIDCProperties, +) +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.helpers_shared import _AUTHENTICATION_FAILURE_CODE +from pymongo.lock import Lock, _async_create_lock + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.auth_shared import MongoCredential + +_IS_SYNC = False + + +def _get_authenticator( + credentials: MongoCredential, address: tuple[str, int] +) -> _OIDCAuthenticator: + if credentials.cache.data: + return credentials.cache.data + + # Extract values. + principal_name = credentials.username + properties = credentials.mechanism_properties + + # Validate that the address is allowed. + if properties.human_callback is not None: + found = False + allowed_hosts = properties.allowed_hosts + for patt in allowed_hosts: + if patt == address[0]: + found = True + elif patt.startswith("*.") and address[0].endswith(patt[1:]): + found = True + if not found: + raise ConfigurationError( + f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" + ) + + # Get or create the cache data. + credentials.cache.data = _OIDCAuthenticator(username=principal_name, properties=properties) + return credentials.cache.data + + +@dataclass +class _OIDCAuthenticator: + username: str + properties: _OIDCProperties + refresh_token: Optional[str] = field(default=None) + access_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + token_gen_id: int = field(default=0) + if not _IS_SYNC: + lock: Lock = field(default_factory=_async_create_lock) # type: ignore[assignment] + else: + lock: threading.Lock = field(default_factory=_async_create_lock) # type: ignore[assignment, no-redef] + + last_call_time: float = field(default=0) + + async def reauthenticate(self, conn: AsyncConnection) -> Optional[Mapping[str, Any]]: + """Handle a reauthenticate from the server.""" + # Invalidate the token for the connection. + self._invalidate(conn) + # Call the appropriate auth logic for the callback type. + if self.properties.callback: + return await self._authenticate_machine(conn) + return await self._authenticate_human(conn) + + async def authenticate(self, conn: AsyncConnection) -> Optional[Mapping[str, Any]]: + """Handle an initial authenticate request.""" + # First handle speculative auth. + # If it succeeded, we are done. + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + if resp and resp["done"]: + conn.oidc_token_gen_id = self.token_gen_id + return resp + + # If spec auth failed, call the appropriate auth logic for the callback type. + # We cannot assume that the token is invalid, because a proxy may have been + # involved that stripped the speculative auth information. 
+        if self.properties.callback:
+            return await self._authenticate_machine(conn)
+        return await self._authenticate_human(conn)
+
+    def get_spec_auth_cmd(self) -> Optional[MutableMapping[str, Any]]:
+        """Get the appropriate speculative auth command."""
+        if not self.access_token:
+            return None
+        return self._get_start_command({"jwt": self.access_token})
+
+    async def _authenticate_machine(self, conn: AsyncConnection) -> Mapping[str, Any]:
+        # If there is a cached access token, try to authenticate with it. If
+        # authentication fails with error code 18, invalidate the access token,
+        # fetch a new access token, and try to authenticate again. If authentication
+        # fails for any other reason, raise the error to the user.
+        if self.access_token:
+            try:
+                return await self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    return await self._authenticate_machine(conn)
+                raise
+        return await self._sasl_start_jwt(conn)
+
+    async def _authenticate_human(self, conn: AsyncConnection) -> Optional[Mapping[str, Any]]:
+        # If we have a cached access token, try a JwtStepRequest. If
+        # authentication fails with error code 18, invalidate the access token,
+        # and try to authenticate again. If authentication fails for any other
+        # reason, raise the error to the user.
+        if self.access_token:
+            try:
+                return await self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    return await self._authenticate_human(conn)
+                raise
+
+        # If we have a cached refresh token, try a JwtStepRequest with that.
+        # If authentication fails with error code 18, invalidate the access and
+        # refresh tokens, and try to authenticate again. If authentication fails for
+        # any other reason, raise the error to the user.
+        if self.refresh_token:
+            try:
+                return await self._sasl_start_jwt(conn)
+            except OperationFailure as e:
+                if self._is_auth_error(e):
+                    self.refresh_token = None
+                    return await self._authenticate_human(conn)
+                raise
+
+        # Start a new Two-Step SASL conversation.
+        # Run a PrincipalStepRequest to get the IdpInfo.
+        cmd = self._get_start_command(None)
+        start_resp = await self._run_command(conn, cmd)
+        # Attempt to authenticate with a JwtStepRequest.
+        return await self._sasl_continue_jwt(conn, start_resp)
+
+    async def _get_access_token(self) -> Optional[str]:
+        properties = self.properties
+        cb: Union[None, OIDCCallback]
+        resp: OIDCCallbackResult
+
+        is_human = properties.human_callback is not None
+        if is_human and self.idp_info is None:
+            return None
+
+        if properties.callback:
+            cb = properties.callback
+        if properties.human_callback:
+            cb = properties.human_callback
+
+        prev_token = self.access_token
+        if prev_token:
+            return prev_token
+
+        if cb is None and not prev_token:
+            return None
+
+        if not prev_token and cb is not None:
+            async with self.lock:  # type: ignore[attr-defined]
+                # See if the token was changed while we were waiting for the
+                # lock.
+                new_token = self.access_token
+                if new_token != prev_token:
+                    return new_token
+
+                # Ensure that we are waiting a min time between callback invocations.
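+                # Successive callback invocations are throttled to at least
+                # TIME_BETWEEN_CALLS_SECONDS apart, as computed below.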
+ delta = time.time() - self.last_call_time + if delta < TIME_BETWEEN_CALLS_SECONDS: + await asyncio.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) + self.last_call_time = time.time() + + if is_human: + timeout = HUMAN_CALLBACK_TIMEOUT_SECONDS + assert self.idp_info is not None + else: + timeout = int(remaining() or MACHINE_CALLBACK_TIMEOUT_SECONDS) + context = OIDCCallbackContext( + timeout_seconds=timeout, + version=CALLBACK_VERSION, + refresh_token=self.refresh_token, + idp_info=self.idp_info, + username=self.properties.username, + ) + if not _IS_SYNC: + resp = await asyncio.get_running_loop().run_in_executor(None, cb.fetch, context) # type: ignore[assignment] + else: + resp = cb.fetch(context) + if not isinstance(resp, OIDCCallbackResult): + raise ValueError( + f"Callback result must be of type OIDCCallbackResult, not {type(resp)}" + ) + self.refresh_token = resp.refresh_token + self.access_token = resp.access_token + self.token_gen_id += 1 + + return self.access_token + + async def _run_command( + self, conn: AsyncConnection, cmd: MutableMapping[str, Any] + ) -> Mapping[str, Any]: + try: + return await conn.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] + except OperationFailure as e: + if self._is_auth_error(e): + self._invalidate(conn) + raise + + def _is_auth_error(self, err: Exception) -> bool: + if not isinstance(err, OperationFailure): + return False + return err.code == _AUTHENTICATION_FAILURE_CODE + + def _invalidate(self, conn: AsyncConnection) -> None: + # Ignore the invalidation if a token gen id is given and is less than our + # current token gen id. + token_gen_id = conn.oidc_token_gen_id or 0 + if token_gen_id is not None and token_gen_id < self.token_gen_id: + return + self.access_token = None + + async def _sasl_continue_jwt( + self, conn: AsyncConnection, start_resp: Mapping[str, Any] + ) -> Mapping[str, Any]: + self.access_token = None + self.refresh_token = None + start_payload: dict[str, Any] = bson.decode(start_resp["payload"]) + if "issuer" in start_payload: + self.idp_info = OIDCIdPInfo(**start_payload) + access_token = await self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_continue_command({"jwt": access_token}, start_resp) + return await self._run_command(conn, cmd) + + async def _sasl_start_jwt(self, conn: AsyncConnection) -> Mapping[str, Any]: + access_token = await self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_start_command({"jwt": access_token}) + return await self._run_command(conn, cmd) + + def _get_start_command(self, payload: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: + if payload is None: + principal_name = self.username + if principal_name: + payload = {"n": principal_name} + else: + payload = {} + bin_payload = Binary(bson.encode(payload)) + return {"saslStart": 1, "mechanism": "MONGODB-OIDC", "payload": bin_payload} + + def _get_continue_command( + self, payload: Mapping[str, Any], start_resp: Mapping[str, Any] + ) -> MutableMapping[str, Any]: + bin_payload = Binary(bson.encode(payload)) + return { + "saslContinue": 1, + "payload": bin_payload, + "conversationId": start_resp["conversationId"], + } + + +async def _authenticate_oidc( + credentials: MongoCredential, conn: AsyncConnection, reauthenticate: bool +) -> Optional[Mapping[str, Any]]: + """Authenticate using MONGODB-OIDC.""" + authenticator = _get_authenticator(credentials, conn.address) + if reauthenticate: + return await authenticator.reauthenticate(conn) + else: + return await 
authenticator.authenticate(conn) diff --git a/pymongo/asynchronous/bulk.py b/pymongo/asynchronous/bulk.py new file mode 100644 index 0000000000..4a54f9eb3f --- /dev/null +++ b/pymongo/asynchronous/bulk.py @@ -0,0 +1,753 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The bulk write operations interface. + +.. versionadded:: 2.7 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Iterator, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.asynchronous.client_session import AsyncClientSession, _validate_session_write_concern +from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.bulk_shared import ( + _COMMANDS, + _DELETE_ALL, + _merge_command, + _raise_bulk_write_error, + _Run, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + InvalidOperation, + NotPrimaryError, + OperationFailure, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, + _BulkWriteContext, + _convert_exception, + _convert_write_result, + _EncryptedBulkWriteContext, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.typings import _DocumentOut, _DocumentType, _Pipeline + +_IS_SYNC = False + + +class _AsyncBulk: + """The private guts of the bulk write API.""" + + def __init__( + self, + collection: AsyncCollection[_DocumentType], + ordered: bool, + bypass_document_validation: Optional[bool], + comment: Optional[str] = None, + let: Optional[Any] = None, + ) -> None: + """Initialize a _AsyncBulk instance.""" + self.collection = collection.with_options( + codec_options=collection.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + ) + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.comment: Optional[str] = comment + self.ordered = ordered + self.ops: list[tuple[int, Mapping[str, Any]]] = [] + self.executed = False + self.bypass_doc_val = bypass_document_validation + self.uses_collation = False + self.uses_array_filters = False + self.uses_hint_update = False + self.uses_hint_delete = False + self.uses_sort = False + self.is_retryable = True + self.retrying = False + self.started_retryable_write = False + # Extra state so that we know where to pick up on 
a retry attempt. + self.current_run = None + self.next_run = None + self.is_encrypted = False + + @property + def bulk_ctx_class(self) -> Type[_BulkWriteContext]: + encrypter = self.collection.database.client._encrypter + if encrypter and not encrypter._bypass_auto_encryption: + self.is_encrypted = True + return _EncryptedBulkWriteContext + else: + self.is_encrypted = False + return _BulkWriteContext + + def add_insert(self, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() + self.ops.append((_INSERT, document)) + + def add_update( + self, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool, + upsert: Optional[bool], + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" + validate_ok_for_update(update) + cmd: dict[str, Any] = {"q": selector, "u": update, "multi": multi} + if upsert is not None: + cmd["upsert"] = upsert + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if array_filters is not None: + self.uses_array_filters = True + cmd["arrayFilters"] = array_filters + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort + if multi: + # A bulk_write containing an update_many is not retryable. + self.is_retryable = False + self.ops.append((_UPDATE, cmd)) + + def add_replace( + self, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: Optional[bool], + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create a replace document and add it to the list of ops.""" + validate_ok_for_replace(replacement) + cmd: dict[str, Any] = {"q": selector, "u": replacement} + if upsert is not None: + cmd["upsert"] = upsert + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort + self.ops.append((_UPDATE, cmd)) + + def add_delete( + self, + selector: Mapping[str, Any], + limit: int, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a delete document and add it to the list of ops.""" + cmd: dict[str, Any] = {"q": selector, "limit": limit} + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if hint is not None: + self.uses_hint_delete = True + cmd["hint"] = hint + if limit == _DELETE_ALL: + # A bulk_write containing a delete_many is not retryable. + self.is_retryable = False + self.ops.append((_DELETE, cmd)) + + def gen_ordered(self) -> Iterator[Optional[_Run]]: + """Generate batches of operations, batched by type of + operation, in the order **provided**. 
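+
+        For example, the ordered ops [insert, insert, update, insert] yield
+        three runs: the first two inserts together, then the update, then the
+        final insert, so the caller's order is preserved across type changes.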
+ """ + run = None + for idx, (op_type, operation) in enumerate(self.ops): + if run is None: + run = _Run(op_type) + elif run.op_type != op_type: + yield run + run = _Run(op_type) + run.add(idx, operation) + yield run + + def gen_unordered(self) -> Iterator[_Run]: + """Generate batches of operations, batched by type of + operation, in arbitrary order. + """ + operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)] + for idx, (op_type, operation) in enumerate(self.ops): + operations[op_type].add(idx, operation) + + for run in operations: + if run.ops: + yield run + + @_handle_reauth + async def write_command( + self, + bwc: _BulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + docs: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> dict[str, Any]: + """A proxy for SocketInfo.write_command that handles event publishing.""" + cmd[bwc.field] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, docs) + try: + reply = await bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + await client._process_response(reply, bwc.session) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Process the response from the server. 
+ if isinstance(exc, (NotPrimaryError, OperationFailure)): + await client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + raise + return reply # type: ignore[return-value] + + async def unack_write( + self, + bwc: _BulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + max_doc_size: int, + docs: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> Optional[Mapping[str, Any]]: + """A proxy for AsyncConnection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, docs) + try: + result = await bwc.conn.unack_write(msg, max_doc_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. + reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + raise + return result # type: ignore[return-value] + + async def _execute_batch_unack( + self, + bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], + cmd: dict[str, Any], + ops: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> list[Mapping[str, Any]]: + if self.is_encrypted: + _, batched_cmd, to_send = bwc.batch_command(cmd, ops) + await bwc.conn.command( # type: ignore[misc] + bwc.db_name, + batched_cmd, # type: ignore[arg-type] + write_concern=WriteConcern(w=0), + session=bwc.session, # type: ignore[arg-type] + 
client=client, # type: ignore[arg-type] + ) + else: + request_id, msg, to_send = bwc.batch_command(cmd, ops) + # Though this isn't strictly a "legacy" write, the helper + # handles publishing commands and sending our message + # without receiving a result. Send 0 for max_doc_size + # to disable size checking. Size checking is handled while + # the documents are encoded to BSON. + await self.unack_write(bwc, cmd, request_id, msg, 0, to_send, client) # type: ignore[arg-type] + + return to_send + + async def _execute_batch( + self, + bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], + cmd: dict[str, Any], + ops: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]]]: + if self.is_encrypted: + _, batched_cmd, to_send = bwc.batch_command(cmd, ops) + result = await bwc.conn.command( # type: ignore[misc] + bwc.db_name, + batched_cmd, # type: ignore[arg-type] + codec_options=bwc.codec, + session=bwc.session, # type: ignore[arg-type] + client=client, # type: ignore[arg-type] + ) + else: + request_id, msg, to_send = bwc.batch_command(cmd, ops) + result = await self.write_command(bwc, cmd, request_id, msg, to_send, client) # type: ignore[arg-type] + + return result, to_send # type: ignore[return-value] + + async def _execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + conn: AsyncConnection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + db_name = self.collection.database.name + client = self.collection.database.client + listeners = client._event_listeners + + if not self.current_run: + self.current_run = next(generator) + self.next_run = None + run = self.current_run + + # AsyncConnection.command validates the session, but we use + # AsyncConnection.write_command + conn.validate_session(client, session) + last_run = False + + while run: + if not self.retrying: + self.next_run = next(generator, None) + if self.next_run is None: + last_run = True + + cmd_name = _COMMANDS[run.op_type] + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, + session, + run.op_type, + self.collection.codec_options, + ) + + while run.idx_offset < len(run.ops): + # If this is the last possible operation, use the + # final write concern. + if last_run and (len(run.ops) - run.idx_offset) == 1: + write_concern = final_write_concern or write_concern + + cmd = {cmd_name: self.collection.name, "ordered": self.ordered} + if self.comment: + cmd["comment"] = self.comment + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.let is not None and run.op_type in (_DELETE, _UPDATE): + cmd["let"] = self.let + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(client, cmd) + ops = islice(run.ops, run.idx_offset, None) + + # Run as many ops as possible in one command. 
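+                # batch_command encodes only as many of the remaining ops as
+                # fit within the server's batch limits; run.idx_offset then
+                # advances by the number actually sent, so oversized runs are
+                # split across multiple commands.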
+ if write_concern.acknowledged: + result, to_send = await self._execute_batch(bwc, cmd, ops, client) + + # Retryable writeConcernErrors halt the execution of this run. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(run, full, run.idx_offset, result) + _raise_bulk_write_error(full) + + _merge_command(run, full_result, run.idx_offset, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + if self.ordered and "writeErrors" in result: + break + else: + to_send = await self._execute_batch_unack(bwc, cmd, ops, client) + + run.idx_offset += len(to_send) + + # We're supposed to continue if errors are + # at the write concern level (e.g. wtimeout) + if self.ordered and full_result["writeErrors"]: + break + # Reset our state + self.current_run = run = self.next_run + + async def execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + operation: str, + ) -> dict[str, Any]: + """Execute using write commands.""" + # nModified is only reported for write commands, not legacy ops. + full_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + op_id = _randint() + + async def retryable_bulk( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable: bool + ) -> None: + await self._execute_command( + generator, + write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + client = self.collection.database.client + _ = await client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, # type: ignore[arg-type] + operation_id=op_id, + ) + + if full_result["writeErrors"] or full_result["writeConcernErrors"]: + _raise_bulk_write_error(full_result) + return full_result + + async def execute_op_msg_no_results( + self, conn: AsyncConnection, generator: Iterator[Any] + ) -> None: + """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" + db_name = self.collection.database.name + client = self.collection.database.client + listeners = client._event_listeners + op_id = _randint() + + if not self.current_run: + self.current_run = next(generator) + run = self.current_run + + while run: + cmd_name = _COMMANDS[run.op_type] + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, + None, + run.op_type, + self.collection.codec_options, + ) + + while run.idx_offset < len(run.ops): + cmd = { + cmd_name: self.collection.name, + "ordered": False, + "writeConcern": {"w": 0}, + } + conn.add_server_api(cmd) + ops = islice(run.ops, run.idx_offset, None) + # Run as many ops as possible. 
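+                # With w:0 there is no server reply to inspect, so each batch
+                # is fire-and-forget; idx_offset still records progress
+                # through the run.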
+ to_send = await self._execute_batch_unack(bwc, cmd, ops, client) + run.idx_offset += len(to_send) + self.current_run = run = next(generator, None) + + async def execute_command_no_results( + self, + conn: AsyncConnection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" + full_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + # Ordered bulk writes have to be acknowledged so that we stop + # processing at the first error, even when the application + # specified unacknowledged writeConcern. + initial_write_concern = WriteConcern() + op_id = _randint() + try: + await self._execute_command( + generator, + initial_write_concern, + None, + conn, + op_id, + False, + full_result, + write_concern, + ) + except OperationFailure: + pass + + async def execute_no_results( + self, + conn: AsyncConnection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Guard against unsupported unacknowledged writes. + unack = write_concern and not write_concern.acknowledged + if unack and self.uses_hint_delete and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if unack and self.uses_hint_update and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if unack and self.uses_sort and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + # Cannot have both unacknowledged writes and bypass document validation. 
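+        # With w=0 the server sends no reply, so a document rejected by
+        # validation could never be reported back to the caller.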
+ if self.bypass_doc_val: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + if self.ordered: + return await self.execute_command_no_results(conn, generator, write_concern) + return await self.execute_op_msg_no_results(conn, generator) + + async def execute( + self, + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + write_concern = write_concern or self.collection.write_concern + session = _validate_session_write_concern(session, write_concern) + + if self.ordered: + generator = self.gen_ordered() + else: + generator = self.gen_unordered() + + client = self.collection.database.client + if not write_concern.acknowledged: + async with await client._conn_for_writes(session, operation) as connection: + await self.execute_no_results(connection, generator, write_concern) + return None + else: + return await self.execute_command(generator, write_concern, session, operation) diff --git a/pymongo/asynchronous/change_stream.py b/pymongo/asynchronous/change_stream.py new file mode 100644 index 0000000000..b2b78b0660 --- /dev/null +++ b/pymongo/asynchronous/change_stream.py @@ -0,0 +1,496 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Watch changes on a collection, a database, or the entire cluster.""" +from __future__ import annotations + +import copy +from typing import TYPE_CHECKING, Any, Generic, Mapping, Optional, Type, Union + +from bson import CodecOptions, _bson_to_dict +from bson.raw_bson import RawBSONDocument +from bson.timestamp import Timestamp +from pymongo import _csot, common +from pymongo.asynchronous.aggregation import ( + _AggregationCommand, + _CollectionAggregationCommand, + _DatabaseAggregationCommand, +) +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.collation import validate_collation_or_none +from pymongo.errors import ( + ConnectionFailure, + CursorNotFound, + InvalidOperation, + OperationFailure, + PyMongoError, +) +from pymongo.operations import _Op +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline + +_IS_SYNC = False + +# The change streams spec considers the following server errors from the +# getMore command non-resumable. All other getMore errors are resumable. 
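+# This list only applies to servers with wire version < 9 (pre-4.4); newer
+# servers label resumable errors with "ResumableChangeStreamError" instead
+# (see _resumable() below).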
+_RESUMABLE_GETMORE_ERRORS = frozenset( + [ + 6, # HostUnreachable + 7, # HostNotFound + 89, # NetworkTimeout + 91, # ShutdownInProgress + 189, # PrimarySteppedDown + 262, # ExceededTimeLimit + 9001, # SocketException + 10107, # NotWritablePrimary + 11600, # InterruptedAtShutdown + 11602, # InterruptedDueToReplStateChange + 13435, # NotPrimaryNoSecondaryOk + 13436, # NotPrimaryOrSecondary + 63, # StaleShardVersion + 150, # StaleEpoch + 13388, # StaleConfig + 234, # RetryChangeStream + 133, # FailedToSatisfyReadPreference + ] +) + + +if TYPE_CHECKING: + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.database import AsyncDatabase + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection + + +def _resumable(exc: PyMongoError) -> bool: + """Return True if given a resumable change stream error.""" + if isinstance(exc, (ConnectionFailure, CursorNotFound)): + return True + if isinstance(exc, OperationFailure): + if exc._max_wire_version is None: + return False + return ( + exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") + ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) + return False + + +class AsyncChangeStream(Generic[_DocumentType]): + """The internal abstract base class for change stream cursors. + + Should not be called directly by application developers. Use + :meth:`pymongo.asynchronous.collection.AsyncCollection.watch`, + :meth:`pymongo.asynchronous.database.AsyncDatabase.watch`, or + :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.watch` instead. + + .. versionadded:: 3.6 + .. seealso:: The MongoDB documentation on `changeStreams `_. + """ + + def __init__( + self, + target: Union[ + AsyncMongoClient[_DocumentType], + AsyncDatabase[_DocumentType], + AsyncCollection[_DocumentType], + ], + pipeline: Optional[_Pipeline], + full_document: Optional[str], + resume_after: Optional[Mapping[str, Any]], + max_await_time_ms: Optional[int], + batch_size: Optional[int], + collation: Optional[_CollationIn], + start_at_operation_time: Optional[Timestamp], + session: Optional[AsyncClientSession], + start_after: Optional[Mapping[str, Any]], + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> None: + if pipeline is None: + pipeline = [] + pipeline = common.validate_list("pipeline", pipeline) + common.validate_string_or_none("full_document", full_document) + validate_collation_or_none(collation) + common.validate_non_negative_integer_or_none("batchSize", batch_size) + + self._decode_custom = False + self._orig_codec_options: CodecOptions[_DocumentType] = target.codec_options + if target.codec_options.type_registry._decoder_map: + self._decode_custom = True + # Keep the type registry so that we support encoding custom types + # in the pipeline. 
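+            # Documents are fetched as RawBSONDocument and custom decoding is
+            # deferred to try_next(), which re-decodes the raw bytes with the
+            # original codec options via _bson_to_dict.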
+ self._target = target.with_options( # type: ignore + codec_options=target.codec_options.with_options(document_class=RawBSONDocument) + ) + else: + self._target = target + + self._pipeline = copy.deepcopy(pipeline) + self._full_document = full_document + self._full_document_before_change = full_document_before_change + self._uses_start_after = start_after is not None + self._uses_resume_after = resume_after is not None + self._resume_token = copy.deepcopy(start_after or resume_after) + self._max_await_time_ms = max_await_time_ms + self._batch_size = batch_size + self._collation = collation + self._start_at_operation_time = start_at_operation_time + self._session = session + self._comment = comment + self._closed = False + self._timeout = self._target._timeout + self._show_expanded_events = show_expanded_events + + async def _initialize_cursor(self) -> None: + # Initialize cursor. + self._cursor = await self._create_cursor() + + @property + def _aggregation_command_class(self) -> Type[_AggregationCommand]: + """The aggregation command class to be used.""" + raise NotImplementedError + + @property + def _client(self) -> AsyncMongoClient: # type: ignore[type-arg] + """The client against which the aggregation commands for + this AsyncChangeStream will be run. + """ + raise NotImplementedError + + def _change_stream_options(self) -> dict[str, Any]: + """Return the options dict for the $changeStream pipeline stage.""" + options: dict[str, Any] = {} + if self._full_document is not None: + options["fullDocument"] = self._full_document + + if self._full_document_before_change is not None: + options["fullDocumentBeforeChange"] = self._full_document_before_change + + resume_token = self.resume_token + if resume_token is not None: + if self._uses_start_after: + options["startAfter"] = resume_token + else: + options["resumeAfter"] = resume_token + + elif self._start_at_operation_time is not None: + options["startAtOperationTime"] = self._start_at_operation_time + + if self._show_expanded_events: + options["showExpandedEvents"] = self._show_expanded_events + + return options + + def _command_options(self) -> dict[str, Any]: + """Return the options dict for the aggregation command.""" + options = {} + if self._max_await_time_ms is not None: + options["maxAwaitTimeMS"] = self._max_await_time_ms + if self._batch_size is not None: + options["batchSize"] = self._batch_size + return options + + def _aggregation_pipeline(self) -> list[dict[str, Any]]: + """Return the full aggregation pipeline for this AsyncChangeStream.""" + options = self._change_stream_options() + full_pipeline: list[dict[str, Any]] = [{"$changeStream": options}] + full_pipeline.extend(self._pipeline) + return full_pipeline + + def _process_result(self, result: Mapping[str, Any], conn: AsyncConnection) -> None: + """Callback that caches the postBatchResumeToken or + startAtOperationTime from a changeStream aggregate command response + containing an empty batch of change documents. + + This is implemented as a callback because we need access to the wire + version in order to determine whether to cache this value. 
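+
+        Only a server that supports startAtOperationTime (wire version 7+,
+        i.e. MongoDB 4.0+) has its operationTime cached here.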
+ """ + if not result["cursor"]["firstBatch"]: + if "postBatchResumeToken" in result["cursor"]: + self._resume_token = result["cursor"]["postBatchResumeToken"] + elif ( + self._start_at_operation_time is None + and self._uses_resume_after is False + and self._uses_start_after is False + and conn.max_wire_version >= 7 + ): + self._start_at_operation_time = result.get("operationTime") + # PYTHON-2181: informative error on missing operationTime. + if self._start_at_operation_time is None: + raise OperationFailure( + "Expected field 'operationTime' missing from command " + f"response : {result!r}" + ) + + async def _run_aggregation_cmd( + self, session: Optional[AsyncClientSession] + ) -> AsyncCommandCursor: # type: ignore[type-arg] + """Run the full aggregation pipeline for this AsyncChangeStream and return + the corresponding AsyncCommandCursor. + """ + cmd = self._aggregation_command_class( + self._target, + AsyncCommandCursor, + self._aggregation_pipeline(), + self._command_options(), + result_processor=self._process_result, + comment=self._comment, + ) + return await self._client._retryable_read( + cmd.get_cursor, + self._target._read_preference_for(session), + session, + operation=_Op.AGGREGATE, + ) + + async def _create_cursor(self) -> AsyncCommandCursor: # type: ignore[type-arg] + async with self._client._tmp_session(self._session) as s: + return await self._run_aggregation_cmd(session=s) + + async def _resume(self) -> None: + """Reestablish this change stream after a resumable error.""" + try: + await self._cursor.close() + except PyMongoError: + pass + self._cursor = await self._create_cursor() + + async def close(self) -> None: + """Close this AsyncChangeStream.""" + self._closed = True + await self._cursor.close() + + def __aiter__(self) -> AsyncChangeStream[_DocumentType]: + return self + + @property + def resume_token(self) -> Optional[Mapping[str, Any]]: + """The cached resume token that will be used to resume after the most + recently returned change. + + .. versionadded:: 3.9 + """ + return copy.deepcopy(self._resume_token) + + @_csot.apply + async def next(self) -> _DocumentType: + """Advance the cursor. + + This method blocks until the next change document is returned or an + unrecoverable error is raised. This method is used when iterating over + all changes in the cursor. For example:: + + try: + resume_token = None + pipeline = [{'$match': {'operationType': 'insert'}}] + async with await db.collection.watch(pipeline) as stream: + async for insert_change in stream: + print(insert_change) + resume_token = stream.resume_token + except pymongo.errors.PyMongoError: + # The AsyncChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + if resume_token is None: + # There is no usable resume token because there was a + # failure during AsyncChangeStream initialization. + logging.error('...') + else: + # Use the interrupted AsyncChangeStream's resume token to create + # a new AsyncChangeStream. The new stream will continue from the + # last seen insert change without missing any events. + async with await db.collection.watch( + pipeline, resume_after=resume_token) as stream: + async for insert_change in stream: + print(insert_change) + + Raises :exc:`StopIteration` if this AsyncChangeStream is closed. 
+ """ + while self.alive: + doc = await self.try_next() + if doc is not None: + return doc + + raise StopAsyncIteration + + __anext__ = next + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration` and :meth:`try_next` can return ``None``. + + .. versionadded:: 3.8 + """ + return not self._closed + + @_csot.apply + async def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next change document without waiting + indefinitely for the next change. For example:: + + async with await db.collection.watch() as stream: + while stream.alive: + change = await stream.try_next() + # Note that the AsyncChangeStream's resume token may be updated + # even when no changes are returned. + print("Current resume token: %r" % (stream.resume_token,)) + if change is not None: + print("Change document: %r" % (change,)) + continue + # We end up here when there are no recent changes. + # Sleep for a while before trying again to avoid flooding + # the server with getMore requests when no changes are + # available. + asyncio.sleep(10) + + If no change document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there have been no changes) then ``None`` is returned. + + :return: The next change document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. versionadded:: 3.8 + """ + if not self._closed and not self._cursor.alive: + await self._resume() + + # Attempt to get the next change with at most one getMore and at most + # one resume attempt. + try: + try: + change = await self._cursor._try_next(True) + except PyMongoError as exc: + if not _resumable(exc): + raise + await self._resume() + change = await self._cursor._try_next(False) + except PyMongoError as exc: + # Close the stream after a fatal error. + if not _resumable(exc) and not exc.timeout: + await self.close() + raise + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + await self.close() + raise + + # Check if the cursor was invalidated. + if not self._cursor.alive: + self._closed = True + + # If no changes are available. + if change is None: + # We have either iterated over all documents in the cursor, + # OR the most-recently returned batch is empty. In either case, + # update the cached resume token with the postBatchResumeToken if + # one was returned. We also clear the startAtOperationTime. + if self._cursor._post_batch_resume_token is not None: + self._resume_token = self._cursor._post_batch_resume_token + self._start_at_operation_time = None + return change + + # Else, changes are available. + try: + resume_token = change["_id"] + except KeyError: + await self.close() + raise InvalidOperation( + "Cannot provide resume functionality when the resume token is missing." + ) from None + + # If this is the last change document from the current batch, cache the + # postBatchResumeToken. + if not self._cursor._has_next() and self._cursor._post_batch_resume_token: + resume_token = self._cursor._post_batch_resume_token + + # Hereafter, don't use startAfter; instead use resumeAfter. + self._uses_start_after = False + self._uses_resume_after = True + + # Cache the resume token and clear startAtOperationTime. 
+ self._resume_token = resume_token + self._start_at_operation_time = None + + if self._decode_custom: + return _bson_to_dict(change.raw, self._orig_codec_options) + return change + + async def __aenter__(self) -> AsyncChangeStream[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + +class AsyncCollectionChangeStream(AsyncChangeStream[_DocumentType]): + """A change stream that watches changes on a single collection. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.asynchronous.collection.AsyncCollection.watch` instead. + + .. versionadded:: 3.7 + """ + + _target: AsyncCollection[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: + return _CollectionAggregationCommand + + @property + def _client(self) -> AsyncMongoClient[_DocumentType]: + return self._target.database.client + + +class AsyncDatabaseChangeStream(AsyncChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in a database. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.asynchronous.database.AsyncDatabase.watch` instead. + + .. versionadded:: 3.7 + """ + + _target: AsyncDatabase[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: + return _DatabaseAggregationCommand + + @property + def _client(self) -> AsyncMongoClient[_DocumentType]: + return self._target.client + + +class AsyncClusterChangeStream(AsyncDatabaseChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in the cluster. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.asynchronous.mongo_client.AsyncMongoClient.watch` instead. + + .. versionadded:: 3.7 + """ + + def _change_stream_options(self) -> dict[str, Any]: + options = super()._change_stream_options() + options["allChangesForCluster"] = True + return options diff --git a/pymongo/asynchronous/client_bulk.py b/pymongo/asynchronous/client_bulk.py new file mode 100644 index 0000000000..151942c8a8 --- /dev/null +++ b/pymongo/asynchronous/client_bulk.py @@ -0,0 +1,756 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The client-level bulk write operations interface. + +.. 
versionadded:: 4.9 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.asynchronous.client_session import AsyncClientSession, _validate_session_write_concern +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.helpers import _handle_reauth + +if TYPE_CHECKING: + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection +from pymongo._client_bulk_shared import ( + _merge_command, + _throw_client_bulk_write_exception, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + WaitQueueTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _ClientBulkWriteContext, + _convert_client_bulk_exception, + _convert_exception, + _convert_write_result, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.results import ( + ClientBulkWriteResult, + DeleteResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _DocumentOut, _Pipeline +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class _AsyncClientBulk: + """The private guts of the client-level bulk write API.""" + + def __init__( + self, + client: AsyncMongoClient[Any], + write_concern: WriteConcern, + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + comment: Optional[str] = None, + let: Optional[Any] = None, + verbose_results: bool = False, + ) -> None: + """Initialize a _AsyncClientBulk instance.""" + self.client = client + self.write_concern = write_concern + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.ordered = ordered + self.bypass_doc_val = bypass_document_validation + self.comment = comment + self.verbose_results = verbose_results + self.ops: list[tuple[str, Mapping[str, Any]]] = [] + self.namespaces: list[str] = [] + self.idx_offset: int = 0 + self.total_ops: int = 0 + self.executed = False + self.uses_collation = False + self.uses_array_filters = False + self.is_retryable = self.client.options.retry_writes + self.retrying = False + self.started_retryable_write = False + + @property + def bulk_ctx_class(self) -> Type[_ClientBulkWriteContext]: + return _ClientBulkWriteContext + + def add_insert(self, namespace: str, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. 
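+        # As in _AsyncBulk.add_insert, injecting the _id client side lets the
+        # driver report inserted ids without an extra server round trip.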
+        if not (isinstance(document, RawBSONDocument) or "_id" in document):
+            document["_id"] = ObjectId()
+        cmd = {"insert": -1, "document": document}
+        self.ops.append(("insert", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    def add_update(
+        self,
+        namespace: str,
+        selector: Mapping[str, Any],
+        update: Union[Mapping[str, Any], _Pipeline],
+        multi: bool,
+        upsert: Optional[bool] = None,
+        collation: Optional[Mapping[str, Any]] = None,
+        array_filters: Optional[list[Mapping[str, Any]]] = None,
+        hint: Union[str, dict[str, Any], None] = None,
+        sort: Optional[Mapping[str, Any]] = None,
+    ) -> None:
+        """Create an update document and add it to the list of ops."""
+        validate_ok_for_update(update)
+        cmd = {
+            "update": -1,
+            "filter": selector,
+            "updateMods": update,
+            "multi": multi,
+        }
+        if upsert is not None:
+            cmd["upsert"] = upsert
+        if array_filters is not None:
+            self.uses_array_filters = True
+            cmd["arrayFilters"] = array_filters
+        if hint is not None:
+            cmd["hint"] = hint
+        if collation is not None:
+            self.uses_collation = True
+            cmd["collation"] = collation
+        if sort is not None:
+            cmd["sort"] = sort
+        if multi:
+            # A bulk_write containing an update_many is not retryable.
+            self.is_retryable = False
+        self.ops.append(("update", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    def add_replace(
+        self,
+        namespace: str,
+        selector: Mapping[str, Any],
+        replacement: Mapping[str, Any],
+        upsert: Optional[bool] = None,
+        collation: Optional[Mapping[str, Any]] = None,
+        hint: Union[str, dict[str, Any], None] = None,
+        sort: Optional[Mapping[str, Any]] = None,
+    ) -> None:
+        """Create a replace document and add it to the list of ops."""
+        validate_ok_for_replace(replacement)
+        cmd = {
+            "update": -1,
+            "filter": selector,
+            "updateMods": replacement,
+            "multi": False,
+        }
+        if upsert is not None:
+            cmd["upsert"] = upsert
+        if hint is not None:
+            cmd["hint"] = hint
+        if collation is not None:
+            self.uses_collation = True
+            cmd["collation"] = collation
+        if sort is not None:
+            cmd["sort"] = sort
+        self.ops.append(("replace", cmd))
+        self.namespaces.append(namespace)
+        self.total_ops += 1
+
+    def add_delete(
+        self,
+        namespace: str,
+        selector: Mapping[str, Any],
+        multi: bool,
+        collation: Optional[Mapping[str, Any]] = None,
+        hint: Union[str, dict[str, Any], None] = None,
+    ) -> None:
+        """Create a delete document and add it to the list of ops."""
+        cmd = {"delete": -1, "filter": selector, "multi": multi}
+        if hint is not None:
+            cmd["hint"] = hint
+        if collation is not None:
+            self.uses_collation = True
+            cmd["collation"] = collation
+        if multi:
+            # A bulk_write containing a delete_many is not retryable.
+ self.is_retryable = False + self.ops.append(("delete", cmd)) + self.namespaces.append(namespace) + self.total_ops += 1 + + @_handle_reauth + async def write_command( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: Union[bytes, dict[str, Any]], + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> dict[str, Any]: + """A proxy for AsyncConnection.write_command that handles event publishing.""" + cmd["ops"] = op_docs + cmd["nsInfo"] = ns_docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, op_docs, ns_docs) + try: + reply = await bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc, arg-type] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + # Process the response from the server. + await self.client._process_response(reply, bwc.session) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + # Process the response from the server. 
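+            # Even for a failed command, _process_response gives the session
+            # a chance to observe the error reply (e.g. to advance cluster
+            # time and update server state).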
+ if isinstance(exc, OperationFailure): + await self.client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + else: + await self.client._process_response({}, bwc.session) # type: ignore[arg-type] + return reply # type: ignore[return-value] + + async def unack_write( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: AsyncMongoClient[Any], + ) -> Optional[Mapping[str, Any]]: + """A proxy for AsyncConnection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, op_docs, ns_docs) + try: + result = await bwc.conn.unack_write(msg, bwc.max_bson_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. + reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. 
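+            # With w:0 the server sends no reply, so an exception surfacing
+            # here generally originates client side (e.g. a network error
+            # while sending the message).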
+ reply = {"error": exc} + return reply + + async def _execute_batch_unack( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (unack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) + await self.unack_write(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + return to_send_ops, to_send_ns + + async def _execute_batch( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (ack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) + result = await self.write_command( + bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client + ) # type: ignore[arg-type] + return result, to_send_ops, to_send_ns # type: ignore[return-value] + + async def _process_results_cursor( + self, + full_result: MutableMapping[str, Any], + result: MutableMapping[str, Any], + conn: AsyncConnection, + session: Optional[AsyncClientSession], + ) -> None: + """Internal helper for processing the server reply command cursor.""" + if result.get("cursor"): + if session: + session._leave_alive = True + coll = AsyncCollection( + database=AsyncDatabase(self.client, "admin"), + name="$cmd.bulkWrite", + ) + cmd_cursor = AsyncCommandCursor( + coll, + result["cursor"], + conn.address, + session=session, + comment=self.comment, + ) + await cmd_cursor._maybe_pin_connection(conn) + + # Iterate the cursor to get individual write results. + try: + async for doc in cmd_cursor: + original_index = doc["idx"] + self.idx_offset + op_type, op = self.ops[original_index] + + if not doc["ok"]: + result["writeErrors"].append(doc) + if self.ordered: + return + + # Record individual write result. + if doc["ok"] and self.verbose_results: + if op_type == "insert": + inserted_id = op["document"]["_id"] + res = InsertOneResult(inserted_id, acknowledged=True) # type: ignore[assignment] + if op_type in ["update", "replace"]: + op_type = "update" + res = UpdateResult(doc, acknowledged=True, in_client_bulk=True) # type: ignore[assignment] + if op_type == "delete": + res = DeleteResult(doc, acknowledged=True) # type: ignore[assignment] + full_result[f"{op_type}Results"][original_index] = res + except Exception as exc: + # Attempt to close the cursor, then raise top-level error. 
+ if cmd_cursor.alive: + await cmd_cursor.close() + result["error"] = _convert_client_bulk_exception(exc) + + async def _execute_command( + self, + write_concern: WriteConcern, + session: Optional[AsyncClientSession], + conn: AsyncConnection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + """Internal helper for executing batches of bulkWrite commands.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + + # AsyncConnection.command validates the session, but we use + # AsyncConnection.write_command + conn.validate_session(self.client, session) + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + session, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # If this is the last possible batch, use the + # final write concern. + if self.total_ops - self.idx_offset <= bwc.max_write_batch_size: + write_concern = final_write_concern or write_concern + + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = not self.verbose_results + cmd["ordered"] = self.ordered # type: ignore[assignment] + not_in_transaction = session and not session.in_transaction + if not_in_transaction or not session: + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, self.client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(self.client, cmd) + ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) + + # Run as many ops as possible in one server command. + if write_concern.acknowledged: + raw_result, to_send_ops, _ = await self._execute_batch(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + result = raw_result + + # Top-level server/network error. + if result.get("error"): + error = result["error"] + retryable_top_level_error = ( + hasattr(error, "details") + and isinstance(error.details, dict) + and error.details.get("code", 0) in _RETRYABLE_ERROR_CODES + ) + retryable_network_error = isinstance( + error, ConnectionFailure + ) and not isinstance(error, (NotPrimaryError, WaitQueueTimeoutError)) + + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + if retryable and (retryable_top_level_error or retryable_network_error): + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + else: + _merge_command(self.ops, self.idx_offset, full_result, result) + _throw_client_bulk_write_exception(full_result, self.verbose_results) + + result["error"] = None + result["writeErrors"] = [] + if result.get("nErrors", 0) < len(to_send_ops): + full_result["anySuccessful"] = True + + # Top-level command error. 
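+                # Unlike the transport-level "error" handled above, this is
+                # the server rejecting the bulkWrite command itself (ok: 0),
+                # so it is merged into the result and execution stops.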
+ if not result["ok"]: + result["error"] = raw_result + _merge_command(self.ops, self.idx_offset, full_result, result) + break + + if retryable: + # Retryable writeConcernErrors halt the execution of this batch. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + + # Process the server reply as a command cursor. + await self._process_results_cursor(full_result, result, conn, session) + + # Merge this batch's results with the full results. + _merge_command(self.ops, self.idx_offset, full_result, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + else: + to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + # We halt execution if we hit a top-level error, + # or an individual error in an ordered bulk write. + if full_result["error"] or (self.ordered and full_result["writeErrors"]): + break + + async def execute_command( + self, + session: Optional[AsyncClientSession], + operation: str, + ) -> MutableMapping[str, Any]: + """Execute commands with w=1 WriteConcern.""" + full_result: MutableMapping[str, Any] = { + "anySuccessful": False, + "error": None, + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nDeleted": 0, + "insertResults": {}, + "updateResults": {}, + "deleteResults": {}, + } + op_id = _randint() + + async def retryable_bulk( + session: Optional[AsyncClientSession], + conn: AsyncConnection, + retryable: bool, + ) -> None: + if conn.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + await self._execute_command( + self.write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + await self.client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, + operation_id=op_id, + ) + + if full_result["error"] or full_result["writeErrors"] or full_result["writeConcernErrors"]: + _throw_client_bulk_write_exception(full_result, self.verbose_results) + return full_result + + async def execute_command_unack( + self, + conn: AsyncConnection, + ) -> None: + """Execute commands with OP_MSG and w=0 writeConcern. Always unordered.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + op_id = _randint() + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + None, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # Construct the server command, specifying the relevant options. 
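+            # The assembled command is shaped roughly like (sketch):
+            #   {"bulkWrite": 1, "errorsOnly": True, "ordered": False,
+            #    "writeConcern": {"w": 0}, "ops": [...], "nsInfo": [...]}
+            # where ops/nsInfo are attached when the batch is encoded.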
+ cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = True + cmd["ordered"] = False + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + cmd["writeConcern"] = {"w": 0} # type: ignore[assignment] + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + conn.add_server_api(cmd) + ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) + + # Run as many ops as possible in one server command. + to_send_ops, _ = await self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + async def execute_no_results( + self, + conn: AsyncConnection, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Cannot have both unacknowledged writes and bypass document validation. + if self.bypass_doc_val is not None: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + return await self.execute_command_unack(conn) + + async def execute( + self, + session: Optional[AsyncClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + session = _validate_session_write_concern(session, self.write_concern) + + if not self.write_concern.acknowledged: + async with await self.client._conn_for_writes(session, operation) as connection: + if connection.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + await self.execute_no_results(connection) + return ClientBulkWriteResult(None, False, False) # type: ignore[arg-type] + + result = await self.execute_command(session, operation) + return ClientBulkWriteResult( + result, + self.write_concern.acknowledged, + self.verbose_results, + ) diff --git a/pymongo/asynchronous/client_session.py b/pymongo/asynchronous/client_session.py new file mode 100644 index 0000000000..8674e98447 --- /dev/null +++ b/pymongo/asynchronous/client_session.py @@ -0,0 +1,1187 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Logical sessions for ordering sequential operations. + +.. versionadded:: 3.6 + +Causally Consistent Reads +========================= + +.. 
code-block:: python
+
+    async with client.start_session(causal_consistency=True) as session:
+        collection = client.db.collection
+        await collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session)
+        secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY)
+
+        # A secondary read waits for replication of the write.
+        await secondary_c.find_one({"_id": 1}, session=session)
+
+If `causal_consistency` is True (the default), read operations that use
+the session are causally after previous read and write operations. Using a
+causally consistent session, an application can read its own writes and is
+guaranteed monotonic reads, even when reading from replica set secondaries.
+
+.. seealso:: The MongoDB documentation on `causal-consistency `_.
+
+.. _async-transactions-ref:
+
+Transactions
+============
+
+.. versionadded:: 3.7
+
+MongoDB 4.0 adds support for transactions on replica set primaries. A
+transaction is associated with an :class:`AsyncClientSession`. To start a transaction
+on a session, use :meth:`AsyncClientSession.start_transaction` in a with-statement.
+Then, execute an operation within the transaction by passing the session to the
+operation:
+
+.. code-block:: python
+
+    orders = client.db.orders
+    inventory = client.db.inventory
+    async with client.start_session() as session:
+        async with await session.start_transaction():
+            await orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
+            await inventory.update_one(
+                {"sku": "abc123", "qty": {"$gte": 100}},
+                {"$inc": {"qty": -100}},
+                session=session,
+            )
+
+Upon normal completion of the ``async with await session.start_transaction()`` block, the
+transaction automatically calls :meth:`AsyncClientSession.commit_transaction`.
+If the block exits with an exception, the transaction automatically calls
+:meth:`AsyncClientSession.abort_transaction`.
+
+In general, multi-document transactions only support read/write (CRUD)
+operations on existing collections. However, MongoDB 4.4 adds support for
+creating collections and indexes with some limitations, including an
+insert operation that would result in the creation of a new collection.
+For a complete description of all the supported and unsupported operations
+see the `MongoDB server's documentation for transactions
+`_.
+
+A session may only have a single active transaction at a time; multiple
+transactions on the same session can be executed in sequence.
+
+Sharded Transactions
+^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 3.9
+
+PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB
+>=4.2. Sharded transactions have the same API as replica set transactions.
+When running a transaction against a sharded cluster, the session is
+pinned to the mongos server selected for the first operation in the
+transaction. All subsequent operations that are part of the same transaction
+are routed to the same mongos server. When the transaction is completed by
+running either commitTransaction or abortTransaction, the session is unpinned.
+
+.. seealso:: The MongoDB documentation on `transactions `_.
+
+.. _async-snapshot-reads-ref:
+
+Snapshot Reads
+==============
+
+.. versionadded:: 3.12
+
+MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by
+passing the ``snapshot`` option to
+:meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.start_session`.
+If ``snapshot`` is True, all read operations that use this session read data
+from the same snapshot timestamp. 
The server chooses the latest +majority-committed snapshot timestamp when executing the first read operation +using the session. Subsequent reads on this session read from the same +snapshot timestamp. Snapshot reads are also supported when reading from +replica set secondaries. + +.. code-block:: python + + # Each read using this session reads data from the same point in time. + async with client.start_session(snapshot=True) as session: + order = await orders.find_one({"sku": "abc123"}, session=session) + inventory = await inventory.find_one({"sku": "abc123"}, session=session) + +Snapshot Reads Limitations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Snapshot reads sessions are incompatible with ``causal_consistency=True``. +Only the following read operations are supported in a snapshot reads session: + +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.find` +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one` +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.count_documents` +- :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct` (on unsharded collections) + +Classes +======= +""" + +from __future__ import annotations + +import collections +import time +import uuid +from collections.abc import Mapping as _Mapping +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + Callable, + Coroutine, + Mapping, + MutableMapping, + NoReturn, + Optional, + Type, + TypeVar, +) + +from bson.binary import Binary +from bson.int64 import Int64 +from bson.timestamp import Timestamp +from pymongo import _csot +from pymongo.asynchronous.cursor import _ConnectionManager +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, + PyMongoError, + WTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.server_type import SERVER_TYPE +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from types import TracebackType + + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.typings import ClusterTime, _Address + +_IS_SYNC = False + + +class SessionOptions: + """Options for a new :class:`AsyncClientSession`. + + :param causal_consistency: If True, read operations are causally + ordered within the session. Defaults to True when the ``snapshot`` + option is ``False``. + :param default_transaction_options: The default + TransactionOptions to use for transactions started on this session. + :param snapshot: If True, then all reads performed using this + session will read from the same snapshot. This option is incompatible + with ``causal_consistency=True``. Defaults to ``False``. + + .. versionchanged:: 3.12 + Added the ``snapshot`` parameter. 
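+
+    A sketch of configuring default transaction options (assumes an
+    ``AsyncMongoClient`` named ``client``):
+
+    .. code-block:: python
+
+        async with client.start_session(
+            default_transaction_options=TransactionOptions(
+                write_concern=WriteConcern("majority")
+            )
+        ) as session:
+            async with await session.start_transaction():
+                ...  # Ops here commit with w="majority".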
+    """
+
+    def __init__(
+        self,
+        causal_consistency: Optional[bool] = None,
+        default_transaction_options: Optional[TransactionOptions] = None,
+        snapshot: Optional[bool] = False,
+    ) -> None:
+        if snapshot:
+            if causal_consistency:
+                raise ConfigurationError("snapshot reads do not support causal_consistency=True")
+            causal_consistency = False
+        elif causal_consistency is None:
+            causal_consistency = True
+        self._causal_consistency = causal_consistency
+        if default_transaction_options is not None:
+            if not isinstance(default_transaction_options, TransactionOptions):
+                raise TypeError(
+                    "default_transaction_options must be an instance of "
+                    "pymongo.client_session.TransactionOptions, not: {!r}".format(
+                        default_transaction_options
+                    )
+                )
+        self._default_transaction_options = default_transaction_options
+        self._snapshot = snapshot
+
+    @property
+    def causal_consistency(self) -> bool:
+        """Whether causal consistency is configured."""
+        return self._causal_consistency
+
+    @property
+    def default_transaction_options(self) -> Optional[TransactionOptions]:
+        """The default TransactionOptions to use for transactions started on
+        this session.
+
+        .. versionadded:: 3.7
+        """
+        return self._default_transaction_options
+
+    @property
+    def snapshot(self) -> Optional[bool]:
+        """Whether snapshot reads are configured.
+
+        .. versionadded:: 3.12
+        """
+        return self._snapshot
+
+
+class TransactionOptions:
+    """Options for :meth:`AsyncClientSession.start_transaction`.
+
+    :param read_concern: The
+        :class:`~pymongo.read_concern.ReadConcern` to use for this transaction.
+        If ``None`` (the default) the :attr:`read_concern` of
+        the :class:`AsyncMongoClient` is used.
+    :param write_concern: The
+        :class:`~pymongo.write_concern.WriteConcern` to use for this
+        transaction. If ``None`` (the default) the :attr:`write_concern` of
+        the :class:`AsyncMongoClient` is used.
+    :param read_preference: The read preference to use. If
+        ``None`` (the default) the :attr:`read_preference` of this
+        :class:`AsyncMongoClient` is used. See :mod:`~pymongo.read_preferences`
+        for options. Transactions which read must use
+        :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
+    :param max_commit_time_ms: The maximum amount of time to allow a
+        single commitTransaction command to run. This option is an alias for
+        the maxTimeMS option on the commitTransaction command. If ``None`` (the
+        default) maxTimeMS is not used.
+
+    .. versionchanged:: 3.9
+        Added the ``max_commit_time_ms`` option.
+
+    .. 
versionadded:: 3.7 + """ + + def __init__( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> None: + self._read_concern = read_concern + self._write_concern = write_concern + self._read_preference = read_preference + self._max_commit_time_ms = max_commit_time_ms + if read_concern is not None: + if not isinstance(read_concern, ReadConcern): + raise TypeError( + "read_concern must be an instance of " + f"pymongo.read_concern.ReadConcern, not: {read_concern!r}" + ) + if write_concern is not None: + if not isinstance(write_concern, WriteConcern): + raise TypeError( + "write_concern must be an instance of " + f"pymongo.write_concern.WriteConcern, not: {write_concern!r}" + ) + if not write_concern.acknowledged: + raise ConfigurationError( + "transactions do not support unacknowledged write concern" + f": {write_concern!r}" + ) + if read_preference is not None: + if not isinstance(read_preference, _ServerMode): + raise TypeError( + f"{read_preference!r} is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." + ) + if max_commit_time_ms is not None: + if not isinstance(max_commit_time_ms, int): + raise TypeError( + f"max_commit_time_ms must be an integer or None, not {type(max_commit_time_ms)}" + ) + + @property + def read_concern(self) -> Optional[ReadConcern]: + """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" + return self._read_concern + + @property + def write_concern(self) -> Optional[WriteConcern]: + """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" + return self._write_concern + + @property + def read_preference(self) -> Optional[_ServerMode]: + """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" + return self._read_preference + + @property + def max_commit_time_ms(self) -> Optional[int]: + """The maxTimeMS to use when running a commitTransaction command. + + .. versionadded:: 3.9 + """ + return self._max_commit_time_ms + + +def _validate_session_write_concern( + session: Optional[AsyncClientSession], write_concern: Optional[WriteConcern] +) -> Optional[AsyncClientSession]: + """Validate that an explicit session is not used with an unack'ed write. + + Returns the session to use for the next operation. + """ + if session: + if write_concern is not None and not write_concern.acknowledged: + # For unacknowledged writes without an explicit session, + # drivers SHOULD NOT use an implicit session. If a driver + # creates an implicit session for unacknowledged writes + # without an explicit session, the driver MUST NOT send the + # session ID. 
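+            # Hence an implicit session is silently dropped here, while an
+            # explicit session combined with w=0 is rejected below as a
+            # configuration error.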
+            if session._implicit:
+                return None
+            else:
+                raise ConfigurationError(
+                    "Explicit sessions are incompatible with "
+                    f"unacknowledged write concern: {write_concern!r}"
+                )
+    return session
+
+
+class _TransactionContext:
+    """Internal transaction context manager for start_transaction."""
+
+    def __init__(self, session: AsyncClientSession):
+        self.__session = session
+
+    async def __aenter__(self) -> _TransactionContext:
+        return self
+
+    async def __aexit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        if self.__session.in_transaction:
+            if exc_val is None:
+                await self.__session.commit_transaction()
+            else:
+                await self.__session.abort_transaction()
+
+
+class _TxnState:
+    NONE = 1
+    STARTING = 2
+    IN_PROGRESS = 3
+    COMMITTED = 4
+    COMMITTED_EMPTY = 5
+    ABORTED = 6
+
+
+class _Transaction:
+    """Internal class to hold transaction information in an AsyncClientSession."""
+
+    def __init__(self, opts: Optional[TransactionOptions], client: AsyncMongoClient[Any]):
+        self.opts = opts
+        self.state = _TxnState.NONE
+        self.sharded = False
+        self.pinned_address: Optional[_Address] = None
+        self.conn_mgr: Optional[_ConnectionManager] = None
+        self.recovery_token = None
+        self.attempt = 0
+        self.client = client
+
+    def active(self) -> bool:
+        return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS)
+
+    def starting(self) -> bool:
+        return self.state == _TxnState.STARTING
+
+    @property
+    def pinned_conn(self) -> Optional[AsyncConnection]:
+        if self.active() and self.conn_mgr:
+            return self.conn_mgr.conn
+        return None
+
+    def pin(self, server: Server, conn: AsyncConnection) -> None:
+        self.sharded = True
+        self.pinned_address = server.description.address
+        if server.description.server_type == SERVER_TYPE.LoadBalancer:
+            conn.pin_txn()
+        self.conn_mgr = _ConnectionManager(conn, False)
+
+    async def unpin(self) -> None:
+        self.pinned_address = None
+        if self.conn_mgr:
+            await self.conn_mgr.close()
+        self.conn_mgr = None
+
+    async def reset(self) -> None:
+        await self.unpin()
+        self.state = _TxnState.NONE
+        self.sharded = False
+        self.recovery_token = None
+        self.attempt = 0
+
+    def __del__(self) -> None:
+        if self.conn_mgr:
+            # Reuse the cursor closing machinery to return the socket to the
+            # pool soon.
+            self.client._close_cursor_soon(0, None, self.conn_mgr)
+            self.conn_mgr = None
+
+
+def _reraise_with_unknown_commit(exc: Any) -> NoReturn:
+    """Re-raise an exception with the UnknownTransactionCommitResult label."""
+    exc._add_error_label("UnknownTransactionCommitResult")
+    raise
+
+
+def _max_time_expired_error(exc: PyMongoError) -> bool:
+    """Return true if exc is a MaxTimeMSExpired error."""
+    return isinstance(exc, OperationFailure) and exc.code == 50
+
+
+# From the transactions spec, all the retryable write errors plus
+# WriteConcernTimeout and MaxTimeMSExpired.
+_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset(  # type: ignore[type-arg]
+    [
+        64,  # WriteConcernTimeout
+        50,  # MaxTimeMSExpired
+    ]
+)
+
+# From the Convenient API for Transactions spec, with_transaction must
+# halt retries after 120 seconds.
+# This limit is non-configurable and was chosen to be twice the 60 second
+# default value of MongoDB's `transactionLifetimeLimitSeconds` parameter.
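+# (Illustration: a transaction that keeps failing with
+# TransientTransactionError is re-run until time.monotonic() - start_time
+# reaches 120 seconds, after which the last error is re-raised; see
+# _within_time_limit below.)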
+_WITH_TRANSACTION_RETRY_TIME_LIMIT = 120 + + +def _within_time_limit(start_time: float) -> bool: + """Are we within the with_transaction retry limit?""" + return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT + + +_T = TypeVar("_T") + +if TYPE_CHECKING: + from pymongo.asynchronous.mongo_client import AsyncMongoClient + + +class AsyncClientSession: + """A session for ordering sequential operations. + + :class:`AsyncClientSession` instances are **not thread-safe or fork-safe**. + They can only be used by one thread or process at a time. A single + :class:`AsyncClientSession` cannot be used to run multiple operations + concurrently. + + Should not be initialized directly by application developers - to create a + :class:`AsyncClientSession`, call + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.start_session`. + """ + + def __init__( + self, + client: AsyncMongoClient[Any], + server_session: Any, + options: SessionOptions, + implicit: bool, + ) -> None: + # An AsyncMongoClient, a _ServerSession, a SessionOptions, and a set. + self._client: AsyncMongoClient[Any] = client + self._server_session = server_session + self._options = options + self._cluster_time: Optional[Mapping[str, Any]] = None + self._operation_time: Optional[Timestamp] = None + self._snapshot_time = None + # Is this an implicitly created session? + self._implicit = implicit + self._transaction = _Transaction(None, client) + # Is this session attached to a cursor? + self._attached_to_cursor = False + # Should we leave the session alive when the cursor is closed? + self._leave_alive = False + + async def end_session(self) -> None: + """Finish this session. If a transaction has started, abort it. + + It is an error to use the session after the session has ended. + """ + await self._end_session(lock=True) + + async def _end_session(self, lock: bool) -> None: + if self._server_session is not None: + try: + if self.in_transaction: + await self.abort_transaction() + # It's possible we're still pinned here when the transaction + # is in the committed state when the session is discarded. + await self._unpin() + finally: + self._client._return_server_session(self._server_session) + self._server_session = None + + def _end_implicit_session(self) -> None: + # Implicit sessions can't be part of transactions or pinned connections + if not self._leave_alive and self._server_session is not None: + self._client._return_server_session(self._server_session) + self._server_session = None + + def _check_ended(self) -> None: + if self._server_session is None: + raise InvalidOperation("Cannot use ended session") + + async def __aenter__(self) -> AsyncClientSession: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self._end_session(lock=True) + + @property + def client(self) -> AsyncMongoClient[Any]: + """The :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` this session was + created from. 
+ """ + return self._client + + @property + def options(self) -> SessionOptions: + """The :class:`SessionOptions` this session was created with.""" + return self._options + + @property + def session_id(self) -> Mapping[str, Any]: + """A BSON document, the opaque server session identifier.""" + self._check_ended() + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.session_id + + @property + def _transaction_id(self) -> Int64: + """The current transaction id for the underlying server session.""" + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.transaction_id + + @property + def cluster_time(self) -> Optional[ClusterTime]: + """The cluster time returned by the last operation executed + in this session. + """ + return self._cluster_time + + @property + def operation_time(self) -> Optional[Timestamp]: + """The operation time returned by the last operation executed + in this session. + """ + return self._operation_time + + def _inherit_option(self, name: str, val: _T) -> _T: + """Return the inherited TransactionOption value.""" + if val: + return val + txn_opts = self.options.default_transaction_options + parent_val = txn_opts and getattr(txn_opts, name) + if parent_val: + return parent_val + return getattr(self.client, name) + + async def with_transaction( + self, + callback: Callable[[AsyncClientSession], Coroutine[Any, Any, _T]], + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> _T: + """Execute a callback in a transaction. + + This method starts a transaction on this session, executes ``callback`` + once, and then commits the transaction. For example:: + + async def callback(session): + orders = session.client.db.orders + inventory = session.client.db.inventory + await orders.insert_one({"sku": "abc123", "qty": 100}, session=session) + await inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, session=session) + + async with client.start_session() as session: + await session.with_transaction(callback) + + To pass arbitrary arguments to the ``callback``, wrap your callable + with a ``lambda`` like this:: + + async def callback(session, custom_arg, custom_kwarg=None): + # Transaction operations... + + async with client.start_session() as session: + await session.with_transaction( + lambda s: callback(s, "custom_arg", custom_kwarg=1)) + + In the event of an exception, ``with_transaction`` may retry the commit + or the entire transaction, therefore ``callback`` may be invoked + multiple times by a single call to ``with_transaction``. Developers + should be mindful of this possibility when writing a ``callback`` that + modifies application state or has any other side-effects. + Note that even when the ``callback`` is invoked multiple times, + ``with_transaction`` ensures that the transaction will be committed + at-most-once on the server. + + The ``callback`` should not attempt to start new transactions, but + should simply run operations meant to be contained within a + transaction. The ``callback`` should also not commit the transaction; + this is handled automatically by ``with_transaction``. If the + ``callback`` does commit or abort the transaction without error, + however, ``with_transaction`` will return without taking further + action. 
+
+        :class:`AsyncClientSession` instances are **not thread-safe or fork-safe**.
+        Consequently, the ``callback`` must not attempt to execute multiple
+        operations concurrently.
+
+        When ``callback`` raises an exception, ``with_transaction``
+        automatically aborts the current transaction. When ``callback`` or
+        :meth:`~AsyncClientSession.commit_transaction` raises an exception that
+        includes the ``"TransientTransactionError"`` error label,
+        ``with_transaction`` starts a new transaction and re-executes
+        the ``callback``.
+
+        The ``callback`` MUST NOT silently handle command errors
+        without allowing such errors to propagate. Command errors may abort the
+        transaction on the server, and an attempt to commit the transaction will
+        be rejected with a ``NoSuchTransaction`` error. For more information see
+        the `transactions specification`_.
+
+        When :meth:`~AsyncClientSession.commit_transaction` raises an exception with
+        the ``"UnknownTransactionCommitResult"`` error label,
+        ``with_transaction`` retries the commit until the result of the
+        transaction is known.
+
+        This method will cease retrying after 120 seconds has elapsed. This
+        timeout is not configurable and any exception raised by the
+        ``callback`` or by :meth:`AsyncClientSession.commit_transaction` after the
+        timeout is reached will be re-raised. Applications that desire a
+        different timeout duration should not use this method.
+
+        :param callback: The callable ``callback`` to run inside a transaction.
+            The callable must accept a single argument, this session. Note,
+            under certain error conditions the callback may be run multiple
+            times.
+        :param read_concern: The
+            :class:`~pymongo.read_concern.ReadConcern` to use for this
+            transaction.
+        :param write_concern: The
+            :class:`~pymongo.write_concern.WriteConcern` to use for this
+            transaction.
+        :param read_preference: The read preference to use for this
+            transaction. If ``None`` (the default) the :attr:`read_preference`
+            of this session's :class:`AsyncMongoClient` is used. See
+            :mod:`~pymongo.read_preferences` for options.
+        :param max_commit_time_ms: The maximum amount of time to allow a
+            single commitTransaction command to run, in milliseconds.
+
+        :return: The return value of the ``callback``.
+
+        .. versionadded:: 3.9
+
+        .. _transactions specification:
+            https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback
+        """
+        start_time = time.monotonic()
+        while True:
+            await self.start_transaction(
+                read_concern, write_concern, read_preference, max_commit_time_ms
+            )
+            try:
+                ret = await callback(self)
+            # Catch KeyboardInterrupt, CancelledError, etc. and clean up.
+            except BaseException as exc:
+                if self.in_transaction:
+                    await self.abort_transaction()
+                if (
+                    isinstance(exc, PyMongoError)
+                    and exc.has_error_label("TransientTransactionError")
+                    and _within_time_limit(start_time)
+                ):
+                    # Retry the entire transaction.
+                    continue
+                raise
+
+            if not self.in_transaction:
+                # Assume callback intentionally ended the transaction.
+                return ret
+
+            while True:
+                try:
+                    await self.commit_transaction()
+                except PyMongoError as exc:
+                    if (
+                        exc.has_error_label("UnknownTransactionCommitResult")
+                        and _within_time_limit(start_time)
+                        and not _max_time_expired_error(exc)
+                    ):
+                        # Retry the commit.
+                        continue
+
+                    if exc.has_error_label("TransientTransactionError") and _within_time_limit(
+                        start_time
+                    ):
+                        # Retry the entire transaction.
+                        break
+                    raise
+
+                # Commit succeeded.
+                return ret
+
+    async def start_transaction(
+        self,
+        read_concern: Optional[ReadConcern] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_preference: Optional[_ServerMode] = None,
+        max_commit_time_ms: Optional[int] = None,
+    ) -> AsyncContextManager[Any]:
+        """Start a multi-statement transaction.
+
+        Takes the same arguments as :class:`TransactionOptions`.
+
+        .. versionchanged:: 3.9
+            Added the ``max_commit_time_ms`` option.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+
+        if self.options.snapshot:
+            raise InvalidOperation("Transactions are not supported in snapshot sessions")
+
+        if self.in_transaction:
+            raise InvalidOperation("Transaction already in progress")
+
+        read_concern = self._inherit_option("read_concern", read_concern)
+        write_concern = self._inherit_option("write_concern", write_concern)
+        read_preference = self._inherit_option("read_preference", read_preference)
+        if max_commit_time_ms is None:
+            opts = self.options.default_transaction_options
+            if opts:
+                max_commit_time_ms = opts.max_commit_time_ms
+
+        self._transaction.opts = TransactionOptions(
+            read_concern, write_concern, read_preference, max_commit_time_ms
+        )
+        await self._transaction.reset()
+        self._transaction.state = _TxnState.STARTING
+        self._start_retryable_write()
+        return _TransactionContext(self)
+
+    async def commit_transaction(self) -> None:
+        """Commit a multi-statement transaction.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+        state = self._transaction.state
+        if state is _TxnState.NONE:
+            raise InvalidOperation("No transaction started")
+        elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
+            # Server transaction was never started; no need to send a command.
+            self._transaction.state = _TxnState.COMMITTED_EMPTY
+            return
+        elif state is _TxnState.ABORTED:
+            raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction")
+        elif state is _TxnState.COMMITTED:
+            # We're explicitly retrying the commit; move the state back to
+            # "in progress" so that in_transaction returns true.
+            self._transaction.state = _TxnState.IN_PROGRESS
+
+        try:
+            await self._finish_transaction_with_retry("commitTransaction")
+        except ConnectionFailure as exc:
+            # We do not know if the commit was successfully applied on the
+            # server or if it satisfied the provided write concern; set the
+            # unknown commit error label.
+            exc._remove_error_label("TransientTransactionError")
+            _reraise_with_unknown_commit(exc)
+        except WTimeoutError as exc:
+            # We do not know if the commit has satisfied the provided write
+            # concern; add the unknown commit error label.
+            _reraise_with_unknown_commit(exc)
+        except OperationFailure as exc:
+            if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
+                # The server reports errorLabels in this case.
+                raise
+            # We do not know if the commit was successfully applied on the
+            # server or if it satisfied the provided write concern; set the
+            # unknown commit error label.
+            _reraise_with_unknown_commit(exc)
+        finally:
+            self._transaction.state = _TxnState.COMMITTED
+
+    async def abort_transaction(self) -> None:
+        """Abort a multi-statement transaction.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+
+        state = self._transaction.state
+        if state is _TxnState.NONE:
+            raise InvalidOperation("No transaction started")
+        elif state is _TxnState.STARTING:
+            # Server transaction was never started; no need to send a command.
+ self._transaction.state = _TxnState.ABORTED + return + elif state is _TxnState.ABORTED: + raise InvalidOperation("Cannot call abortTransaction twice") + elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): + raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") + + try: + await self._finish_transaction_with_retry("abortTransaction") + except (OperationFailure, ConnectionFailure): + # The transactions spec says to ignore abortTransaction errors. + pass + finally: + self._transaction.state = _TxnState.ABORTED + await self._unpin() + + async def _finish_transaction_with_retry(self, command_name: str) -> dict[str, Any]: + """Run commit or abort with one retry after any retryable error. + + :param command_name: Either "commitTransaction" or "abortTransaction". + """ + + async def func( + _session: Optional[AsyncClientSession], conn: AsyncConnection, _retryable: bool + ) -> dict[str, Any]: + return await self._finish_transaction(conn, command_name) + + return await self._client._retry_internal( + func, self, None, retryable=True, operation=command_name + ) + + async def _finish_transaction(self, conn: AsyncConnection, command_name: str) -> dict[str, Any]: + self._transaction.attempt += 1 + opts = self._transaction.opts + assert opts + wc = opts.write_concern + cmd = {command_name: 1} + if command_name == "commitTransaction": + if opts.max_commit_time_ms and _csot.get_timeout() is None: + cmd["maxTimeMS"] = opts.max_commit_time_ms + + # Transaction spec says that after the initial commit attempt, + # subsequent commitTransaction commands should be upgraded to use + # w:"majority" and set a default value of 10 seconds for wtimeout. + if self._transaction.attempt > 1: + assert wc + wc_doc = wc.document + wc_doc["w"] = "majority" + wc_doc.setdefault("wtimeout", 10000) + wc = WriteConcern(**wc_doc) + + if self._transaction.recovery_token: + cmd["recoveryToken"] = self._transaction.recovery_token + + return await self._client.admin._command( + conn, cmd, session=self, write_concern=wc, parse_write_concern_error=True + ) + + def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + """Internal cluster time helper.""" + if self._cluster_time is None: + self._cluster_time = cluster_time + elif cluster_time is not None: + if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: + self._cluster_time = cluster_time + + def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: + """Update the cluster time for this session. + + :param cluster_time: The + :data:`~pymongo.asynchronous.client_session.AsyncClientSession.cluster_time` from + another `AsyncClientSession` instance. + """ + if not isinstance(cluster_time, _Mapping): + raise TypeError( + f"cluster_time must be a subclass of collections.Mapping, not {type(cluster_time)}" + ) + if not isinstance(cluster_time.get("clusterTime"), Timestamp): + raise ValueError("Invalid cluster_time") + self._advance_cluster_time(cluster_time) + + def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None: + """Internal operation time helper.""" + if self._operation_time is None: + self._operation_time = operation_time + elif operation_time is not None: + if operation_time > self._operation_time: + self._operation_time = operation_time + + def advance_operation_time(self, operation_time: Timestamp) -> None: + """Update the operation time for this session. 
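+
+        A sketch of causally chaining two sessions (assumes ``client`` and
+        ``collection``)::
+
+            async with client.start_session() as s1:
+                await collection.insert_one({"x": 1}, session=s1)
+                op_time = s1.operation_time
+
+            async with client.start_session() as s2:
+                # Reads in s2 are now causally after s1's write.
+                s2.advance_operation_time(op_time)
+                await collection.find_one({"x": 1}, session=s2)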
+ + :param operation_time: The + :data:`~pymongo.asynchronous.client_session.AsyncClientSession.operation_time` from + another `AsyncClientSession` instance. + """ + if not isinstance(operation_time, Timestamp): + raise TypeError( + f"operation_time must be an instance of bson.timestamp.Timestamp, not {type(operation_time)}" + ) + self._advance_operation_time(operation_time) + + def _process_response(self, reply: Mapping[str, Any]) -> None: + """Process a response to a command that was run with this session.""" + self._advance_cluster_time(reply.get("$clusterTime")) + self._advance_operation_time(reply.get("operationTime")) + if self._options.snapshot and self._snapshot_time is None: + if "cursor" in reply: + ct = reply["cursor"].get("atClusterTime") + else: + ct = reply.get("atClusterTime") + self._snapshot_time = ct + if self.in_transaction and self._transaction.sharded: + recovery_token = reply.get("recoveryToken") + if recovery_token: + self._transaction.recovery_token = recovery_token + + @property + def has_ended(self) -> bool: + """True if this session is finished.""" + return self._server_session is None + + @property + def in_transaction(self) -> bool: + """True if this session has an active multi-statement transaction. + + .. versionadded:: 3.10 + """ + return self._transaction.active() + + @property + def _starting_transaction(self) -> bool: + """True if this session is starting a multi-statement transaction.""" + return self._transaction.starting() + + @property + def _pinned_address(self) -> Optional[_Address]: + """The mongos address this transaction was created on.""" + if self._transaction.active(): + return self._transaction.pinned_address + return None + + @property + def _pinned_connection(self) -> Optional[AsyncConnection]: + """The connection this transaction was started on.""" + return self._transaction.pinned_conn + + def _pin(self, server: Server, conn: AsyncConnection) -> None: + """Pin this session to the given Server or to the given connection.""" + self._transaction.pin(server, conn) + + async def _unpin(self) -> None: + """Unpin this session from any pinned Server.""" + await self._transaction.unpin() + + def _txn_read_preference(self) -> Optional[_ServerMode]: + """Return read preference of this transaction or None.""" + if self.in_transaction: + assert self._transaction.opts + return self._transaction.opts.read_preference + return None + + def _materialize(self, logical_session_timeout_minutes: Optional[int] = None) -> None: + if isinstance(self._server_session, _EmptyServerSession): + old = self._server_session + self._server_session = self._client._topology.get_server_session( + logical_session_timeout_minutes + ) + if old.started_retryable_write: + self._server_session.inc_transaction_id() + + def _apply_to( + self, + command: MutableMapping[str, Any], + is_retryable: bool, + read_preference: _ServerMode, + conn: AsyncConnection, + ) -> None: + if not conn.supports_sessions: + if not self._implicit: + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") + return + self._check_ended() + self._materialize(conn.logical_session_timeout_minutes) + if self.options.snapshot: + self._update_read_concern(command, conn) + + self._server_session.last_use = time.monotonic() + command["lsid"] = self._server_session.session_id + + if is_retryable: + command["txnNumber"] = self._server_session.transaction_id + return + + if self.in_transaction: + if read_preference != ReadPreference.PRIMARY: + raise InvalidOperation( + f"read preference in a 
transaction must be primary, not: {read_preference!r}"
+                )
+
+            if self._transaction.state == _TxnState.STARTING:
+                # First command begins a new transaction.
+                self._transaction.state = _TxnState.IN_PROGRESS
+                command["startTransaction"] = True
+
+                assert self._transaction.opts
+                if self._transaction.opts.read_concern:
+                    rc = self._transaction.opts.read_concern.document
+                    if rc:
+                        command["readConcern"] = rc
+                self._update_read_concern(command, conn)
+
+            command["txnNumber"] = self._server_session.transaction_id
+            command["autocommit"] = False
+
+    def _start_retryable_write(self) -> None:
+        self._check_ended()
+        self._server_session.inc_transaction_id()
+
+    def _update_read_concern(self, cmd: MutableMapping[str, Any], conn: AsyncConnection) -> None:
+        if self.options.causal_consistency and self.operation_time is not None:
+            cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time
+        if self.options.snapshot:
+            if conn.max_wire_version < 13:
+                raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later")
+            rc = cmd.setdefault("readConcern", {})
+            rc["level"] = "snapshot"
+            if self._snapshot_time is not None:
+                rc["atClusterTime"] = self._snapshot_time
+
+    def __copy__(self) -> NoReturn:
+        raise TypeError("An AsyncClientSession cannot be copied; create a new session instead")
+
+
+class _EmptyServerSession:
+    __slots__ = "dirty", "started_retryable_write"
+
+    def __init__(self) -> None:
+        self.dirty = False
+        self.started_retryable_write = False
+
+    def mark_dirty(self) -> None:
+        self.dirty = True
+
+    def inc_transaction_id(self) -> None:
+        self.started_retryable_write = True
+
+
+class _ServerSession:
+    def __init__(self, generation: int):
+        # Ensure id is type 4, regardless of CodecOptions.uuid_representation.
+        self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)}
+        self.last_use = time.monotonic()
+        self._transaction_id = 0
+        self.dirty = False
+        self.generation = generation
+
+    def mark_dirty(self) -> None:
+        """Mark this session as dirty.
+
+        A server session is marked dirty when a command fails with a network
+        error. Dirty sessions are later discarded from the server session pool.
+        """
+        self.dirty = True
+
+    def timed_out(self, session_timeout_minutes: Optional[int]) -> bool:
+        if session_timeout_minutes is None:
+            return False
+
+        idle_seconds = time.monotonic() - self.last_use
+
+        # Timed out if we have less than a minute to live.
+        return idle_seconds > (session_timeout_minutes - 1) * 60
+
+    @property
+    def transaction_id(self) -> Int64:
+        """Positive 64-bit integer."""
+        return Int64(self._transaction_id)
+
+    def inc_transaction_id(self) -> None:
+        self._transaction_id += 1
+
+
+class _ServerSessionPool(collections.deque):  # type: ignore[type-arg]
+    """Pool of _ServerSession objects.
+
+    This class is thread-safe.
+    """
+
+    def __init__(self, *args: Any, **kwargs: Any):
+        super().__init__(*args, **kwargs)
+        self.generation = 0
+
+    def reset(self) -> None:
+        self.generation += 1
+        self.clear()
+
+    def pop_all(self) -> list[dict[str, Any]]:
+        ids = []
+        while True:
+            try:
+                ids.append(self.pop().session_id)
+            except IndexError:
+                break
+        return ids
+
+    def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession:
+        # Although the Driver Sessions Spec says we only clear stale sessions
+        # in return_server_session, PyMongo can't take a lock when returning
+        # sessions from a __del__ method (like in AsyncCursor.__die), so it can't
+        # clear stale sessions there. 
In case many sessions were returned via + # __del__, check for stale sessions here too. + self._clear_stale(session_timeout_minutes) + + # The most recently used sessions are on the left. + while True: + try: + s = self.popleft() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + return s + + return _ServerSession(self.generation) + + def return_server_session(self, server_session: _ServerSession) -> None: + # Discard sessions from an old pool to avoid duplicate sessions in the + # child process after a fork. + if server_session.generation == self.generation and not server_session.dirty: + self.appendleft(server_session) + + def _clear_stale(self, session_timeout_minutes: Optional[int]) -> None: + # Clear stale sessions. The least recently used are on the right. + while True: + try: + s = self.pop() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + self.append(s) + # The remaining sessions also haven't timed out. + break diff --git a/pymongo/asynchronous/collection.py b/pymongo/asynchronous/collection.py new file mode 100644 index 0000000000..e7e2f58031 --- /dev/null +++ b/pymongo/asynchronous/collection.py @@ -0,0 +1,3646 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
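The deque-based _ServerSessionPool in client_session.py above keeps the most recently used sessions on the left and clears stale ones from the right, where a single fresh entry guarantees everything behind it is fresh too. The following is a minimal standalone sketch of that reuse-and-expiry technique; the names (Session, SessionPool) are illustrative only, it omits the generation and dirty-session bookkeeping, and it is not part of this patch:

    import collections
    import time


    class Session:
        def __init__(self) -> None:
            self.last_use = time.monotonic()

        def timed_out(self, timeout_minutes: float) -> bool:
            # Treat a session as expired once less than a minute of its
            # server-side lifetime remains.
            return time.monotonic() - self.last_use > (timeout_minutes - 1) * 60


    class SessionPool(collections.deque):
        def acquire(self, timeout_minutes: float) -> Session:
            # Reuse from the most-recently-used (left) end, discarding any
            # entry that has already expired.
            while self:
                s = self.popleft()
                if not s.timed_out(timeout_minutes):
                    return s
            return Session()

        def release(self, s: Session) -> None:
            s.last_use = time.monotonic()
            self.appendleft(s)

        def clear_stale(self, timeout_minutes: float) -> None:
            # Stale entries collect at the least-recently-used (right) end;
            # the first fresh one proves the rest are fresh, so stop there.
            while self:
                s = self.pop()
                if not s.timed_out(timeout_minutes):
                    self.append(s)
                    break

Appends and pops at either end of a collections.deque are O(1) and thread-safe in CPython, which is what lets the real pool document itself as thread-safe without holding an explicit lock.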
+ +"""Collection level utilities for Mongo.""" +from __future__ import annotations + +import warnings +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + Callable, + Coroutine, + Generic, + Iterable, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from bson.timestamp import Timestamp +from pymongo import ASCENDING, _csot, common, helpers_shared, message +from pymongo.asynchronous.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) +from pymongo.asynchronous.bulk import _AsyncBulk +from pymongo.asynchronous.change_stream import AsyncCollectionChangeStream +from pymongo.asynchronous.command_cursor import ( + AsyncCommandCursor, + AsyncRawBatchCommandCursor, +) +from pymongo.asynchronous.cursor import ( + AsyncCursor, + AsyncRawBatchCursor, +) +from pymongo.collation import validate_collation_or_none +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.errors import ( + ConfigurationError, + InvalidName, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers_shared import _check_write_command_response +from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + SearchIndexModel, + UpdateMany, + UpdateOne, + _IndexKeyHint, + _IndexList, + _Op, +) +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean + +_IS_SYNC = False + +T = TypeVar("T") + +_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} + + +_WriteOp = Union[ + InsertOne[_DocumentType], + DeleteOne, + DeleteMany, + ReplaceOne[_DocumentType], + UpdateOne, + UpdateMany, +] + + +class ReturnDocument: + """An enum used with + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_replace` and + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_update`. + """ + + BEFORE = False + """Return the original document before it was updated/replaced, or + ``None`` if no document matches the query. 
+ """ + AFTER = True + """Return the updated/replaced or inserted document.""" + + +if TYPE_CHECKING: + import bson + from pymongo.asynchronous.aggregation import _AggregationCommand + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.database import AsyncDatabase + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.collation import Collation + from pymongo.read_concern import ReadConcern + + +class AsyncCollection(common.BaseObject, Generic[_DocumentType]): + """An asynchronous Mongo collection.""" + + def __init__( + self, + database: AsyncDatabase[_DocumentType], + name: str, + create: Optional[bool] = False, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> None: + """Get / create an asynchronous Mongo collection. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if `name` is + not a valid collection name. Any additional keyword arguments will be used + as options passed to the create command. See + :meth:`~pymongo.asynchronous.database.AsyncDatabase.create_collection` for valid + options. + + If `create` is ``True``, `collation` is specified, or any additional + keyword arguments are present, a ``create`` command will be + sent, using ``session`` if specified. Otherwise, a ``create`` command + will not be sent and the collection will be created implicitly on first + use. The optional ``session`` argument is *only* used for the ``create`` + command, it is not associated with the collection afterward. + + :param database: the database to get a collection from + :param name: the name of the collection to get + :param create: **Not supported by AsyncCollection**. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) database.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) database.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) database.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) database.read_concern is used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. If a collation is provided, + it will be passed to the create collection command. + :param session: **Not supported by AsyncCollection**. + :param kwargs: **Not supported by AsyncCollection**. + + .. versionchanged:: 4.2 + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. + + .. versionchanged:: 4.0 + Removed the reindex, map_reduce, inline_map_reduce, + parallel_scan, initialize_unordered_bulk_op, + initialize_ordered_bulk_op, group, count, insert, save, + update, remove, find_and_modify, and ensure_index methods. See the + :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + Removed the uuid_subtype attribute. 
+ :class:`~pymongo.asynchronous.collection.AsyncCollection` no longer returns an + instance of :class:`~pymongo.asynchronous.collection.AsyncCollection` for attribute + names with leading underscores. You must use dict-style lookups + instead:: + + collection['__my_collection__'] + + Not: + + collection.__my_collection__ + + .. seealso:: The MongoDB documentation on `collections `_. + """ + super().__init__( + codec_options or database.codec_options, + read_preference or database.read_preference, + write_concern or database.write_concern, + read_concern or database.read_concern, + ) + if not isinstance(name, str): + raise TypeError(f"name must be an instance of str, not {type(name)}") + from pymongo.asynchronous.database import AsyncDatabase + + if not isinstance(database, AsyncDatabase): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncDatabase" for cls in type(database).__mro__): + raise TypeError(f"AsyncDatabase required but given {type(database).__name__}") + + if not name or ".." in name: + raise InvalidName("collection names cannot be empty") + if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): + raise InvalidName("collection names must not contain '$': %r" % name) + if name[0] == "." or name[-1] == ".": + raise InvalidName("collection names must not start or end with '.': %r" % name) + if "\x00" in name: + raise InvalidName("collection names must not contain the null character") + + self._database: AsyncDatabase[_DocumentType] = database + self._name = name + self._full_name = f"{self._database.name}.{self._name}" + self._write_response_codec_options = self.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + self._timeout = database.client.options.timeout + + if create or kwargs: + if _IS_SYNC: + warnings.warn( + "The `create` and `kwargs` arguments to AsyncCollection are deprecated and will be removed in PyMongo 5.0", + DeprecationWarning, + stacklevel=2, + ) + self._create(kwargs, session) # type: ignore[unused-coroutine] + else: + raise ValueError( + "AsyncCollection does not support the `create` or `kwargs` arguments." + ) + + def __getattr__(self, name: str) -> AsyncCollection[_DocumentType]: + """Get a sub-collection of this collection by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + full_name = f"{self._name}.{name}" + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {full_name}" + f" collection, use database['{full_name}']." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> AsyncCollection[_DocumentType]: + return AsyncCollection( + self._database, + f"{self._name}.{name}", + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._database!r}, {self._name!r})" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, AsyncCollection): + return self._database == other.database and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._database, self._name)) + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). 
Please compare " + "with None instead: collection is not None" + ) + + @property + def full_name(self) -> str: + """The full name of this :class:`AsyncCollection`. + + The full name is of the form `database_name.collection_name`. + """ + return self._full_name + + @property + def name(self) -> str: + """The name of this :class:`AsyncCollection`.""" + return self._name + + @property + def database(self) -> AsyncDatabase[_DocumentType]: + """The :class:`~pymongo.asynchronous.database.AsyncDatabase` that this + :class:`AsyncCollection` is a part of. + """ + return self._database + + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncCollection[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> AsyncCollection[_DocumentTypeArg]: + ... + + def with_options( + self, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> AsyncCollection[_DocumentType] | AsyncCollection[_DocumentTypeArg]: + """Get a clone of this collection changing the specified settings. + + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) + >>> coll1.read_preference + Primary() + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncCollection` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncCollection` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncCollection` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncCollection` + is used. + """ + return AsyncCollection( + self._database, + self._name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def _write_concern_for_cmd( + self, cmd: Mapping[str, Any], session: Optional[AsyncClientSession] + ) -> WriteConcern: + raw_wc = cmd.get("writeConcern") + if raw_wc is not None: + return WriteConcern(**raw_wc) + else: + return self._write_concern_for(session) + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'AsyncCollection' object is not iterable") + + next = __next__ + + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: + """This is only here so that some API misusages are easier to debug.""" + if "." not in self._name: + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you " + "meant to call the '%s' method on an 'AsyncDatabase' " + "object it is failing because no such method " + "exists." 
% self._name + ) + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you meant to " + f"call the '%s' method on a '{type(self).__name__}' object it is " + "failing because no such method exists." % self._name.split(".")[-1] + ) + + async def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[AsyncClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> AsyncCollectionChangeStream[_DocumentType]: + """Watch changes on this collection. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.asynchronous.change_stream.AsyncCollectionChangeStream` cursor which + iterates over changes on this collection. + + .. code-block:: python + + async with await db.collection.watch() as stream: + async for change in stream: + print(change) + + The :class:`~pymongo.asynchronous.change_stream.AsyncCollectionChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.asynchronous.change_stream.AsyncCollectionChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + async with await db.coll.watch([{"$match": {"operationType": "insert"}}]) as stream: + async for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The AsyncChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + .. note:: Using this helper method is preferred to directly calling + :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` with a + ``$changeStream`` stage, for the purpose of supporting + resumability. + + .. warning:: This AsyncCollection's :attr:`read_concern` must be + ``ReadConcern("majority")`` in order to use the ``$changeStream`` + stage. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. 
If provided, the
+            change stream will start returning changes that occur directly
+            after the operation specified in the resume token. A resume token
+            is the _id value of a change document.
+        :param max_await_time_ms: The maximum time in milliseconds
+            for the server to wait for changes before responding to a getMore
+            operation.
+        :param batch_size: The maximum number of documents to return
+            per batch.
+        :param collation: The :class:`~pymongo.collation.Collation`
+            to use for the aggregation.
+        :param start_at_operation_time: If provided, the resulting
+            change stream will only return changes that occurred at or after
+            the specified :class:`~bson.timestamp.Timestamp`. Requires
+            MongoDB >= 4.0.
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param start_after: The same as `resume_after` except that
+            `start_after` can resume notifications after an invalidate event.
+            This option and `resume_after` are mutually exclusive.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`.
+
+        :return: A :class:`~pymongo.asynchronous.change_stream.AsyncCollectionChangeStream` cursor.
+
+        .. versionchanged:: 4.3
+           Added `show_expanded_events` parameter.
+
+        .. versionchanged:: 4.2
+           Added ``full_document_before_change`` parameter.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionchanged:: 3.9
+           Added the ``start_after`` parameter.
+
+        .. versionchanged:: 3.7
+           Added the ``start_at_operation_time`` parameter.
+
+        .. versionadded:: 3.6
+
+        .. seealso:: The MongoDB documentation on `changeStreams `_.
+
+        .. _change streams specification:
+            https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md
+        """
+        change_stream = AsyncCollectionChangeStream(
+            self,
+            pipeline,
+            full_document,
+            resume_after,
+            max_await_time_ms,
+            batch_size,
+            collation,
+            start_at_operation_time,
+            session,
+            start_after,
+            comment,
+            full_document_before_change,
+            show_expanded_events,
+        )
+
+        await change_stream._initialize_cursor()
+        return change_stream
+
+    async def _conn_for_writes(
+        self, session: Optional[AsyncClientSession], operation: str
+    ) -> AsyncContextManager[AsyncConnection]:
+        return await self._database.client._conn_for_writes(session, operation)
+
+    async def _command(
+        self,
+        conn: AsyncConnection,
+        command: MutableMapping[str, Any],
+        read_preference: Optional[_ServerMode] = None,
+        codec_options: Optional[CodecOptions[Mapping[str, Any]]] = None,
+        check: bool = True,
+        allowable_errors: Optional[Sequence[Union[str, int]]] = None,
+        read_concern: Optional[ReadConcern] = None,
+        write_concern: Optional[WriteConcern] = None,
+        collation: Optional[_CollationIn] = None,
+        session: Optional[AsyncClientSession] = None,
+        retryable_write: bool = False,
+        user_fields: Optional[Any] = None,
+    ) -> Mapping[str, Any]:
+        """Internal command helper.
+
+        :param conn: An AsyncConnection instance.
+        :param command: The command itself, as a :class:`~bson.son.SON` instance.
+        :param read_preference: (optional) The read preference to use.
+        :param codec_options: (optional) An instance of
+            :class:`~bson.codec_options.CodecOptions`.
+        :param check: raise OperationFailure if there are errors
+        :param allowable_errors: errors to ignore if `check` is True
+        :param read_concern: (optional) An instance of
+            :class:`~pymongo.read_concern.ReadConcern`.
+        :param write_concern: An instance of
+            :class:`~pymongo.write_concern.WriteConcern`.
+        :param collation: (optional) An instance of
+            :class:`~pymongo.collation.Collation`.
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param retryable_write: True if this command is a retryable
+            write.
+        :param user_fields: Response fields that should be decoded
+            using the TypeDecoders from codec_options, passed to
+            bson._decode_all_selective.
+
+        :return: The result document.
+        """
+        async with self._database.client._tmp_session(session) as s:
+            return await conn.command(
+                self._database.name,
+                command,
+                read_preference or self._read_preference_for(session),
+                codec_options or self.codec_options,
+                check,
+                allowable_errors,
+                read_concern=read_concern,
+                write_concern=write_concern,
+                parse_write_concern_error=True,
+                collation=collation,
+                session=s,
+                client=self._database.client,
+                retryable_write=retryable_write,
+                user_fields=user_fields,
+            )
+
+    async def _create_helper(
+        self,
+        name: str,
+        options: MutableMapping[str, Any],
+        collation: Optional[_CollationIn],
+        session: Optional[AsyncClientSession],
+        encrypted_fields: Optional[Mapping[str, Any]] = None,
+        qev2_required: bool = False,
+    ) -> None:
+        """Sends a create command with the given options."""
+        cmd: dict[str, Any] = {"create": name}
+        if encrypted_fields:
+            cmd["encryptedFields"] = encrypted_fields
+
+        if options:
+            if "size" in options:
+                options["size"] = float(options["size"])
+            cmd.update(options)
+        async with await self._conn_for_writes(session, operation=_Op.CREATE) as conn:
+            if qev2_required and conn.max_wire_version < 21:
+                raise ConfigurationError(
+                    "Driver support of Queryable Encryption is incompatible with server. "
+                    "Upgrade server to use Queryable Encryption. "
+                    f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)"
+                )
+
+            await self._command(
+                conn,
+                cmd,
+                read_preference=ReadPreference.PRIMARY,
+                write_concern=self._write_concern_for(session),
+                collation=collation,
+                session=session,
+            )
+
+    async def _create(
+        self,
+        options: MutableMapping[str, Any],
+        session: Optional[AsyncClientSession],
+    ) -> None:
+        collation = validate_collation_or_none(options.pop("collation", None))
+        encrypted_fields = options.pop("encryptedFields", None)
+        if encrypted_fields:
+            common.validate_is_mapping("encrypted_fields", encrypted_fields)
+            opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}}
+            await self._create_helper(
+                _esc_coll_name(encrypted_fields, self._name),
+                opts,
+                None,
+                session,
+                qev2_required=True,
+            )
+            await self._create_helper(
+                _ecoc_coll_name(encrypted_fields, self._name), opts, None, session
+            )
+            await self._create_helper(
+                self._name, options, collation, session, encrypted_fields=encrypted_fields
+            )
+            await self.create_index([("__safeContent__", ASCENDING)], session)
+        else:
+            await self._create_helper(self._name, options, collation, session)
+
+    @_csot.apply
+    async def bulk_write(
+        self,
+        requests: Sequence[_WriteOp[_DocumentType]],
+        ordered: bool = True,
+        bypass_document_validation: Optional[bool] = None,
+        session: Optional[AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+        let: Optional[Mapping[str, Any]] = None,
+    ) -> BulkWriteResult:
+        """Send a batch of write operations to the server.
+ + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + ... + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}), + ... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)] + >>> result = await db.test.bulk_write(requests) + >>> result.inserted_count + 1 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_ids + {2: ObjectId('54f62ee28891e756a6e1abd5')} + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + + :param requests: A list of write operations (see examples above). + :param ordered: If ``True`` (the default) requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False`` requests + will be performed on the server in arbitrary order, possibly in + parallel, and all operations will be attempted. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + + :return: An instance of :class:`~pymongo.results.BulkWriteResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. 
versionadded:: 3.0 + """ + common.validate_list("requests", requests) + + blk = _AsyncBulk(self, ordered, bypass_document_validation, comment=comment, let=let) + for request in requests: + try: + request._add_to_bulk(blk) + except AttributeError: + raise TypeError(f"{request!r} is not a valid request") from None + + write_concern = self._write_concern_for(session) + bulk_api_result = await blk.execute(write_concern, session, _Op.INSERT) + if bulk_api_result is not None: + return BulkWriteResult(bulk_api_result, True) + return BulkWriteResult({}, False) + + async def _insert_one( + self, + doc: Mapping[str, Any], + ordered: bool, + write_concern: WriteConcern, + op_id: Optional[int], + bypass_doc_val: Optional[bool], + session: Optional[AsyncClientSession], + comment: Optional[Any] = None, + ) -> Any: + """Internal helper for inserting a single document.""" + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + command = {"insert": self.name, "ordered": ordered, "documents": [doc]} + if comment is not None: + command["comment"] = comment + + async def _insert_command( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool + ) -> None: + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val + + result = await conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + + _check_write_command_response(result) + + await self._database.client._retryable_write( + acknowledged, _insert_command, session, operation=_Op.INSERT + ) + + if not isinstance(doc, RawBSONDocument): + return doc.get("_id") + return None + + async def insert_one( + self, + document: Union[_DocumentType, RawBSONDocument], + bypass_document_validation: Optional[bool] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertOneResult: + """Insert a single document. + + >>> await db.test.count_documents({'x': 1}) + 0 + >>> result = await db.test.insert_one({'x': 1}) + >>> result.inserted_id + ObjectId('54f112defba522406c9cc208') + >>> await db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} + + :param document: The document to insert. Must be a mutable mapping + type. If the document does not have an _id field one will be + added automatically. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.InsertOneResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. 
versionadded:: 3.0
+        """
+        common.validate_is_document_type("document", document)
+        if not (isinstance(document, RawBSONDocument) or "_id" in document):
+            document["_id"] = ObjectId()  # type: ignore[index]
+
+        write_concern = self._write_concern_for(session)
+        return InsertOneResult(
+            await self._insert_one(
+                document,
+                ordered=True,
+                write_concern=write_concern,
+                op_id=None,
+                bypass_doc_val=bypass_document_validation,
+                session=session,
+                comment=comment,
+            ),
+            write_concern.acknowledged,
+        )
+
+    @_csot.apply
+    async def insert_many(
+        self,
+        documents: Iterable[Union[_DocumentType, RawBSONDocument]],
+        ordered: bool = True,
+        bypass_document_validation: Optional[bool] = None,
+        session: Optional[AsyncClientSession] = None,
+        comment: Optional[Any] = None,
+    ) -> InsertManyResult:
+        """Insert an iterable of documents.
+
+        >>> await db.test.count_documents({})
+        0
+        >>> result = await db.test.insert_many([{'x': i} for i in range(2)])
+        >>> result.inserted_ids
+        [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
+        >>> await db.test.count_documents({})
+        2
+
+        :param documents: An iterable of documents to insert.
+        :param ordered: If ``True`` (the default) documents will be
+            inserted on the server serially, in the order provided. If an error
+            occurs all remaining inserts are aborted. If ``False``, documents
+            will be inserted on the server in arbitrary order, possibly in
+            parallel, and all document inserts will be attempted.
+        :param bypass_document_validation: (optional) If ``True``, allows the
+            write to opt-out of document level validation. Default is
+            ``False``.
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+
+        :return: An instance of :class:`~pymongo.results.InsertManyResult`.
+
+        .. seealso:: `Writes and ids `_
+
+        .. note:: `bypass_document_validation` requires server version
+            **>= 3.2**
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+
+        .. versionchanged:: 3.2
+           Added bypass_document_validation support
+
+        ..
versionadded:: 3.0 + """ + if ( + not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents + ): + raise TypeError("documents must be a non-empty list") + inserted_ids: list[ObjectId] = [] + + def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: + """A generator that validates documents and handles _ids.""" + for document in documents: + common.validate_is_document_type("document", document) + if not isinstance(document, RawBSONDocument): + if "_id" not in document: + document["_id"] = ObjectId() # type: ignore[index] + inserted_ids.append(document["_id"]) + yield (message._INSERT, document) + + write_concern = self._write_concern_for(session) + blk = _AsyncBulk(self, ordered, bypass_document_validation, comment=comment) + blk.ops = list(gen()) + await blk.execute(write_concern, session, _Op.INSERT) + return InsertManyResult(inserted_ids, write_concern.acknowledged) + + async def _update( + self, + conn: AsyncConnection, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + validate_boolean("upsert", upsert) + collation = validate_collation_or_none(collation) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + update_doc: dict[str, Any] = { + "q": criteria, + "u": document, + "multi": multi, + "upsert": upsert, + } + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + update_doc["collation"] = collation + if array_filters is not None: + if not acknowledged: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + else: + update_doc["arrayFilters"] = array_filters + if hint is not None: + if not acknowledged and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + update_doc["hint"] = hint + if sort is not None: + if not acknowledged and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + common.validate_is_mapping("sort", sort) + update_doc["sort"] = sort + + command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} + if let is not None: + common.validate_is_mapping("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + # Update command. + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val + + # The command result has to be published for APM unmodified + # so we make a shallow copy here before adding updatedExisting. 
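+        # For example (hypothetical values): a server reply of
+        # {"n": 1, "nModified": 1} reaches the caller below as
+        # {"n": 1, "nModified": 1, "updatedExisting": True}, while command
+        # monitoring still observes the unmodified original.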
+ result = ( + await conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + ).copy() + _check_write_command_response(result) + # Add the updatedExisting field for compatibility. + if result.get("n") and "upserted" not in result: + result["updatedExisting"] = True + else: + result["updatedExisting"] = False + # MongoDB >= 2.6.0 returns the upsert _id in an array + # element. Break it out for backward compatibility. + if "upserted" in result: + result["upserted"] = result["upserted"][0]["_id"] + + if not acknowledged: + return None + return result + + async def _update_retryable( + self, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + operation: str, + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + + async def _update( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool + ) -> Optional[Mapping[str, Any]]: + return await self._update( + conn, + criteria, + document, + upsert=upsert, + multi=multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + bypass_doc_val=bypass_doc_val, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + sort=sort, + comment=comment, + ) + + return await self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _update, + session, + operation, + ) + + async def replace_one( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Replace a single document matching the filter. + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + >>> result = await db.test.replace_one({'x': 1}, {'y': 1}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + + The *upsert* option can be used to insert a new document if a matching + document does not exist. + + >>> result = await db.test.replace_one({'x': 1}, {'x': 1}, True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('54f11e5c8891e756a6e1abd4') + >>> await db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} + + :param filter: A query that matches the document to replace. + :param replacement: The new document. 
+ :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.11 + Added ``sort`` parameter. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_replace(replacement) + if let is not None: + common.validate_is_mapping("let", let) + write_concern = self._write_concern_for(session) + return UpdateResult( + await self._update_retryable( + filter, + replacement, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + hint=hint, + session=session, + let=let, + sort=sort, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def update_one( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update a single document matching the filter. + + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = await db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + If ``upsert=True`` and no documents match the filter, create a + new document based on the filter criteria and update modifications. 
+ + >>> result = await db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('626a678eeaa80587d4bb3fb7') + >>> await db.test.find_one(result.upserted_id) + {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} + + :param filter: A query that matches the document to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.11 + Added ``sort`` parameter. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Added ``bypass_document_validation`` support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + await self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + sort=sort, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def update_many( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update one or more documents that match the filter. + + >>> async for doc in db.test.find(): + ... print(doc) + ... 
+ {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = await db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 3 + >>> result.modified_count + 3 + >>> async for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 4, '_id': 1} + {'x': 4, '_id': 2} + + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + await self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + multi=True, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def drop( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> None: + """Alias for :meth:`~pymongo.asynchronous.database.AsyncDatabase.drop_collection`. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. + + The following two calls are equivalent: + + >>> await db.foo.drop() + >>> await db.drop_collection("foo") + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.7 + :meth:`drop` now respects this :class:`AsyncCollection`'s :attr:`write_concern`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. 
+ """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + await dbo.drop_collection( + self._name, session=session, comment=comment, encrypted_fields=encrypted_fields + ) + + async def _delete( + self, + conn: AsyncConnection, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + common.validate_is_mapping("filter", criteria) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + delete_doc = {"q": criteria, "limit": int(not multi)} + collation = validate_collation_or_none(collation) + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + delete_doc["collation"] = collation + if hint is not None: + if not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + delete_doc["hint"] = hint + command = {"delete": self.name, "ordered": ordered, "deletes": [delete_doc]} + + if let is not None: + common.validate_is_document_type("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + + # Delete command. + result = await conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + _check_write_command_response(result) + return result + + async def _delete_retryable( + self, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + + async def _delete( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool + ) -> Mapping[str, Any]: + return await self._delete( + conn, + criteria, + multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + collation=collation, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + comment=comment, + ) + + return await self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _delete, + session, + operation=_Op.DELETE, + ) + + async def delete_one( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete a single document matching the filter. 
+ + >>> await db.test.count_documents({'x': 1}) + 3 + >>> result = await db.test.delete_one({'x': 1}) + >>> result.deleted_count + 1 + >>> await db.test.count_documents({'x': 1}) + 2 + + :param filter: A query that matches the document to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + write_concern = self._write_concern_for(session) + return DeleteResult( + await self._delete_retryable( + filter, + False, + write_concern=write_concern, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def delete_many( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete one or more documents matching the filter. + + >>> await db.test.count_documents({'x': 1}) + 3 + >>> result = await db.test.delete_many({'x': 1}) + >>> result.deleted_count + 3 + >>> await db.test.count_documents({'x': 1}) + 0 + + :param filter: A query that matches the documents to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. 
versionadded:: 3.0 + """ + write_concern = self._write_concern_for(session) + return DeleteResult( + await self._delete_retryable( + filter, + True, + write_concern=write_concern, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + async def find_one( + self, filter: Optional[Any] = None, *args: Any, **kwargs: Any + ) -> Optional[_DocumentType]: + """Get a single document from the database. + + All arguments to :meth:`find` are also valid arguments for + :meth:`find_one`, although any `limit` argument will be + ignored. Returns a single document, or ``None`` if no matching + document is found. + + The :meth:`find_one` method obeys the :attr:`read_preference` of + this :class:`AsyncCollection`. + + :param filter: a dictionary specifying + the query to be performed OR any other type to be used as + the value for a query for ``"_id"``. + + :param args: any additional positional arguments + are the same as the arguments to :meth:`find`. + + :param kwargs: any additional keyword arguments + are the same as the arguments to :meth:`find`. + + .. code-block:: python + + >>> await collection.find_one(max_time_ms=100) + + """ + if filter is not None and not isinstance(filter, abc.Mapping): + filter = {"_id": filter} + cursor = self.find(filter, *args, **kwargs) + async for result in cursor.limit(-1): + return result + return None + + def find(self, *args: Any, **kwargs: Any) -> AsyncCursor[_DocumentType]: + """Query the database. + + The `filter` argument is a query document that all results + must match. For example: + + >>> db.test.find({"hello": "world"}) + + only matches documents that have a key "hello" with value + "world". Matches can have other keys *in addition* to + "hello". The `projection` argument is used to specify a subset + of fields that should be included in the result documents. By + limiting results to a certain subset of fields you can cut + down on network traffic and decoding time. + + Raises :class:`TypeError` if any of the arguments are of + improper type. Returns an instance of + :class:`~pymongo.asynchronous.cursor.AsyncCursor` corresponding to this query. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in an ``async with`` statement:: + + async with collection.find() as cursor: + async for doc in cursor: + print(doc) + + The :meth:`find` method obeys the :attr:`read_preference` of + this :class:`AsyncCollection`. + + :param filter: A query document that selects which documents + to include in the result set. Can be an empty document to include + all documents. + :param projection: a list of field names that should be + returned in the result set or a dict specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a dict to exclude fields from + the result (e.g. projection={'_id': False}). + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param skip: the number of documents to omit (from + the start of the result set) when returning the results + :param limit: the maximum number of results to + return. A limit of 0 (the default) is equivalent to setting no + limit.
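As the ``find_one`` implementation above shows, a non-mapping ``filter`` is wrapped in ``{"_id": filter}`` and the query runs through :meth:`find` with ``limit(-1)`` (a single-batch limit), so the following calls are equivalent (``oid`` is a placeholder id)::

    >>> from bson import ObjectId
    >>> oid = ObjectId('54f4e12bfba5220aa4d6dee8')
    >>> await collection.find_one(oid)
    >>> await collection.find_one({'_id': oid})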
+ :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param cursor_type: the type of cursor to return. The valid + options are defined by :class:`~pymongo.cursor.CursorType`: + + - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of + this find call will return a standard cursor over the result set. + - :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this + find call will be a tailable cursor - tailable cursors are only + for use with capped collections. They are not closed when the + last data is retrieved but are kept open and the cursor location + marks the final document position. If more data is received + iteration of the cursor will continue from the last document + received. For details, see the `tailable cursor documentation + `_. + - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result + of this find call will be a tailable cursor with the await flag + set. The server will wait for a few seconds after returning the + full result set so that it can capture and return additional data + added during the query. + - :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this + find call will be an exhaust cursor. MongoDB will stream batched + results to the client without waiting for the client to request + each batch, reducing latency. See notes on compatibility below. + + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.sort` for details. + :param allow_partial_results: if True, mongos will return + partial results if some shards are down instead of returning an + error. + :param oplog_replay: **DEPRECATED** - if True, set the + oplogReplay query flag. Default: False. + :param batch_size: Limits the number of documents returned in + a single batch. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param return_key: If True, return only the index keys in + each document. + :param show_record_id: If True, adds a field ``$recordId`` in + each document with the storage engine's internal record identifier. + :param snapshot: **DEPRECATED** - If True, prevents the + cursor from returning a document more than once because of an + intervening write operation. + :param hint: An index, in the same format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). Pass this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.hint` on the cursor to tell Mongo the + proper index to use for the query. + :param max_time_ms: Specifies a time limit for a query + operation. If the specified time is exceeded, the operation will be + aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass + this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.max_time_ms` on the cursor. + :param max_scan: **DEPRECATED** - The maximum number of + documents to scan. Pass this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.max_scan` on the cursor. + :param min: A list of field, limit pairs specifying the + inclusive lower bound for all keys of a specific index in order. 
+ Pass this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.min` on the cursor. ``hint`` must + also be passed to ensure the query utilizes the correct index. + :param max: A list of field, limit pairs specifying the + exclusive upper bound for all keys of a specific index in order. + Pass this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.max` on the cursor. ``hint`` must + also be passed to ensure the query utilizes the correct index. + :param comment: A string to attach to the query to help + interpret and trace the operation in the server logs and in profile + data. Pass this as an alternative to calling + :meth:`~pymongo.asynchronous.cursor.AsyncCursor.comment` on the cursor. + :param allow_disk_use: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. The option has no effect if + MongoDB can satisfy the specified sort using an index, or if the + blocking sort requires less memory than the 100 MiB limit. This + option is only supported on MongoDB 4.4 and above. + + .. note:: There are a number of caveats to using + :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type: + + - The `limit` option can not be used with an exhaust cursor. + + - Exhaust cursors are not supported by mongos and can not be + used with a sharded cluster. + + - A :class:`~pymongo.cursor.AsyncCursor` instance created with the + :attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an + exclusive :class:`~socket.socket` connection to MongoDB. If the + :class:`~pymongo.asynchronous.cursor.AsyncCursor` is discarded without being + completely iterated the underlying :class:`~socket.socket` + connection will be closed and discarded without being returned to + the connection pool. + + .. versionchanged:: 4.0 + Removed the ``modifiers`` option. + Empty projections (eg {} or []) are passed to the server as-is, + rather than the previous behavior which substituted in a + projection of ``{"_id": 1}``. This means that an empty projection + will now return the entire document, not just the ``"_id"`` field. + + .. versionchanged:: 3.11 + Added the ``allow_disk_use`` option. + Deprecated the ``oplog_replay`` option. Support for this option is + deprecated in MongoDB 4.4. The query engine now automatically + optimizes queries against the oplog without requiring this + option to be set. + + .. versionchanged:: 3.7 + Deprecated the ``snapshot`` option, which is deprecated in MongoDB + 3.6 and removed in MongoDB 4.0. + Deprecated the ``max_scan`` option. Support for this option is + deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit + server-side execution time. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.5 + Added the options ``return_key``, ``show_record_id``, ``snapshot``, + ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and + ``comment``. + Deprecated the ``modifiers`` option. + + .. versionchanged:: 3.4 + Added support for the ``collation`` option. + + .. versionchanged:: 3.0 + Changed the parameter names ``spec``, ``fields``, ``timeout``, and + ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``, + and ``allow_partial_results`` respectively. + Added the ``cursor_type``, ``oplog_replay``, and ``modifiers`` + options. 
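Pulling a few of the :meth:`find` options above together, a usage sketch (the field names are illustrative)::

    >>> import pymongo
    >>> cursor = db.test.find(
    ...     {'status': 'active'},
    ...     projection={'_id': False, 'name': True},
    ...     sort=[('name', pymongo.ASCENDING)],
    ...     limit=10,
    ...     batch_size=5,
    ... )
    >>> async for doc in cursor:
    ...     print(doc)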
+ Removed the ``network_timeout``, ``read_preference``, ``tag_sets``, + ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``, + ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and + slave_okay parameters. + Removed ``compile_re`` option: PyMongo now always + represents BSON regular expressions as :class:`~bson.regex.Regex` + objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to + convert from a BSON regular expression to a Python regular + expression object. + Soft deprecated the ``manipulate`` option. + + .. seealso:: The MongoDB documentation on `find `_. + """ + return AsyncCursor(self, *args, **kwargs) + + def find_raw_batches(self, *args: Any, **kwargs: Any) -> AsyncRawBatchCursor[_DocumentType]: + """Query the database and retrieve batches of raw BSON. + + Similar to the :meth:`find` method but returns a + :class:`~pymongo.asynchronous.cursor.AsyncRawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = db.test.find_raw_batches() + >>> async for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: find_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Instead of ignoring the user-specified read concern, this method + now sends it to the server when connected to MongoDB 3.6+. + + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. + if self._database.client._encrypter: + raise InvalidOperation("find_raw_batches does not support auto encryption") + return AsyncRawBatchCursor(self, *args, **kwargs) + + async def _count_cmd( + self, + session: Optional[AsyncClientSession], + conn: AsyncConnection, + read_preference: Optional[_ServerMode], + cmd: dict[str, Any], + collation: Optional[Collation], + ) -> int: + """Internal count command helper.""" + res = await self._command( + conn, + cmd, + read_preference=read_preference, + codec_options=self._write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + return int(res["n"]) + + async def _aggregate_one_result( + self, + conn: AsyncConnection, + read_preference: Optional[_ServerMode], + cmd: dict[str, Any], + collation: Optional[_CollationIn], + session: Optional[AsyncClientSession], + ) -> Optional[Mapping[str, Any]]: + """Internal helper to run an aggregate that returns a single result.""" + result = await self._command( + conn, + cmd, + read_preference, + allowable_errors=[26], # Ignore NamespaceNotFound. + codec_options=self._write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + # cursor will not be present for NamespaceNotFound errors. + if "cursor" not in result: + return None + batch = result["cursor"]["firstBatch"] + return batch[0] if batch else None + + async def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: + """Get an estimate of the number of documents in this collection using + collection metadata. + + The :meth:`estimated_document_count` method is **not** supported in a + transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. 
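For example, a sketch with an illustrative result (the estimate comes from collection metadata, so it can differ from an exact count)::

    >>> await db.test.estimated_document_count(maxTimeMS=1000)
    10042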
+ + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + .. versionchanged:: 4.2 + This method now always uses the `count`_ command. Due to an oversight in versions + 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the + `versioned API `_. Users of the Stable API with estimated_document_count are + recommended to upgrade their server version to 5.0.9+ or set + :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. + + .. versionadded:: 3.7 + .. _count: https://mongodb.com/docs/manual/reference/command/count/ + """ + if "session" in kwargs: + raise ConfigurationError("estimated_document_count does not support sessions") + if comment is not None: + kwargs["comment"] = comment + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"count": self._name} + cmd.update(kwargs) + return await self._count_cmd(session, conn, read_preference, cmd, collation=None) + + return await self._retryable_non_cursor_read(_cmd, None, operation=_Op.COUNT) + + async def count_documents( + self, + filter: Mapping[str, Any], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> int: + """Count the number of documents in this collection. + + .. note:: For a fast count of the total documents in a collection see + :meth:`estimated_document_count`. + + The :meth:`count_documents` method is supported in a transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `skip` (int): The number of matching documents to skip before + returning results. + - `limit` (int): The maximum number of documents to count. Must be + a positive integer. If not provided, no limit is imposed. + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `hint` (string or list of tuples): The index to use. Specify either + the index name as a string or the index specification as a list of + tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). + + The :meth:`count_documents` method obeys the :attr:`read_preference` of + this :class:`AsyncCollection`. + + .. note:: When migrating from :meth:`count` to :meth:`count_documents` + the following query operators must be replaced: + + +-------------+-------------------------------------+ + | Operator | Replacement | + +=============+=====================================+ + | $where | `$expr`_ | + +-------------+-------------------------------------+ + | $near | `$geoWithin`_ with `$center`_ | + +-------------+-------------------------------------+ + | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | + +-------------+-------------------------------------+ + + :param filter: A query document that selects which documents + to count in the collection. Can be an empty document to count all + documents. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + + .. versionadded:: 3.7 + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ + .. 
_$center: https://mongodb.com/docs/manual/reference/operator/query/center/ + .. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ + """ + pipeline = [{"$match": filter}] + if "skip" in kwargs: + pipeline.append({"$skip": kwargs.pop("skip")}) + if "limit" in kwargs: + pipeline.append({"$limit": kwargs.pop("limit")}) + if comment is not None: + kwargs["comment"] = comment + pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) + if "hint" in kwargs and not isinstance(kwargs["hint"], str): + kwargs["hint"] = helpers_shared._index_document(kwargs["hint"]) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} + cmd.update(kwargs) + result = await self._aggregate_one_result( + conn, read_preference, cmd, collation, session + ) + if not result: + return 0 + return result["n"] + + return await self._retryable_non_cursor_read(_cmd, session, _Op.COUNT) + + async def _retryable_non_cursor_read( + self, + func: Callable[ + [Optional[AsyncClientSession], Server, AsyncConnection, Optional[_ServerMode]], + Coroutine[Any, Any, T], + ], + session: Optional[AsyncClientSession], + operation: str, + ) -> T: + """Non-cursor read helper to handle implicit session creation.""" + client = self._database.client + async with client._tmp_session(session) as s: + return await client._retryable_read(func, self._read_preference_for(s), s, operation) + + async def create_indexes( + self, + indexes: Sequence[IndexModel], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create one or more indexes on this collection. + + >>> from pymongo import IndexModel, ASCENDING, DESCENDING + >>> index1 = IndexModel([("hello", DESCENDING), + ... ("world", ASCENDING)], name="hello_world") + >>> index2 = IndexModel([("goodbye", DESCENDING)]) + >>> await db.test.create_indexes([index1, index2]) + ["hello_world", "goodbye_-1"] + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + + + + .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + .. versionadded:: 3.0 + + .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ + """ + common.validate_list("indexes", indexes) + if comment is not None: + kwargs["comment"] = comment + return await self._create_indexes(indexes, session, **kwargs) + + @_csot.apply + async def _create_indexes( + self, indexes: Sequence[IndexModel], session: Optional[AsyncClientSession], **kwargs: Any + ) -> list[str]: + """Internal createIndexes helper. + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. 
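An :class:`~pymongo.operations.IndexModel` accepts the same options as :meth:`create_index`; a sketch building a unique index and a TTL index in one round trip (field names and the ``maxTimeMS`` value are illustrative)::

    >>> from pymongo import IndexModel, ASCENDING
    >>> models = [
    ...     IndexModel([('email', ASCENDING)], unique=True),
    ...     IndexModel([('created_at', ASCENDING)], expireAfterSeconds=3600),
    ... ]
    >>> await db.test.create_indexes(models, maxTimeMS=500)
    ['email_1', 'created_at_1']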
+ :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + """ + names = [] + async with await self._conn_for_writes(session, operation=_Op.CREATE_INDEXES) as conn: + supports_quorum = conn.max_wire_version >= 9 + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in indexes: + if not isinstance(index, IndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.IndexModel" + ) + document = index.document + names.append(document["name"]) + yield document + + cmd = {"createIndexes": self.name, "indexes": list(gen_indexes())} + cmd.update(kwargs) + if "commitQuorum" in kwargs and not supports_quorum: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use the " + "commitQuorum option for createIndexes" + ) + + await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + write_concern=self._write_concern_for(session), + session=session, + ) + return names + + async def create_index( + self, + keys: _IndexKeyHint, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> str: + """Creates an index on this collection. + + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str` and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). + + To create a single key ascending index on the key ``'mike'`` we just + use a string argument:: + + >>> await my_collection.create_index("mike") + + For a compound index on ``'mike'`` descending and ``'eliot'`` + ascending we need to use a list of tuples:: + + >>> await my_collection.create_index([("mike", pymongo.DESCENDING), + ... "eliot"]) + + All optional index creation parameters should be passed as + keyword arguments to this method. For example:: + + >>> await my_collection.create_index([("mike", pymongo.DESCENDING)], + ... background=True) + + Valid options include, but are not limited to: + + - `name`: custom name to use for this index - if none is + given, a name will be generated. + - `unique`: if ``True``, creates a uniqueness constraint on the + index. + - `background`: if ``True``, this index should be created in the + background. + - `sparse`: if ``True``, omit from the index any documents that lack + the indexed field. + - `bucketSize`: for use with geoHaystack indexes. + Number of documents to group together within a certain proximity + to a given longitude and latitude. + - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` + index. + - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` + index. + - `expireAfterSeconds`: Used to create an expiring (TTL) + collection. MongoDB will automatically delete documents from + this collection after seconds. The indexed field must + be a UTC datetime or the data will not expire. + - `partialFilterExpression`: A document that specifies a filter for + a partial index. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `wildcardProjection`: Allows users to include or exclude specific + field paths from a `wildcard index`_ using the {"$**" : 1} key + pattern. 
Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4. + + See the MongoDB documentation for a full list of supported options by + server version. + + .. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The + option is silently ignored by the server and unique index builds + using the option will fail if a duplicate value is detected. + + .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + :param keys: a single key or a list of (key, direction) + pairs specifying the index to create + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: any additional index creation + options (see the above list) should be passed as keyword + arguments. + + .. versionchanged:: 4.4 + Allow passing a list containing (key, direction) pairs + or keys for the ``keys`` parameter. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added the ``hidden`` option. + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for passing maxTimeMS + in kwargs. + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. Support the `collation` option. + .. versionchanged:: 3.2 + Added partialFilterExpression to support partial indexes. + .. versionchanged:: 3.0 + Renamed `key_or_list` to `keys`. Removed the `cache_for` option. + :meth:`create_index` no longer caches index names. Removed support + for the drop_dups and bucket_size aliases. + + .. seealso:: The MongoDB documentation on `indexes `_. + + .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ + """ + cmd_options = {} + if "maxTimeMS" in kwargs: + cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") + if comment is not None: + cmd_options["comment"] = comment + index = IndexModel(keys, **kwargs) + return (await self._create_indexes([index], session, **cmd_options))[0] + + async def drop_indexes( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Drops all indexes on this collection. + + Can be used on non-existent collections or collections with no indexes. + Raises OperationFailure on an error. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. 
+ """ + if comment is not None: + kwargs["comment"] = comment + await self._drop_index("*", session=session, **kwargs) + + @_csot.apply + async def drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Drops the specified index on this collection. + + Can be used on non-existent collections or collections with no + indexes. Raises OperationFailure on an error (e.g. trying to + drop an index that does not exist). `index_or_name` + can be either an index name (as returned by `create_index`), + or an index specifier (as passed to `create_index`). An index + specifier should be a list of (key, direction) pairs. Raises + TypeError if index is not an instance of (str, unicode, list). + + .. warning:: + + if a custom name was used on index creation (by + passing the `name` parameter to :meth:`create_index`) the index + **must** be dropped by name. + + :param index_or_name: index (or name of index) to drop + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + + + .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + await self._drop_index(index_or_name, session, comment, **kwargs) + + @_csot.apply + async def _drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + name = index_or_name + if isinstance(index_or_name, list): + name = helpers_shared._gen_index_name(index_or_name) + + if not isinstance(name, str): + raise TypeError(f"index_or_name must be an instance of str or list, not {type(name)}") + + cmd = {"dropIndexes": self._name, "index": name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + async with await self._conn_for_writes(session, operation=_Op.DROP_INDEXES) as conn: + await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + session=session, + ) + + async def list_indexes( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the index documents for this collection. + + >>> async for index in await db.test.list_indexes(): + ... print(index) + ... + SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. 
+ To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await collection.list_indexes() as cursor: + async for index in cursor: + print(index) + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionadded:: 3.0 + """ + return await self._list_indexes(session, comment) + + async def _list_indexes( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + codec_options: CodecOptions[Mapping[str, Any]] = CodecOptions(SON) + coll = cast( + AsyncCollection[MutableMapping[str, Any]], + self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), + ) + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + cmd = {"listIndexes": self._name, "cursor": {}} + if comment is not None: + cmd["comment"] = comment + + try: + cursor = ( + await self._command(conn, cmd, read_preference, codec_options, session=session) + )["cursor"] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {"id": 0, "firstBatch": []} + cmd_cursor = AsyncCommandCursor( + coll, + cursor, + conn.address, + session=session, + comment=cmd.get("comment"), + ) + await cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + async with self._database.client._tmp_session(session) as s: + return await self._database.client._retryable_read( + _cmd, read_pref, s, operation=_Op.LIST_INDEXES + ) + + async def index_information( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get information on this collection's indexes. + + Returns a dictionary where the keys are index names (as + returned by create_index()) and the values are dictionaries + containing information about each index. The dictionary is + guaranteed to contain at least a single key, ``"key"`` which + is a list of (key, direction) pairs specifying the index (as + passed to create_index()). It will also contain any other + metadata about the indexes, except for the ``"ns"`` and + ``"name"`` keys, which are cleaned. Example output might look + like this: + + >>> await db.test.create_index("x", unique=True) + 'x_1' + >>> await db.test.index_information() + {'_id_': {'key': [('_id', 1)]}, + 'x_1': {'unique': True, 'key': [('x', 1)]}} + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. 
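Because the returned mapping is keyed by index name, checking whether a particular index exists is a one-liner (a sketch based on the example output above)::

    >>> info = await db.test.index_information()
    >>> 'x_1' in info
    True
    >>> info['x_1']['key']
    [('x', 1)]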
+ """ + cursor = await self._list_indexes(session=session, comment=comment) + info = {} + async for index in cursor: + index["key"] = list(index["key"].items()) + index = dict(index) # noqa: PLW2901 + info[index.pop("name")] = index + return info + + async def list_search_indexes( + self, + name: Optional[str] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[Mapping[str, Any]]: + """Return a cursor over search indexes for the current collection. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await collection.list_search_indexes() as cursor: + async for index in cursor: + print(index) + + :param name: If given, the name of the index to search + for. Only indexes with matching index names will be returned. + If not given, all search indexes for the current collection + will be returned. + :param session: a :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: A :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` over the result + set. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if name is None: + pipeline: _Pipeline = [{"$listSearchIndexes": {}}] + else: + pipeline = [{"$listSearchIndexes": {"name": name}}] + + coll = self.with_options( + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + read_concern=DEFAULT_READ_CONCERN, + ) + cmd = _CollectionAggregationCommand( + coll, + AsyncCommandCursor, + pipeline, + kwargs, + comment=comment, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return await self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.LIST_SEARCH_INDEX, + ) + + async def create_search_index( + self, + model: Union[Mapping[str, Any], SearchIndexModel], + session: Optional[AsyncClientSession] = None, + comment: Any = None, + **kwargs: Any, + ) -> str: + """Create a single search index for the current collection. + + :param model: The model for the new search index. + It can be given as a :class:`~pymongo.operations.SearchIndexModel` + instance or a dictionary with a model "definition" and optional + "name". + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: The name of the new search index. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. 
versionadded:: 4.5 + """ + if not isinstance(model, SearchIndexModel): + model = SearchIndexModel(**model) + return (await self._create_search_indexes([model], session, comment, **kwargs))[0] + + async def create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create multiple search indexes for the current collection. + + :param models: A list of :class:`~pymongo.operations.SearchIndexModel` instances. + :param session: a :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: A list of the newly created search index names. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + return await self._create_search_indexes(models, session, comment, **kwargs) + + async def _create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in models: + if not isinstance(index, SearchIndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.SearchIndexModel" + ) + yield index.document + + cmd = {"createSearchIndexes": self.name, "indexes": list(gen_indexes())} + cmd.update(kwargs) + + async with await self._conn_for_writes( + session, operation=_Op.CREATE_SEARCH_INDEXES + ) as conn: + resp = await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + return [index["name"] for index in resp["indexesCreated"]] + + async def drop_search_index( + self, + name: str, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Delete a search index by index name. + + :param name: The name of the search index to be deleted. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the dropSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = {"dropSearchIndex": self._name, "name": name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + async with await self._conn_for_writes(session, operation=_Op.DROP_SEARCH_INDEXES) as conn: + await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + async def update_search_index( + self, + name: str, + definition: Mapping[str, Any], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Update a search index by replacing the existing index definition with the provided definition. + + :param name: The name of the search index to be updated. + :param definition: The new search index definition. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. 
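Taken together, the search-index helpers above cover the whole lifecycle. A sketch assuming an Atlas cluster; the ``queryable`` field checked here is part of the server's ``listSearchIndexes`` output, not of PyMongo itself::

    >>> name = await coll.create_search_index(
    ...     {'definition': {'mappings': {'dynamic': True}}, 'name': 'my_index'})
    >>> async for index in await coll.list_search_indexes(name):
    ...     print(index['name'], index.get('queryable'))
    >>> await coll.update_search_index(name, {'mappings': {'dynamic': False}})
    >>> await coll.drop_search_index(name)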
+ :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the updateSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = {"updateSearchIndex": self._name, "name": name, "definition": definition} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + async with await self._conn_for_writes(session, operation=_Op.UPDATE_SEARCH_INDEX) as conn: + await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + async def options( + self, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get the options set on this collection. + + Returns a dictionary of options and their values - see + :meth:`~pymongo.asynchronous.database.AsyncDatabase.create_collection` for more + information on the possible options. Returns an empty + dictionary if the collection has not been created yet. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + cursor = await dbo.list_collections( + session=session, filter={"name": self._name}, comment=comment + ) + + result = None + async for doc in cursor: + result = doc + break + + if not result: + return {} + + options = result.get("options", {}) + assert options is not None + if "create" in options: + del options["create"] + + return options + + @_csot.apply + async def _aggregate( + self, + aggregation_command: Type[_AggregationCommand], + pipeline: _Pipeline, + cursor_class: Type[AsyncCommandCursor], # type: ignore[type-arg] + session: Optional[AsyncClientSession], + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[_DocumentType]: + if comment is not None: + kwargs["comment"] = comment + cmd = aggregation_command( + self, + cursor_class, + pipeline, + kwargs, + let, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return await self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + async def aggregate( + self, + pipeline: _Pipeline, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[_DocumentType]: + """Perform an aggregation using the aggregation framework on this + collection. + + The :meth:`aggregate` method obeys the :attr:`read_preference` of this + :class:`AsyncCollection`, except when ``$out`` or ``$merge`` are used on + MongoDB <5.0, in which case + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. + + .. note:: This method does not support the 'explain' option. Please + use `PyMongoExplain `_ + instead. An example is included in the `aggregation example `_ + documentation. + + .. 
note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in an ``async with`` statement:: + + async with await collection.aggregate(pipeline) as cursor: + async for operation in cursor: + print(operation) + + :param pipeline: a list of aggregation pipeline stages + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `bypassDocumentValidation` (bool): If ``True``, allows the write to opt-out of document level validation. + + + :return: A :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` over the result + set. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + Support $merge and $out executing on secondaries according to the + collection's :attr:`read_preference`. + .. versionchanged:: 4.0 + Removed the ``useCursor`` option. + .. versionchanged:: 3.9 + Apply this collection's read concern to pipelines containing the + `$out` stage when connected to MongoDB >= 4.2. + Added support for the ``$merge`` pipeline stage. + Aggregations that write always use read preference + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + .. versionchanged:: 3.6 + Added the `session` parameter. Added the `maxAwaitTimeMS` option. + Deprecated the `useCursor` option. + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. Support the `collation` option. + .. versionchanged:: 3.0 + The :meth:`aggregate` method always returns an AsyncCommandCursor. The + pipeline argument must be a list. + + .. seealso:: `Aggregation `_ + + ..
_aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + async with self._database.client._tmp_session(session) as s: + return await self._aggregate( + _CollectionAggregationCommand, + pipeline, + AsyncCommandCursor, + session=s, + let=let, + comment=comment, + **kwargs, + ) + + async def aggregate_raw_batches( + self, + pipeline: _Pipeline, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncRawBatchCursor[_DocumentType]: + """Perform an aggregation and retrieve batches of raw BSON. + + Similar to the :meth:`aggregate` method but returns a + :class:`~pymongo.asynchronous.cursor.AsyncRawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = await db.test.aggregate_raw_batches([ + ... {'$project': {'x': {'$multiply': [2, '$x']}}}]) + >>> async for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: aggregate_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. + if self._database.client._encrypter: + raise InvalidOperation("aggregate_raw_batches does not support auto encryption") + if comment is not None: + kwargs["comment"] = comment + async with self._database.client._tmp_session(session) as s: + return cast( + AsyncRawBatchCursor[_DocumentType], + await self._aggregate( + _CollectionRawAggregationCommand, + pipeline, + AsyncRawBatchCommandCursor, + session=s, + **kwargs, + ), + ) + + @_csot.apply + async def rename( + self, + new_name: str, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> MutableMapping[str, Any]: + """Rename this collection. + + If operating in auth mode, client must be authorized as an + admin to perform this operation. Raises :class:`TypeError` if + `new_name` is not an instance of :class:`str`. + Raises :class:`~pymongo.errors.InvalidName` + if `new_name` is not a valid collection name. + + :param new_name: new name for this collection + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional arguments to the rename command + may be passed as keyword arguments to this helper method + (i.e. ``dropTarget=True``) + + .. note:: The :attr:`~pymongo.asynchronous.collection.AsyncCollection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + if not isinstance(new_name, str): + raise TypeError(f"new_name must be an instance of str, not {type(new_name)}") + + if not new_name or ".." in new_name: + raise InvalidName("collection names cannot be empty") + if new_name[0] == "." 
or new_name[-1] == ".": + raise InvalidName("collection names must not start or end with '.'") + if "$" in new_name and not new_name.startswith("oplog.$main"): + raise InvalidName("collection names must not contain '$'") + + new_name = f"{self._database.name}.{new_name}" + cmd = {"renameCollection": self._full_name, "to": new_name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + write_concern = self._write_concern_for_cmd(cmd, session) + + async with await self._conn_for_writes(session, operation=_Op.RENAME) as conn: + async with self._database.client._tmp_session(session) as s: + return await conn.command( + "admin", + cmd, + write_concern=write_concern, + parse_write_concern_error=True, + session=s, + client=self._database.client, + ) + + async def distinct( + self, + key: str, + filter: Optional[Mapping[str, Any]] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + hint: Optional[_IndexKeyHint] = None, + **kwargs: Any, + ) -> list[Any]: + """Get a list of distinct values for `key` among all documents + in this collection. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + All optional distinct parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow the count + command to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + + The :meth:`distinct` method obeys the :attr:`read_preference` of + this :class:`AsyncCollection`. + + :param key: name of the field for which we want to get the distinct + values + :param filter: A query document that specifies the documents + from which to retrieve the distinct values. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` + (e.g. ``[('field', ASCENDING)]``). + :param kwargs: See list of options above. + + .. versionchanged:: 4.12 + Added ``hint`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. 
+ + """ + if not isinstance(key, str): + raise TypeError(f"key must be an instance of str, not {type(key)}") + if filter is not None: + if "query" in kwargs: + raise ConfigurationError("can't pass both filter and query") + kwargs["query"] = filter + collation = validate_collation_or_none(kwargs.pop("collation", None)) + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: Optional[_ServerMode], + ) -> list: # type: ignore[type-arg] + cmd = {"distinct": self._name, "key": key} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + if hint is not None: + cmd["hint"] = hint # type: ignore[assignment] + return ( + await self._command( + conn, + cmd, + read_preference=read_preference, + read_concern=self.read_concern, + collation=collation, + session=session, + user_fields={"values": 1}, + ) + )["values"] + + return await self._retryable_non_cursor_read(_cmd, session, operation=_Op.DISTINCT) + + async def _find_and_modify( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + sort: Optional[_IndexList], + upsert: Optional[bool] = None, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Internal findAndModify helper.""" + common.validate_is_mapping("filter", filter) + if not isinstance(return_document, bool): + raise ValueError( + f"return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER, not {type(return_document)}" + ) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + write_concern = self._write_concern_for_cmd(kwargs, session) + + async def _find_and_modify_helper( + session: Optional[AsyncClientSession], conn: AsyncConnection, retryable_write: bool + ) -> Any: + cmd = {"findAndModify": self._name, "query": filter, "new": return_document} + if let is not None: + common.validate_is_mapping("let", let) + cmd["let"] = let + cmd.update(kwargs) + if projection is not None: + cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") + if sort is not None: + cmd["sort"] = helpers_shared._index_document(sort) + if upsert is not None: + validate_boolean("upsert", upsert) + cmd["upsert"] = upsert + acknowledged = write_concern.acknowledged + if array_filters is not None: + if not acknowledged: + raise ConfigurationError( + "arrayFilters is unsupported for unacknowledged writes." + ) + cmd["arrayFilters"] = list(array_filters) + if hint is not None: + if conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." + ) + elif not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." 
+ ) + cmd["hint"] = hint + out = await self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=write_concern, + collation=collation, + session=session, + retryable_write=retryable_write, + user_fields=_FIND_AND_MODIFY_DOC_FIELDS, + ) + _check_write_command_response(out) + + return out.get("value") + + return await self._database.client._retryable_write( + write_concern.acknowledged, + _find_and_modify_helper, + session, + operation=_Op.FIND_AND_MODIFY, + ) + + async def find_one_and_delete( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and deletes it, returning the document. + + >>> await db.test.count_documents({'x': 1}) + 2 + >>> await db.test.find_one_and_delete({'x': 1}) + {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} + >>> await db.test.count_documents({'x': 1}) + 1 + + If multiple documents match *filter*, a *sort* can be applied. + + >>> async for doc in db.test.find({'x': 1}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> await db.test.find_one_and_delete( + ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) + {'x': 1, '_id': 2} + + The *projection* option can be used to limit the fields returned. + + >>> await db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) + {'x': 1} + + :param filter: A query that matches the document to delete. + :param projection: a list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is deleted. + :param hint: An index to use to support the query predicate + specified either by its string name, or in the same format as + passed to :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` + (e.g. ``[('field', ASCENDING)]``). This option is only supported + on MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.asynchronous.collection.AsyncCollection` when connected to MongoDB >= + 3.2. 
Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + kwargs["remove"] = True + if comment is not None: + kwargs["comment"] = comment + return await self._find_and_modify( + filter, projection, sort, let=let, hint=hint, session=session, **kwargs + ) + + async def find_one_and_replace( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and replaces it, returning either the + original or the replaced document. + + The :meth:`find_one_and_replace` method differs from + :meth:`find_one_and_update` by replacing the document matched by + *filter*, rather than modifying the existing document. + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> await db.test.find_one_and_replace({'x': 1}, {'y': 1}) + {'x': 1, '_id': 0} + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + :param filter: A query that matches the document to replace. + :param replacement: The replacement document. + :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is replaced. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was replaced, or ``None`` + if no document matches. If + :attr:`ReturnDocument.AFTER`, returns the replaced + or inserted document. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. 
versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.asynchronous.collection.AsyncCollection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_replace(replacement) + kwargs["update"] = replacement + if comment is not None: + kwargs["comment"] = comment + return await self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + let=let, + hint=hint, + session=session, + **kwargs, + ) + + async def find_one_and_update( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[AsyncClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and updates it, returning either the + original or the updated document. + + >>> await db.test.find_one_and_update( + ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) + {'_id': 665, 'done': False, 'count': 25} + + Returns ``None`` if no document matches the filter. + + >>> await db.test.find_one_and_update( + ... {'_exists': False}, {'$inc': {'count': 1}}) + + When the filter matches, by default :meth:`find_one_and_update` + returns the original version of the document before the update was + applied. To return the updated (or inserted in the case of + *upsert*) version of the document instead, use the *return_document* + option. + + >>> from pymongo import ReturnDocument + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... return_document=ReturnDocument.AFTER) + {'_id': 'userid', 'seq': 1} + + You can limit the fields returned with the *projection* option. + + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... projection={'seq': True, '_id': False}, + ... return_document=ReturnDocument.AFTER) + {'seq': 2} + + The *upsert* option can be used to create the document if it doesn't + already exist. + + >>> (await db.example.delete_many({})).deleted_count + 1 + >>> await db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... projection={'seq': True, '_id': False}, + ... upsert=True, + ... return_document=ReturnDocument.AFTER) + {'seq': 1} + + If multiple documents match *filter*, a *sort* can be applied. + + >>> async for doc in db.test.find({'done': True}): + ... print(doc) + ... + {'_id': 665, 'done': True, 'result': {'count': 26}} + {'_id': 701, 'done': True, 'result': {'count': 17}} + >>> await db.test.find_one_and_update( + ... {'done': True}, + ... {'$set': {'final': True}}, + ... sort=[('_id', pymongo.DESCENDING)]) + {'_id': 701, 'done': True, 'result': {'count': 17}} + + :param filter: A query that matches the document to update. + :param update: The update operations to apply. + :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. 
If `projection` is a list "_id" will + always be returned. Use a dict to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is updated. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was updated. If + :attr:`ReturnDocument.AFTER`, returns the updated + or inserted document. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` options. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.asynchronous.collection.AsyncCollection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + kwargs["update"] = update + if comment is not None: + kwargs["comment"] = comment + return await self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + array_filters, + hint=hint, + let=let, + session=session, + **kwargs, + ) diff --git a/pymongo/asynchronous/command_cursor.py b/pymongo/asynchronous/command_cursor.py new file mode 100644 index 0000000000..e18b3a330e --- /dev/null +++ b/pymongo/asynchronous/command_cursor.py @@ -0,0 +1,472 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
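The find-and-modify family above (find_one_and_delete, find_one_and_replace, find_one_and_update) all funnel into the _find_and_modify helper, which issues a single findAndModify command as a retryable write. The following is a minimal usage sketch, not part of this patch; it assumes a mongod reachable at localhost:27017 and that AsyncMongoClient is importable from the top-level pymongo package (the exact import path may differ on this branch).

import asyncio

from pymongo import AsyncMongoClient, ReturnDocument


async def main() -> None:
    # Assumed connection target; adjust to your deployment.
    client: AsyncMongoClient = AsyncMongoClient("mongodb://localhost:27017")
    coll = client.test.example
    await coll.drop()
    await coll.insert_one({"_id": "userid", "seq": 0})

    # find_one_and_update returns the pre-image by default; ask for the
    # post-image with ReturnDocument.AFTER.
    doc = await coll.find_one_and_update(
        {"_id": "userid"},
        {"$inc": {"seq": 1}},
        return_document=ReturnDocument.AFTER,
    )
    assert doc == {"_id": "userid", "seq": 1}

    # find_one_and_replace swaps out the whole document (keeping _id);
    # find_one_and_delete removes it and returns what was deleted.
    await coll.find_one_and_replace({"_id": "userid"}, {"seq": 100})
    deleted = await coll.find_one_and_delete({"_id": "userid"})
    assert deleted == {"_id": "userid", "seq": 100}

    await client.close()


asyncio.run(main())

All three calls are single round trips on the server side, which is what makes them safe for read-modify-write patterns where a separate find() followed by an update() would race.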
+ +"""CommandCursor class to iterate over command results.""" +from __future__ import annotations + +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + AsyncIterator, + Generic, + Mapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import CodecOptions, _convert_raw_document_lists_to_streams +from pymongo import _csot +from pymongo.asynchronous.cursor import _ConnectionManager +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _RawBatchGetMore, +) +from pymongo.response import PinnedResponse +from pymongo.typings import _Address, _DocumentOut, _DocumentType + +if TYPE_CHECKING: + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.pool import AsyncConnection + +_IS_SYNC = False + + +class AsyncCommandCursor(Generic[_DocumentType]): + """An asynchronous cursor / iterator over command cursors.""" + + _getmore_class = _GetMore + + def __init__( + self, + collection: AsyncCollection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[AsyncClientSession] = None, + comment: Any = None, + ) -> None: + """Create a new command cursor.""" + self._sock_mgr: Any = None + self._collection: AsyncCollection[_DocumentType] = collection + self._id = cursor_info["id"] + self._data = deque(cursor_info["firstBatch"]) + self._postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( + "postBatchResumeToken" + ) + self._address = address + self._batch_size = batch_size + self._max_await_time_ms = max_await_time_ms + self._timeout = self._collection.database.client.options.timeout + self._session = session + if self._session is not None: + self._session._attached_to_cursor = True + self._killed = self._id == 0 + self._comment = comment + if self._killed: + self._end_session() + + if "ns" in cursor_info: # noqa: SIM401 + self._ns = cursor_info["ns"] + else: + self._ns = collection.full_name + + self.batch_size(batch_size) + + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) + + def __del__(self) -> None: + self._die_no_lock() + + def batch_size(self, batch_size: int) -> AsyncCommandCursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + + :param batch_size: The size of each batch of results requested. 
+ """ + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + + self._batch_size = batch_size == 1 and 2 or batch_size + return self + + def _has_next(self) -> bool: + """Returns `True` if the cursor has documents remaining from the + previous batch. + """ + return len(self._data) > 0 + + @property + def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]: + """Retrieve the postBatchResumeToken from the response to a + changeStream aggregate or getMore. + """ + return self._postbatchresumetoken + + async def _maybe_pin_connection(self, conn: AsyncConnection) -> None: + client = self._collection.database.client + if not client._should_pin_cursor(self._session): + return + if not self._sock_mgr: + conn.pin_cursor() + conn_mgr = _ConnectionManager(conn, False) + # Ensure the connection gets returned when the entire result is + # returned in the first batch. + if self._id == 0: + await conn_mgr.close() + else: + self._sock_mgr = conn_mgr + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration`. Best to use a for loop:: + + async for doc in collection.aggregate(pipeline): + print(doc) + + .. note:: :attr:`alive` can be True while iterating a cursor from + a failed server. In this case :attr:`alive` will return False after + :meth:`next` fails to retrieve the next batch of results from the + server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> int: + """Returns the id of the cursor.""" + return self._id + + @property + def address(self) -> Optional[_Address]: + """The (host, port) of the server used, or None. + + .. versionadded:: 3.0 + """ + return self._address + + @property + def session(self) -> Optional[AsyncClientSession]: + """The cursor's :class:`~pymongo.asynchronous.client_session.AsyncClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._session and not self._session._implicit: + return self._session + return None + + def _prepare_to_die(self) -> tuple[int, Optional[_CursorAddress]]: + already_killed = self._killed + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, self._ns) + else: + # Skip killCursors. 
+ cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + cursor_id, address = self._prepare_to_die() + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + async def _die_lock(self) -> None: + """Closes this cursor.""" + cursor_id, address = self._prepare_to_die() + await self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def _end_session(self) -> None: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session._end_implicit_session() + self._session = None + + async def close(self) -> None: + """Explicitly close / kill this cursor.""" + await self._die_lock() + + async def _send_message(self, operation: _GetMore) -> None: + """Send a getmore message and handle the response.""" + client = self._collection.database.client + try: + response = await client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + # Return the session and pinned connection, if necessary. + await self.close() + raise + except ConnectionFailure: + # Don't send killCursors because the cursor is already closed. + self._killed = True + # Return the session and pinned connection, if necessary. + await self.close() + raise + except Exception: + await self.close() + raise + + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + if response.from_command: + cursor = response.docs[0]["cursor"] + documents = cursor["nextBatch"] + self._postbatchresumetoken = cursor.get("postBatchResumeToken") + self._id = cursor["id"] + else: + documents = response.docs + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + + if self._id == 0: + await self.close() + self._data = deque(documents) + + async def _refresh(self) -> int: + """Refreshes the cursor with more data from the server. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. 
+ + """ + if len(self._data) or self._killed: + return len(self._data) + + if self._id: # Get More + dbname, collname = self._ns.split(".", 1) + read_pref = self._collection._read_preference_for(self.session) + await self._send_message( + self._getmore_class( + dbname, + collname, + self._batch_size, + self._id, + self._collection.codec_options, + read_pref, + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + False, + self._comment, + ) + ) + else: # Cursor id is zero, nothing else to return + await self._die_lock() + + return len(self._data) + + def __aiter__(self) -> AsyncIterator[_DocumentType]: + return self + + async def next(self) -> _DocumentType: + """Advance the cursor.""" + # Block until a document is returnable. + while self.alive: + doc = await self._try_next(True) + if doc is not None: + return doc + + raise StopAsyncIteration + + async def __anext__(self) -> _DocumentType: + return await self.next() + + async def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]: + """Advance the cursor blocking for at most one getMore command.""" + if not len(self._data) and not self._killed and get_more_allowed: + await self._refresh() + if len(self._data): + return self._data.popleft() + else: + return None + + async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] + """Get all or some available documents from the cursor.""" + if not len(self._data) and not self._killed: + await self._refresh() + if len(self._data): + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) + return True + else: + return False + + async def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next document without waiting + indefinitely for data. + + If no document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned; otherwise, if the getMore returns no documents + (because there is no additional data), ``None`` is returned. + + :return: The next document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. versionadded:: 4.5 + """ + return await self._try_next(get_more_allowed=True) + + async def __aenter__(self) -> AsyncCommandCursor[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + @_csot.apply + async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: + """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. + + To use:: + + >>> await cursor.to_list() + + Or, to read at most n items from the cursor:: + + >>> await cursor.to_list(n) + + If the cursor is empty or has no more results, an empty list will be returned. + + .. 
versionadded:: 4.9 + """ + res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") + while self.alive: + if not await self._next_batch(res, remaining): + break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break + return res + + +class AsyncRawBatchCommandCursor(AsyncCommandCursor[_DocumentType]): + _getmore_class = _RawBatchGetMore + + def __init__( + self, + collection: AsyncCollection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[AsyncClientSession] = None, + comment: Any = None, + ) -> None: + """Create a new cursor / iterator over raw batches of BSON data. + + Should not be called directly by application developers - + see :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate_raw_batches` + instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + assert not cursor_info.get("firstBatch") + super().__init__( + collection, + cursor_info, + address, + batch_size, + max_await_time_ms, + session, + comment, + ) + + def _unpack_response( # type: ignore[override] + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[dict[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[Mapping[str, Any]]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return raw_response # type: ignore[return-value] + + def __getitem__(self, index: int) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on AsyncRawBatchCommandCursor") diff --git a/pymongo/asynchronous/cursor.py b/pymongo/asynchronous/cursor.py new file mode 100644 index 0000000000..f19d3f6cee --- /dev/null +++ b/pymongo/asynchronous/cursor.py @@ -0,0 +1,1374 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
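AsyncCommandCursor above is what command-generating helpers such as AsyncCollection.aggregate() hand back: the server's firstBatch is buffered in a deque, and _refresh() issues getMore commands until the server reports a cursor id of 0. The following consumption sketch is illustrative only, under the same assumptions as the earlier example (local mongod, top-level AsyncMongoClient import); it exercises the batchSize option and the to_list() helper documented above.

import asyncio

from pymongo import AsyncMongoClient


async def main() -> None:
    client: AsyncMongoClient = AsyncMongoClient("mongodb://localhost:27017")
    coll = client.test.numbers
    await coll.drop()
    await coll.insert_many([{"n": i} for i in range(10)])

    # batchSize=2 forces several getMore round trips; async-for drains
    # the cursor one buffered batch at a time.
    cursor = await coll.aggregate([{"$match": {"n": {"$gte": 5}}}], batchSize=2)
    async for doc in cursor:
        print(doc["n"])  # prints 5 through 9 for this data set

    # to_list(length) reads at most `length` documents in one call.
    cursor = await coll.aggregate([{"$sort": {"n": 1}}])
    first_three = await cursor.to_list(3)
    assert [d["n"] for d in first_three] == [0, 1, 2]

    await client.close()


asyncio.run(main())

Note that the cursor is also an async context manager (__aenter__/__aexit__ above), so wrapping iteration in "async with" guarantees the server-side cursor is killed even if the loop exits early.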
+ +"""Cursor class to iterate over Mongo query results.""" +from __future__ import annotations + +import copy +import warnings +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson import RE_TYPE, _convert_raw_document_lists_to_streams +from bson.code import Code +from bson.son import SON +from pymongo import _csot, helpers_shared +from pymongo.collation import validate_collation_or_none +from pymongo.common import ( + validate_is_document_type, + validate_is_mapping, +) +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS, _QUERY_OPTIONS, CursorType, _Hint, _Sort +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.lock import _async_create_lock +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _Query, + _RawBatchGetMore, + _RawBatchQuery, +) +from pymongo.response import PinnedResponse +from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from _typeshed import SupportsItems + + from bson.codec_options import CodecOptions + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.collection import AsyncCollection + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.read_preferences import _ServerMode + +_IS_SYNC = False + + +class _ConnectionManager: + """Used with exhaust cursors to ensure the connection is returned.""" + + def __init__(self, conn: AsyncConnection, more_to_come: bool): + self.conn: Optional[AsyncConnection] = conn + self.more_to_come = more_to_come + self._lock = _async_create_lock() + + def update_exhaust(self, more_to_come: bool) -> None: + self.more_to_come = more_to_come + + async def close(self) -> None: + """Return this instance's connection to the connection pool.""" + if self.conn: + await self.conn.unpin() + self.conn = None + + +class AsyncCursor(Generic[_DocumentType]): + _query_class = _Query + _getmore_class = _GetMore + + def __init__( + self, + collection: AsyncCollection[_DocumentType], + filter: Optional[Mapping[str, Any]] = None, + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + cursor_type: int = CursorType.NON_TAILABLE, + sort: Optional[_Sort] = None, + allow_partial_results: bool = False, + oplog_replay: bool = False, + batch_size: int = 0, + collation: Optional[_CollationIn] = None, + hint: Optional[_Hint] = None, + max_scan: Optional[int] = None, + max_time_ms: Optional[int] = None, + max: Optional[_Sort] = None, + min: Optional[_Sort] = None, + return_key: Optional[bool] = None, + show_record_id: Optional[bool] = None, + snapshot: Optional[bool] = None, + comment: Optional[Any] = None, + session: Optional[AsyncClientSession] = None, + allow_disk_use: Optional[bool] = None, + let: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create a new cursor. + + Should not be called directly by application developers - see + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find` instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + # Initialize all attributes used in __del__ before possibly raising + # an error to avoid attribute errors during garbage collection. 
+ self._collection: AsyncCollection[_DocumentType] = collection + self._id: Any = None + self._exhaust = False + self._sock_mgr: Any = None + self._killed = False + self._session: Optional[AsyncClientSession] + + if session: + self._session = session + self._session._attached_to_cursor = True + else: + self._session = None + + spec: Mapping[str, Any] = filter or {} + validate_is_mapping("filter", spec) + if not isinstance(skip, int): + raise TypeError(f"skip must be an instance of int, not {type(skip)}") + if not isinstance(limit, int): + raise TypeError(f"limit must be an instance of int, not {type(limit)}") + validate_boolean("no_cursor_timeout", no_cursor_timeout) + if no_cursor_timeout and self._session and self._session._implicit: + warnings.warn( + "use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://mongodb.com/docs/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, + stacklevel=2, + ) + if cursor_type not in ( + CursorType.NON_TAILABLE, + CursorType.TAILABLE, + CursorType.TAILABLE_AWAIT, + CursorType.EXHAUST, + ): + raise ValueError("not a valid value for cursor_type") + validate_boolean("allow_partial_results", allow_partial_results) + validate_boolean("oplog_replay", oplog_replay) + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + # Only set if allow_disk_use is provided by the user, else None. + if allow_disk_use is not None: + allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) + + if projection is not None: + projection = helpers_shared._fields_list_to_dict(projection, "projection") + + if let is not None: + validate_is_document_type("let", let) + + self._let = let + self._spec = spec + self._has_filter = filter is not None + self._projection = projection + self._skip = skip + self._limit = limit + self._batch_size = batch_size + self._ordering = sort and helpers_shared._index_document(sort) or None + self._max_scan = max_scan + self._explain = False + self._comment = comment + self._max_time_ms = max_time_ms + self._timeout = self._collection.database.client.options.timeout + self._max_await_time_ms: Optional[int] = None + self._max: Optional[Union[dict[Any, Any], _Sort]] = max + self._min: Optional[Union[dict[Any, Any], _Sort]] = min + self._collation = validate_collation_or_none(collation) + self._return_key = return_key + self._show_record_id = show_record_id + self._allow_disk_use = allow_disk_use + self._snapshot = snapshot + self._hint: Union[str, dict[str, Any], None] + self._set_hint(hint) + + # This is ugly. People want to be able to do cursor[5:5] and + # get an empty result set (old behavior was an + # exception). It's hard to do that right, though, because the + # server uses limit(0) to mean 'no limit'. So we set __empty + # in that case and check for it when iterating. We also unset + # it anytime we change __limit. + self._empty = False + + self._data: deque = deque() # type: ignore[type-arg] + self._address: Optional[_Address] = None + self._retrieved = 0 + + self._codec_options = collection.codec_options + # Read preference is set when the initial find is sent. 
+ self._read_preference: Optional[_ServerMode] = None + self._read_concern = collection.read_concern + + self._query_flags = cursor_type + self._cursor_type = cursor_type + if no_cursor_timeout: + self._query_flags |= _QUERY_OPTIONS["no_timeout"] + if allow_partial_results: + self._query_flags |= _QUERY_OPTIONS["partial"] + if oplog_replay: + self._query_flags |= _QUERY_OPTIONS["oplog_replay"] + + # The namespace to use for find/getMore commands. + self._dbname = collection.database.name + self._collname = collection.name + + # Checking exhaust cursor support requires network IO + if _IS_SYNC: + self._exhaust_checked = True + self._supports_exhaust() # type: ignore[unused-coroutine] + else: + self._exhaust = cursor_type == CursorType.EXHAUST + self._exhaust_checked = False + + async def _supports_exhaust(self) -> None: + # Exhaust cursor support + if self._cursor_type == CursorType.EXHAUST: + if await self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + self._exhaust = True + + @property + def collection(self) -> AsyncCollection[_DocumentType]: + """The :class:`~pymongo.asynchronous.collection.AsyncCollection` that this + :class:`AsyncCursor` is iterating. + """ + return self._collection + + @property + def retrieved(self) -> int: + """The number of documents retrieved so far.""" + return self._retrieved + + def __del__(self) -> None: + self._die_no_lock() + + def clone(self) -> AsyncCursor[_DocumentType]: + """Get a clone of this cursor. + + Returns a new AsyncCursor instance with options matching those that have + been set on the current instance. The clone will be completely + unevaluated, even if the current instance has been partially or + completely evaluated. 
+ """ + return self._clone(True) + + def _clone(self, deepcopy: bool = True, base: Optional[AsyncCursor] = None) -> AsyncCursor: # type: ignore[type-arg] + """Internal clone helper.""" + if not base: + if self._session and not self._session._implicit: + base = self._clone_base(self._session) + else: + base = self._clone_base(None) + + values_to_clone = ( + "spec", + "projection", + "skip", + "limit", + "max_time_ms", + "max_await_time_ms", + "comment", + "max", + "min", + "ordering", + "explain", + "hint", + "batch_size", + "max_scan", + "query_flags", + "collation", + "empty", + "show_record_id", + "return_key", + "allow_disk_use", + "snapshot", + "exhaust", + "has_filter", + "cursor_type", + ) + data = { + k: v for k, v in self.__dict__.items() if k.startswith("_") and k[1:] in values_to_clone + } + if deepcopy: + data = self._deepcopy(data) + base.__dict__.update(data) + return base + + def _clone_base(self, session: Optional[AsyncClientSession]) -> AsyncCursor: # type: ignore[type-arg] + """Creates an empty AsyncCursor object for information to be copied into.""" + return self.__class__(self._collection, session=session) + + def _query_spec(self) -> Mapping[str, Any]: + """Get the spec to use for a query.""" + operators: dict[str, Any] = {} + if self._ordering: + operators["$orderby"] = self._ordering + if self._explain: + operators["$explain"] = True + if self._hint: + operators["$hint"] = self._hint + if self._let: + operators["let"] = self._let + if self._comment: + operators["$comment"] = self._comment + if self._max_scan: + operators["$maxScan"] = self._max_scan + if self._max_time_ms is not None: + operators["$maxTimeMS"] = self._max_time_ms + if self._max: + operators["$max"] = self._max + if self._min: + operators["$min"] = self._min + if self._return_key is not None: + operators["$returnKey"] = self._return_key + if self._show_record_id is not None: + # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. + operators["$showDiskLoc"] = self._show_record_id + if self._snapshot is not None: + operators["$snapshot"] = self._snapshot + + if operators: + # Make a shallow copy so we can cleanly rewind or clone. + spec = dict(self._spec) + + # Allow-listed commands must be wrapped in $query. + if "$query" not in spec: + # $query has to come first + spec = {"$query": spec} + + spec.update(operators) + return spec + # Have to wrap with $query if "query" is the first key. + # We can't just use $query anytime "query" is a key as + # that breaks commands like count and find_and_modify. + # Checking spec.keys()[0] covers the case that the spec + # was passed as an instance of SON or OrderedDict. + elif "query" in self._spec and (len(self._spec) == 1 or next(iter(self._spec)) == "query"): + return {"$query": self._spec} + + return self._spec + + def _check_okay_to_chain(self) -> None: + """Check if it is okay to chain more options onto this cursor.""" + if self._retrieved or self._id is not None: + raise InvalidOperation("cannot set options after executing query") + + async def add_option(self, mask: int) -> AsyncCursor[_DocumentType]: + """Set arbitrary query flags using a bitmask. 
+ + To set the tailable flag: + cursor.add_option(2) + """ + if not isinstance(mask, int): + raise TypeError(f"mask must be an int, not {type(mask)}") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + if await self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + self._exhaust = True + + self._query_flags |= mask + return self + + def remove_option(self, mask: int) -> AsyncCursor[_DocumentType]: + """Unset arbitrary query flags using a bitmask. + + To unset the tailable flag: + cursor.remove_option(2) + """ + if not isinstance(mask, int): + raise TypeError(f"mask must be an int, not {type(mask)}") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + self._exhaust = False + + self._query_flags &= ~mask + return self + + def allow_disk_use(self, allow_disk_use: bool) -> AsyncCursor[_DocumentType]: + """Specifies whether MongoDB can use temporary disk files while + processing a blocking sort operation. + + Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. + + .. note:: `allow_disk_use` requires server version **>= 4.4** + + :param allow_disk_use: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. + + .. versionadded:: 3.11 + """ + if not isinstance(allow_disk_use, bool): + raise TypeError(f"allow_disk_use must be a bool, not {type(allow_disk_use)}") + self._check_okay_to_chain() + + self._allow_disk_use = allow_disk_use + return self + + def limit(self, limit: int) -> AsyncCursor[_DocumentType]: + """Limits the number of results to be returned by this cursor. + + Raises :exc:`TypeError` if `limit` is not an integer. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`AsyncCursor` + has already been used. The last `limit` applied to this cursor + takes precedence. A limit of ``0`` is equivalent to no limit. + + :param limit: the number of results to return + + .. seealso:: The MongoDB documentation on `limit `_. + """ + if not isinstance(limit, int): + raise TypeError(f"limit must be an integer, not {type(limit)}") + if self._exhaust: + raise InvalidOperation("Can't use limit and exhaust together.") + self._check_okay_to_chain() + + self._empty = False + self._limit = limit + return self + + def batch_size(self, batch_size: int) -> AsyncCursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`AsyncCursor` has already been used. The last `batch_size` + applied to this cursor takes precedence. + + :param batch_size: The size of each batch of results requested. 
+ """ + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + self._check_okay_to_chain() + + self._batch_size = batch_size + return self + + def skip(self, skip: int) -> AsyncCursor[_DocumentType]: + """Skips the first `skip` results of this cursor. + + Raises :exc:`TypeError` if `skip` is not an integer. Raises + :exc:`ValueError` if `skip` is less than ``0``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`AsyncCursor` has + already been used. The last `skip` applied to this cursor takes + precedence. + + :param skip: the number of results to skip + """ + if not isinstance(skip, int): + raise TypeError(f"skip must be an integer, not {type(skip)}") + if skip < 0: + raise ValueError("skip must be >= 0") + self._check_okay_to_chain() + + self._skip = skip + return self + + def max_time_ms(self, max_time_ms: Optional[int]) -> AsyncCursor[_DocumentType]: + """Specifies a time limit for a query operation. If the specified + time is exceeded, the operation will be aborted and + :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` + is ``None`` no limit is applied. + + Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`AsyncCursor` + has already been used. + + :param max_time_ms: the time limit after which the operation is aborted + """ + if not isinstance(max_time_ms, int) and max_time_ms is not None: + raise TypeError(f"max_time_ms must be an integer or None, not {type(max_time_ms)}") + self._check_okay_to_chain() + + self._max_time_ms = max_time_ms + return self + + def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> AsyncCursor[_DocumentType]: + """Specifies a time limit for a getMore operation on a + :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other + types of cursor max_await_time_ms is ignored. + + Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or + ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`AsyncCursor` has already been used. + + .. note:: `max_await_time_ms` requires server version **>= 3.2** + + :param max_await_time_ms: the time limit after which the operation is + aborted + + .. versionadded:: 3.2 + """ + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) + self._check_okay_to_chain() + + # Ignore max_await_time_ms if not tailable or await_data is False. + if self._query_flags & CursorType.TAILABLE_AWAIT: + self._max_await_time_ms = max_await_time_ms + + return self + + @overload + def __getitem__(self, index: int) -> _DocumentType: + ... + + @overload + def __getitem__(self, index: slice) -> AsyncCursor[_DocumentType]: + ... + + def __getitem__( + self, index: Union[int, slice] + ) -> Union[_DocumentType, AsyncCursor[_DocumentType]]: + """Get a single document or a slice of documents from this cursor. + + .. warning:: A :class:`~AsyncCursor` is not a Python :class:`list`. Each + index access or slice requires that a new query be run using skip + and limit. Do not iterate the cursor using index accesses. + The following example is **extremely inefficient** and may return + surprising results:: + + cursor = db.collection.find() + # Warning: This runs a new query for each document. + # Don't do this! 
+ for idx in range(10): + print(cursor[idx]) + + Raises :class:`~pymongo.errors.InvalidOperation` if this + cursor has already been used. + + To get a single document use an integral index, e.g.:: + + >>> db.test.find()[50] + + An :class:`IndexError` will be raised if the index is negative + or greater than the amount of documents in this cursor. Any + limit previously applied to this cursor will be ignored. + + To get a slice of documents use a slice index, e.g.:: + + >>> db.test.find()[20:25] + + This will return this cursor with a limit of ``5`` and skip of + ``20`` applied. Using a slice index will override any prior + limits or skips applied to this cursor (including those + applied through previous calls to this method). Raises + :class:`IndexError` when the slice has a step, a negative + start value, or a stop value less than or equal to the start + value. + + :param index: An integer or slice index to be applied to this cursor + """ + if _IS_SYNC: + self._check_okay_to_chain() + self._empty = False + if isinstance(index, slice): + if index.step is not None: + raise IndexError("AsyncCursor instances do not support slice steps") + + skip = 0 + if index.start is not None: + if index.start < 0: + raise IndexError("AsyncCursor instances do not support negative indices") + skip = index.start + + if index.stop is not None: + limit = index.stop - skip + if limit < 0: + raise IndexError( + "stop index must be greater than start index for slice %r" % index + ) + if limit == 0: + self._empty = True + else: + limit = 0 + + self._skip = skip + self._limit = limit + return self + + if isinstance(index, int): + if index < 0: + raise IndexError("AsyncCursor instances do not support negative indices") + clone = self.clone() + clone.skip(index + self._skip) + clone.limit(-1) # use a hard limit + clone._query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371 + for doc in clone: # type: ignore[attr-defined] + return doc + raise IndexError("no such item for AsyncCursor instance") + raise TypeError("index %r cannot be applied to AsyncCursor instances" % index) + else: + raise IndexError("AsyncCursor does not support indexing") + + def max_scan(self, max_scan: Optional[int]) -> AsyncCursor[_DocumentType]: + """**DEPRECATED** - Limit the number of documents to scan when + performing the query. + + Raises :class:`~pymongo.errors.InvalidOperation` if this + cursor has already been used. Only the last :meth:`max_scan` + applied to this cursor has any effect. + + :param max_scan: the maximum number of documents to scan + + .. versionchanged:: 3.7 + Deprecated :meth:`max_scan`. Support for this option is deprecated in + MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server side + execution time. + """ + self._check_okay_to_chain() + self._max_scan = max_scan + return self + + def max(self, spec: _Sort) -> AsyncCursor[_DocumentType]: + """Adds ``max`` operator that specifies upper bound for specific index. + + When using ``max``, :meth:`~hint` should also be configured to ensure + the query uses the expected index and starting in MongoDB 4.2 + :meth:`~hint` will be required. + + :param spec: a list of field, limit pairs specifying the exclusive + upper bound for all keys of a specific index in order. + + .. versionchanged:: 3.8 + Deprecated cursors that use ``max`` without a :meth:`~hint`. + + .. 
versionadded:: 2.7 + """ + if not isinstance(spec, (list, tuple)): + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") + + self._check_okay_to_chain() + self._max = dict(spec) + return self + + def min(self, spec: _Sort) -> AsyncCursor[_DocumentType]: + """Adds ``min`` operator that specifies lower bound for specific index. + + When using ``min``, :meth:`~hint` should also be configured to ensure + the query uses the expected index and starting in MongoDB 4.2 + :meth:`~hint` will be required. + + :param spec: a list of field, limit pairs specifying the inclusive + lower bound for all keys of a specific index in order. + + .. versionchanged:: 3.8 + Deprecated cursors that use ``min`` without a :meth:`~hint`. + + .. versionadded:: 2.7 + """ + if not isinstance(spec, (list, tuple)): + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") + + self._check_okay_to_chain() + self._min = dict(spec) + return self + + def sort( + self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None + ) -> AsyncCursor[_DocumentType]: + """Sorts this cursor's results. + + Pass a field name and a direction, either + :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: + + async for doc in collection.find().sort('field', pymongo.ASCENDING): + print(doc) + + To sort by multiple fields, pass a list of (key, direction) pairs. + If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: + + async for doc in collection.find().sort([ + 'field1', + ('field2', pymongo.DESCENDING)]): + print(doc) + + Text search results can be sorted by relevance:: + + cursor = db.test.find( + {'$text': {'$search': 'some words'}}, + {'score': {'$meta': 'textScore'}}) + + # Sort by 'score' field. + cursor.sort([('score', {'$meta': 'textScore'})]) + + async for doc in cursor: + print(doc) + + For more advanced text search functionality, see MongoDB's + `Atlas Search `_. + + Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. Only the last :meth:`sort` applied to this + cursor has any effect. + + :param key_or_list: a single key or a list of (key, direction) + pairs specifying the keys to sort on + :param direction: only used if `key_or_list` is a single + key, if not given :data:`~pymongo.ASCENDING` is assumed + """ + self._check_okay_to_chain() + keys = helpers_shared._index_list(key_or_list, direction) + self._ordering = helpers_shared._index_document(keys) + return self + + async def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. note:: This method uses the default verbosity mode of the + `explain command + `_, + ``allPlansExecution``. To use a different verbosity use + :meth:`~pymongo.asynchronous.database.AsyncDatabase.command` to run the explain + command directly. + + .. note:: The timeout of this method can be set using :func:`pymongo.timeout`. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + c = self.clone() + c._explain = True + + # always use a hard limit for explains + if c._limit: + c._limit = -abs(c._limit) + return await anext(c) + + def _set_hint(self, index: Optional[_Hint]) -> None: + if index is None: + self._hint = None + return + + if isinstance(index, str): + self._hint = index + else: + self._hint = helpers_shared._index_document(index) + + def hint(self, index: Optional[_Hint]) -> AsyncCursor[_DocumentType]: + """Adds a 'hint', telling Mongo the proper index to use for the query. 
+ + Judicious use of hints can greatly improve query + performance. When doing a query on multiple fields (at least + one of which is indexed) pass the indexed field as a hint to + the query. Raises :class:`~pymongo.errors.OperationFailure` if the + provided hint requires an index that does not exist on this collection, + and raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. + + `index` should be an index as passed to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` + (e.g. ``[('field', ASCENDING)]``) or the name of the index. + If `index` is ``None`` any existing hint for this query is + cleared. The last hint applied to this cursor takes precedence + over all others. + + :param index: index to hint on (as an index specifier) + """ + self._check_okay_to_chain() + self._set_hint(index) + return self + + def comment(self, comment: Any) -> AsyncCursor[_DocumentType]: + """Adds a 'comment' to the cursor. + + http://mongodb.com/docs/manual/reference/operator/comment/ + + :param comment: A string to attach to the query to help interpret and + trace the operation in the server logs and in profile data. + + .. versionadded:: 2.7 + """ + self._check_okay_to_chain() + self._comment = comment + return self + + def where(self, code: Union[str, Code]) -> AsyncCursor[_DocumentType]: + """Adds a `$where`_ clause to this query. + + The `code` argument must be an instance of :class:`str` or + :class:`~bson.code.Code` containing a JavaScript expression. + This expression will be evaluated for each document scanned. + Only those documents for which the expression evaluates to + *true* will be returned as results. The keyword *this* refers + to the object currently being scanned. For example:: + + # Find all documents where field "a" is less than "b" plus "c". + async for doc in db.test.find().where('this.a < (this.b + this.c)'): + print(doc) + + Raises :class:`TypeError` if `code` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidOperation` if this + :class:`AsyncCursor` has already been used. Only the last call to + :meth:`where` applied to a :class:`AsyncCursor` has any effect. + + .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` + with scope variables. Consider using `$expr`_ instead. + + :param code: JavaScript expression to use as a filter + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ + """ + self._check_okay_to_chain() + if not isinstance(code, Code): + code = Code(code) + + # Avoid overwriting a filter argument that was given by the user + # when updating the spec. + spec: dict[str, Any] + if self._has_filter: + spec = dict(self._spec) + else: + spec = cast(dict, self._spec) # type: ignore[type-arg] + spec["$where"] = code + self._spec = spec + return self + + def collation(self, collation: Optional[_CollationIn]) -> AsyncCursor[_DocumentType]: + """Adds a :class:`~pymongo.collation.Collation` to this query. + + Raises :exc:`TypeError` if `collation` is not an instance of + :class:`~pymongo.collation.Collation` or a ``dict``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`AsyncCursor` has + already been used. Only the last collation applied to this cursor has + any effect. + + :param collation: An instance of :class:`~pymongo.collation.Collation`. 
+ """ + self._check_okay_to_chain() + self._collation = validate_collation_or_none(collation) + return self + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions, # type: ignore[type-arg] + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + def _get_read_preference(self) -> _ServerMode: + if self._read_preference is None: + # Save the read preference for getMore commands. + self._read_preference = self._collection._read_preference_for(self.session) + return self._read_preference + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + This is mostly useful with `tailable cursors + `_ + since they will stop iterating even though they *may* return more + results in the future. + + With regular cursors, simply use an asynchronous for loop instead of :attr:`alive`:: + + async for doc in collection.find(): + print(doc) + + .. note:: Even if :attr:`alive` is True, :meth:`next` can raise + :exc:`StopIteration`. :attr:`alive` can also be True while iterating + a cursor from a failed server. In this case :attr:`alive` will + return False after :meth:`next` fails to retrieve the next batch + of results from the server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> Optional[int]: + """Returns the id of the cursor + + .. versionadded:: 2.2 + """ + return self._id + + @property + def address(self) -> Optional[tuple[str, Any]]: + """The (host, port) of the server used, or None. + + .. versionchanged:: 3.0 + Renamed from "conn_id". + """ + return self._address + + @property + def session(self) -> Optional[AsyncClientSession]: + """The cursor's :class:`~pymongo.asynchronous.client_session.AsyncClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._session and not self._session._implicit: + return self._session + return None + + def __copy__(self) -> AsyncCursor[_DocumentType]: + """Support function for `copy.copy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=False) + + def __deepcopy__(self, memo: Any) -> Any: + """Support function for `copy.deepcopy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=True) + + @overload + def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: # type: ignore[type-arg] + ... + + @overload + def _deepcopy( + self, + x: SupportsItems, # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> dict: # type: ignore[type-arg] + ... + + def _deepcopy( + self, + x: Union[Iterable, SupportsItems], # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> Union[list[Any], dict[str, Any]]: + """Deepcopy helper for the data dictionary or list. + + Regular expressions cannot be deep copied but as they are immutable we + don't have to copy them when cloning. 
+ """ + y: Union[list[Any], dict[str, Any]] + iterator: Iterable[tuple[Any, Any]] + if not hasattr(x, "items"): + y, is_list, iterator = [], True, enumerate(x) + else: + y, is_list, iterator = {}, False, cast("SupportsItems", x).items() # type: ignore[type-arg] + if memo is None: + memo = {} + val_id = id(x) + if val_id in memo: + return memo[val_id] + memo[val_id] = y + + for key, value in iterator: + if isinstance(value, (dict, list)) and not isinstance(value, SON): + value = self._deepcopy(value, memo) # noqa: PLW2901 + elif not isinstance(value, RE_TYPE): + value = copy.deepcopy(value, memo) # noqa: PLW2901 + + if is_list: + y.append(value) # type: ignore[union-attr] + else: + if not isinstance(key, RE_TYPE): + key = copy.deepcopy(key, memo) # noqa: PLW2901 + y[key] = value # type:ignore[index] + return y + + def _prepare_to_die(self, already_killed: bool) -> tuple[int, Optional[_CursorAddress]]: + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, f"{self._dbname}.{self._collname}") + else: + # Skip killCursors. + cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + async def _die_lock(self) -> None: + """Closes this cursor.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + await self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + async def close(self) -> None: + """Explicitly close / kill this cursor.""" + await self._die_lock() + + async def distinct(self, key: str) -> list[Any]: + """Get a list of distinct values for `key` among all documents + in the result set of this query. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + The :meth:`distinct` method obeys the + :attr:`~pymongo.asynchronous.collection.AsyncCollection.read_preference` of the + :class:`~pymongo.asynchronous.collection.AsyncCollection` instance on which + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find` was called. + + :param key: name of key for which we want to get the distinct values + + .. seealso:: :meth:`pymongo.asynchronous.collection.AsyncCollection.distinct` + """ + options: dict[str, Any] = {} + if self._spec: + options["query"] = self._spec + if self._max_time_ms is not None: + options["maxTimeMS"] = self._max_time_ms + if self._comment: + options["comment"] = self._comment + if self._collation is not None: + options["collation"] = self._collation + + return await self._collection.distinct(key, session=self._session, **options) + + async def _send_message(self, operation: Union[_Query, _GetMore]) -> None: + """Send a query or getmore operation and handles the response. 
+ + If operation is ``None`` this is an exhaust cursor, which reads + the next result batch off the exhaust socket instead of + sending getMore messages to the server. + + Can raise ConnectionFailure. + """ + client = self._collection.database.client + # OP_MSG is required to support exhaust cursors with encryption. + if client._encrypter and self._exhaust: + raise InvalidOperation("exhaust cursors do not support auto encryption") + + try: + response = await client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS or self._exhaust: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + await self.close() + # If this is a tailable cursor the error is likely + # due to capped collection roll over. Setting + # self._killed to True ensures AsyncCursor.alive will be + # False. No need to re-raise. + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self._query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): + return + raise + except ConnectionFailure: + self._killed = True + await self.close() + raise + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + await self.close() + raise + self._address = response.address + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + + cmd_name = operation.name + docs = response.docs + if response.from_command: + if cmd_name != "explain": + cursor = docs[0]["cursor"] + self._id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] + # Update the namespace used for future getMore commands. + ns = cursor.get("ns") + if ns: + self._dbname, self._collname = ns.split(".", 1) + else: + documents = cursor["nextBatch"] + self._data = deque(documents) + self._retrieved += len(documents) + else: + self._id = 0 + self._data = deque(docs) + self._retrieved += len(docs) + else: + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + self._data = deque(docs) + self._retrieved += response.data.number_returned + + if self._id == 0: + # Don't wait for garbage collection to call __del__, return the + # socket and the session to the pool now. + await self.close() + + if self._limit and self._id and self._limit <= self._retrieved: + await self.close() + + async def _refresh(self) -> int: + """Refreshes the cursor with more data from Mongo. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. 
+ """ + if len(self._data) or self._killed: + return len(self._data) + + if not self._session: + self._session = self._collection.database.client._ensure_session() + + if self._id is None: # Query + if (self._min or self._max) and not self._hint: + raise InvalidOperation( + "Passing a 'hint' is required when using the min/max query" + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self._query_flags, + self._collection.database.name, + self._collection.name, + self._skip, + self._query_spec(), + self._projection, + self._codec_options, + self._get_read_preference(), + self._limit, + self._batch_size, + self._read_concern, + self._collation, + self._session, + self._collection.database.client, + self._allow_disk_use, + self._exhaust, + ) + await self._send_message(q) + elif self._id: # Get More + if self._limit: + limit = self._limit - self._retrieved + if self._batch_size: + limit = min(limit, self._batch_size) + else: + limit = self._batch_size + # Exhaust cursors don't send getMore messages. + g = self._getmore_class( + self._dbname, + self._collname, + limit, + self._id, + self._codec_options, + self._get_read_preference(), + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + self._exhaust, + self._comment, + ) + await self._send_message(g) + + return len(self._data) + + async def rewind(self) -> AsyncCursor[_DocumentType]: + """Rewind this cursor to its unevaluated state. + + Reset this cursor if it has been partially or completely evaluated. + Any options that are present on the cursor will remain in effect. + Future iterating performed on this cursor will cause new queries to + be sent to the server, even if the resultant data has already been + retrieved by this cursor. + """ + await self.close() + self._data = deque() + self._id = None + self._address = None + self._retrieved = 0 + self._killed = False + + return self + + async def next(self) -> _DocumentType: + """Advance the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + await self._supports_exhaust() + if self._empty: + raise StopAsyncIteration + if len(self._data) or await self._refresh(): + return self._data.popleft() + else: + raise StopAsyncIteration + + async def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] + """Get all or some documents from the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + await self._supports_exhaust() + if self._empty: + return False + if len(self._data) or await self._refresh(): + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) + return True + else: + return False + + async def __anext__(self) -> _DocumentType: + return await self.next() + + def __aiter__(self) -> AsyncCursor[_DocumentType]: + return self + + async def __aenter__(self) -> AsyncCursor[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + @_csot.apply + async def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: + """Converts the contents of this cursor to a list more efficiently than ``[doc async for doc in cursor]``. + + To use:: + + >>> await cursor.to_list() + + Or, to read at most n items from the cursor:: + + >>> await cursor.to_list(n) + + If the cursor is empty or has no more results, an empty list will be returned. + + .. 
versionadded:: 4.9 + """ + res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") + while self.alive: + if not await self._next_batch(res, remaining): + break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break + return res + + +class AsyncRawBatchCursor(AsyncCursor, Generic[_DocumentType]): # type: ignore[type-arg] + """An asynchronous cursor / iterator over raw batches of BSON data from a query result.""" + + _query_class = _RawBatchQuery + _getmore_class = _RawBatchGetMore + + def __init__( + self, collection: AsyncCollection[_DocumentType], *args: Any, **kwargs: Any + ) -> None: + """Create a new cursor / iterator over raw batches of BSON data. + + Should not be called directly by application developers - + see :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_raw_batches` + instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + super().__init__(collection, *args, **kwargs) + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[_DocumentOut]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return cast(List["_DocumentOut"], raw_response) + + async def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + clone = self._clone(deepcopy=True, base=AsyncCursor(self.collection)) + return await clone.explain() + + def __getitem__(self, index: Any) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on AsyncRawBatchCursor") diff --git a/pymongo/asynchronous/database.py b/pymongo/asynchronous/database.py new file mode 100644 index 0000000000..8e0afc9dc9 --- /dev/null +++ b/pymongo/asynchronous/database.py @@ -0,0 +1,1469 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Database level operations.""" +from __future__ import annotations + +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.dbref import DBRef +from bson.timestamp import Timestamp +from pymongo import _csot, common +from pymongo.asynchronous.aggregation import _DatabaseAggregationCommand +from pymongo.asynchronous.change_stream import AsyncDatabaseChangeStream +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.database_shared import _check_name, _CodecDocumentType +from pymongo.errors import CollectionInvalid, InvalidOperation +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline + +if TYPE_CHECKING: + import bson + import bson.codec_options + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.read_concern import ReadConcern + from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncDatabase(common.BaseObject, Generic[_DocumentType]): + def __init__( + self, + client: AsyncMongoClient[_DocumentType], + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> None: + """Get a database by client and name. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if + `name` is not a valid database name. + + :param client: A :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` instance. + :param name: The database name. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) client.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) client.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) client.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) client.read_concern is used. + + .. seealso:: The MongoDB documentation on `databases `_. + + .. versionchanged:: 4.0 + Removed the eval, system_js, error, last_status, previous_error, + reset_error_history, authenticate, logout, collection_names, + current_op, add_user, remove_user, profiling_level, + set_profiling_level, and profiling_info methods. + See the :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + :class:`~pymongo.asynchronous.database.AsyncDatabase` no longer returns an instance + of :class:`~pymongo.asynchronous.collection.AsyncCollection` for attribute names + with leading underscores. 
+           You must use dict-style lookups instead::
+
+               db['__my_collection__']
+
+           Not:
+
+               db.__my_collection__
+        """
+        super().__init__(
+            codec_options or client.codec_options,
+            read_preference or client.read_preference,
+            write_concern or client.write_concern,
+            read_concern or client.read_concern,
+        )
+
+        from pymongo.asynchronous.mongo_client import AsyncMongoClient
+
+        if not isinstance(name, str):
+            raise TypeError(f"name must be an instance of str, not {type(name)}")
+
+        if not isinstance(client, AsyncMongoClient):
+            # This is for compatibility with mocked and subclassed types, such as in Motor.
+            if not any(cls.__name__ == "AsyncMongoClient" for cls in type(client).__mro__):
+                raise TypeError(f"AsyncMongoClient required but given {type(client).__name__}")
+
+        if name != "$external":
+            _check_name(name)
+
+        self._name = name
+        self._client: AsyncMongoClient[_DocumentType] = client
+        self._timeout = client.options.timeout
+
+    @property
+    def client(self) -> AsyncMongoClient[_DocumentType]:
+        """The client instance for this :class:`AsyncDatabase`."""
+        return self._client
+
+    @property
+    def name(self) -> str:
+        """The name of this :class:`AsyncDatabase`."""
+        return self._name
+
+    @overload
+    def with_options(
+        self,
+        codec_options: None = None,
+        read_preference: Optional[_ServerMode] = ...,
+        write_concern: Optional[WriteConcern] = ...,
+        read_concern: Optional[ReadConcern] = ...,
+    ) -> AsyncDatabase[_DocumentType]:
+        ...
+
+    @overload
+    def with_options(
+        self,
+        codec_options: bson.CodecOptions[_DocumentTypeArg],
+        read_preference: Optional[_ServerMode] = ...,
+        write_concern: Optional[WriteConcern] = ...,
+        read_concern: Optional[ReadConcern] = ...,
+    ) -> AsyncDatabase[_DocumentTypeArg]:
+        ...
+
+    def with_options(
+        self,
+        codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None,
+        read_preference: Optional[_ServerMode] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_concern: Optional[ReadConcern] = None,
+    ) -> AsyncDatabase[_DocumentType] | AsyncDatabase[_DocumentTypeArg]:
+        """Get a clone of this database changing the specified settings.
+
+          >>> db1.read_preference
+          Primary()
+          >>> from pymongo.read_preferences import Secondary
+          >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}]))
+          >>> db1.read_preference
+          Primary()
+          >>> db2.read_preference
+          Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None)
+
+        :param codec_options: An instance of
+          :class:`~bson.codec_options.CodecOptions`. If ``None`` (the
+          default) the :attr:`codec_options` of this :class:`AsyncDatabase`
+          is used.
+        :param read_preference: The read preference to use. If
+          ``None`` (the default) the :attr:`read_preference` of this
+          :class:`AsyncDatabase` is used. See :mod:`~pymongo.read_preferences`
+          for options.
+        :param write_concern: An instance of
+          :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
+          default) the :attr:`write_concern` of this :class:`AsyncDatabase`
+          is used.
+        :param read_concern: An instance of
+          :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
+          default) the :attr:`read_concern` of this :class:`AsyncDatabase`
+          is used.
+
+        ..
versionadded:: 3.8 + """ + return AsyncDatabase( + self._client, + self._name, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, AsyncDatabase): + return self._client == other.client and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._client, self._name)) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._client!r}, {self._name!r})" + + def __getattr__(self, name: str) -> AsyncCollection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" collection, use database[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> AsyncCollection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + return AsyncCollection(self, name) + + def get_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> AsyncCollection[_DocumentType]: + """Get a :class:`~pymongo.asynchronous.collection.AsyncCollection` with the given name + and options. + + Useful for creating a :class:`~pymongo.asynchronous.collection.AsyncCollection` with + different codec options, read preference, and/or write concern from + this :class:`AsyncDatabase`. + + >>> db.read_preference + Primary() + >>> coll1 = db.test + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = db.get_collection( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the collection - a string. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncDatabase` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncDatabase` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncDatabase` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncDatabase` is + used. 
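+
+        Similarly, a collection with a non-default write concern can be
+        obtained (a minimal sketch; ``w=2`` and ``wtimeout=1000`` are
+        illustrative values)::
+
+            from pymongo import WriteConcern
+
+            # Require acknowledgment from two replica set members.
+            coll3 = db.get_collection(
+                'test', write_concern=WriteConcern(w=2, wtimeout=1000))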
+ """ + return AsyncCollection( + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + + async def _get_encrypted_fields( + self, kwargs: Mapping[str, Any], coll_name: str, ask_db: bool + ) -> Optional[Mapping[str, Any]]: + encrypted_fields = kwargs.get("encryptedFields") + if encrypted_fields: + return cast(Mapping[str, Any], deepcopy(encrypted_fields)) + if ( + self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + and self.client.options.auto_encryption_opts._encrypted_fields_map.get( + f"{self.name}.{coll_name}" + ) + ): + return cast( + Mapping[str, Any], + deepcopy( + self.client.options.auto_encryption_opts._encrypted_fields_map[ + f"{self.name}.{coll_name}" + ] + ), + ) + if ask_db and self.client.options.auto_encryption_opts: + options = await self[coll_name].options() + if options.get("encryptedFields"): + return cast(Mapping[str, Any], deepcopy(options["encryptedFields"])) + return None + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'AsyncDatabase' object is not iterable") + + next = __next__ + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None" + ) + + async def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[AsyncClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> AsyncDatabaseChangeStream[_DocumentType]: + """Watch changes on this database. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.asynchronous.change_stream.AsyncDatabaseChangeStream` cursor which + iterates over changes on all collections in this database. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + async with db.watch() as stream: + async for change in stream: + print(change) + + The :class:`~pymongo.asynchronous.change_stream.AsyncDatabaseChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.asynchronous.change_stream.AsyncDatabaseChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + async with db.watch([{"$match": {"operationType": "insert"}}]) as stream: + async for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The AsyncChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. 
Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.asynchronous.change_stream.AsyncDatabaseChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionadded:: 3.7 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = AsyncDatabaseChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + await change_stream._initialize_cursor() + return change_stream + + @_csot.apply + async def create_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[AsyncClientSession] = None, + check_exists: Optional[bool] = True, + **kwargs: Any, + ) -> AsyncCollection[_DocumentType]: + """Create a new :class:`~pymongo.asynchronous.collection.AsyncCollection` in this + database. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. 
:class:`~pymongo.errors.CollectionInvalid` will be + raised if the collection already exists. + + :param name: the name of the collection to create + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncDatabase` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncDatabase` is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncDatabase` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncDatabase` is + used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param check_exists: if True (the default), send a listCollections command to + check if the collection already exists before creation. + :param kwargs: additional keyword arguments will + be passed as options for the `create collection command`_ + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. Valid options include, but are not + limited to: + + - ``size`` (int): desired initial size for the collection (in + bytes). For capped collections this size is the max + size of the collection. + - ``capped`` (bool): if True, this is a capped collection + - ``max`` (int): maximum number of objects if capped (optional) + - ``timeseries`` (dict): a document specifying configuration options for + timeseries collections + - ``expireAfterSeconds`` (int): the number of seconds after which a + document in a timeseries collection expires + - ``validator`` (dict): a document specifying validation rules or expressions + for the collection + - ``validationLevel`` (str): how strictly to apply the + validation rules to existing documents during an update. The default level + is "strict" + - ``validationAction`` (str): whether to "error" on invalid documents + (the default) or just "warn" about the violations but allow invalid + documents to be inserted + - ``indexOptionDefaults`` (dict): a document specifying a default configuration + for indexes when creating a collection + - ``viewOn`` (str): the name of the source collection or view from which + to create the view + - ``pipeline`` (list): a list of aggregation pipeline stages + - ``comment`` (str): a user-provided comment to attach to this command. + This option is only supported on MongoDB >= 4.4. + - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + - ``clusteredIndex`` (dict): Document that specifies the clustered index + configuration. 
+            It must have the following form::
+
+              {
+                  // key pattern must be {_id: 1}
+                  key: <key pattern>, // required
+                  unique: <bool>, // required, must be `true`
+                  name: <string>, // optional, otherwise automatically generated
+                  v: <int>, // optional, must be `2` if provided
+              }
+          - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for
+            enabling pre- and post-images.
+
+        .. versionchanged:: 4.2
+           Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters.
+
+        .. versionchanged:: 3.11
+           This method is now supported inside multi-document transactions
+           with MongoDB 4.4+.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+
+        .. versionchanged:: 3.4
+           Added the collation option.
+
+        .. versionchanged:: 3.0
+           Added the codec_options, read_preference, and write_concern options.
+
+        .. _create collection command:
+            https://mongodb.com/docs/manual/reference/command/create
+        """
+        encrypted_fields = await self._get_encrypted_fields(kwargs, name, False)
+        if encrypted_fields:
+            common.validate_is_mapping("encryptedFields", encrypted_fields)
+            kwargs["encryptedFields"] = encrypted_fields
+
+        clustered_index = kwargs.get("clusteredIndex")
+        if clustered_index:
+            common.validate_is_mapping("clusteredIndex", clustered_index)
+
+        async with self._client._tmp_session(session) as s:
+            if s and not s.in_transaction:
+                s._leave_alive = True
+            # Skip this check in a transaction where listCollections is not
+            # supported.
+            if (
+                check_exists
+                and (not s or not s.in_transaction)
+                and name in await self._list_collection_names(filter={"name": name}, session=s)
+            ):
+                raise CollectionInvalid("collection %s already exists" % name)
+            if s:
+                s._leave_alive = False
+            coll = AsyncCollection(
+                self,
+                name,
+                False,
+                codec_options,
+                read_preference,
+                write_concern,
+                read_concern,
+            )
+            await coll._create(kwargs, s)
+
+            return coll
+
+    async def aggregate(
+        self, pipeline: _Pipeline, session: Optional[AsyncClientSession] = None, **kwargs: Any
+    ) -> AsyncCommandCursor[_DocumentType]:
+        """Perform a database-level aggregation.
+
+        See the `aggregation pipeline`_ documentation for a list of stages
+        that are supported.
+
+        .. code-block:: python
+
+            # Lists all operations currently running on the server.
+            async with await client.admin.aggregate([{"$currentOp": {}}]) as cursor:
+                async for operation in cursor:
+                    print(operation)
+
+        The :meth:`aggregate` method obeys the :attr:`read_preference` of this
+        :class:`AsyncDatabase`, except when ``$out`` or ``$merge`` are used, in
+        which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`
+        is used.
+
+        Cursors are closed automatically when they are exhausted (the last
+        batch of data is retrieved from the database). If a cursor is not
+        exhausted, it will be closed automatically upon garbage collection,
+        which leaves resources open but unused for a potentially long period
+        of time. To avoid this, best practice is to call
+        :meth:`AsyncCursor.close` when the cursor is no longer needed, or to
+        use the cursor in an ``async with`` statement.
+
+        .. note:: This method does not support the 'explain' option. Please
+           use :meth:`~pymongo.asynchronous.database.AsyncDatabase.command` instead.
+
+        .. note:: The :attr:`~pymongo.asynchronous.database.AsyncDatabase.write_concern` of
+           this database is automatically applied to this operation.
+
+        :param pipeline: a list of aggregation pipeline stages
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param kwargs: extra `aggregate command`_ parameters.
+ + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + + :return: A :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor` over the result + set. + + .. versionadded:: 3.9 + + .. _aggregation pipeline: + https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline + + .. _aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + async with self.client._tmp_session(session) as s: + cmd = _DatabaseAggregationCommand( + self, + AsyncCommandCursor, + pipeline, + kwargs, + user_fields={"cursor": {"firstBatch": 1}}, + ) + return await self.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(s), # type: ignore[arg-type] + s, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + @overload + async def _command( + self, + conn: AsyncConnection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[dict[str, Any]] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + async def _command( + self, + conn: AsyncConnection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[_CodecDocumentType] = ..., + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... 
+ + async def _command( + self, + conn: AsyncConnection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: Union[ + CodecOptions[dict[str, Any]], CodecOptions[_CodecDocumentType] + ] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[AsyncClientSession] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Internal command helper.""" + if isinstance(command, str): + command = {command: value} + + command.update(kwargs) + async with self._client._tmp_session(session) as s: + return await conn.command( + self._name, + command, + read_preference, + codec_options, # type: ignore[arg-type] + check, + allowable_errors, + write_concern=write_concern, + parse_write_concern_error=parse_write_concern_error, + session=s, + client=self._client, + ) + + @overload + async def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: None = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + async def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: CodecOptions[_CodecDocumentType] = ..., + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... + + @_csot.apply + async def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Issue a MongoDB command. + + Send command `command` to the database and return the + response. If `command` is an instance of :class:`str` + then the command {`command`: `value`} will be sent. + Otherwise, `command` must be an instance of + :class:`dict` and will be sent as is. + + Any additional keyword arguments will be added to the final + command document before it is sent. + + For example, a command like ``{buildinfo: 1}`` can be sent + using: + + >>> await db.command("buildinfo") + OR + >>> await db.command({"buildinfo": 1}) + + For a command where the value matters, like ``{count: + collection_name}`` we can do: + + >>> await db.command("count", collection_name) + OR + >>> await db.command({"count": collection_name}) + + For commands that take additional arguments we can use + kwargs. So ``{count: collection_name, query: query}`` becomes: + + >>> await db.command("count", collection_name, query=query) + OR + >>> await db.command({"count": collection_name, "query": query}) + + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. 
note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should be done with this in mind. + + :param value: value to use for the command verb when + `command` is passed as a string + :param check: check the response for errors, raising + :class:`~pymongo.errors.OperationFailure` if there are any + :param allowable_errors: if `check` is ``True``, error messages + in this list will be ignored by error-checking + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + + .. note:: :meth:`command` does **not** obey this AsyncDatabase's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see `versioned API `_), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.0 + Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`, + and `secondary_acceptable_latency_ms` option. + Removed `compile_re` option: PyMongo now always represents BSON + regular expressions as :class:`~bson.regex.Regex` objects. Use + :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a + BSON regular expression to a Python regular expression object. + Added the ``codec_options`` parameter. + + .. seealso:: The MongoDB documentation on `commands `_. + """ + opts = codec_options or DEFAULT_CODEC_OPTIONS + if comment is not None: + kwargs["comment"] = comment + + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + if read_preference is None: + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + async with await self._client._conn_for_reads( + read_preference, session, operation=command_name + ) as ( + connection, + read_preference, + ): + return await self._command( + connection, + command, + value, + check, + allowable_errors, + read_preference, + opts, # type: ignore[arg-type] + session=session, + **kwargs, + ) + + @_csot.apply + async def cursor_command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions[_CodecDocumentType]] = None, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + max_await_time_ms: Optional[int] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[_DocumentType]: + """Issue a MongoDB command and parse the response as a cursor. 
+ + If the response from the server does not include a cursor field, an error will be thrown. + + Otherwise, behaves identically to issuing a normal MongoDB command. + + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should use an instance of :class:`~bson.son.SON` or + a string and kwargs instead of a Python `dict`. + + :param value: value to use for the command verb when + `command` is passed as a string + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to future getMores for this + command. + :param max_await_time_ms: The number of ms to wait for more data on future getMores for this command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + .. note:: :meth:`command` does **not** obey this AsyncDatabase's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see `versioned API `_), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. seealso:: The MongoDB documentation on `commands `_. 
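+
+        For example, a cursor-returning command can be issued and iterated
+        like this (a minimal sketch; the ``test`` collection and the empty
+        filter are illustrative values)::
+
+            # Equivalent to running {"find": "test", "filter": {}}.
+            cursor = await db.cursor_command("find", "test", filter={})
+            async for doc in cursor:
+                print(doc)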
+ """ + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + async with self._client._tmp_session(session) as tmp_session: + opts = codec_options or DEFAULT_CODEC_OPTIONS + + if read_preference is None: + read_preference = ( + tmp_session and tmp_session._txn_read_preference() + ) or ReadPreference.PRIMARY + async with await self._client._conn_for_reads( + read_preference, tmp_session, command_name + ) as ( + conn, + read_preference, + ): + response = await self._command( + conn, + command, + value, + True, + None, + read_preference, + opts, + session=tmp_session, + **kwargs, + ) + coll = self.get_collection("$cmd", read_preference=read_preference) + if response.get("cursor"): + cmd_cursor = AsyncCommandCursor( + coll, + response["cursor"], + conn.address, + max_await_time_ms=max_await_time_ms, + session=tmp_session, + comment=comment, + ) + await cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + else: + raise InvalidOperation("Command does not return a cursor.") + + async def _retryable_read_command( + self, + command: Union[str, MutableMapping[str, Any]], + operation: str, + session: Optional[AsyncClientSession] = None, + ) -> dict[str, Any]: + """Same as command but used for retryable read commands.""" + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> dict[str, Any]: + return await self._command( + conn, + command, + read_preference=read_preference, + session=session, + ) + + return await self._client._retryable_read(_cmd, read_preference, session, operation) + + async def _list_collections( + self, + conn: AsyncConnection, + session: Optional[AsyncClientSession], + read_preference: _ServerMode, + **kwargs: Any, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + """Internal listCollections helper.""" + coll = cast( + AsyncCollection[MutableMapping[str, Any]], + self.get_collection("$cmd", read_preference=read_preference), + ) + cmd = {"listCollections": 1, "cursor": {}} + cmd.update(kwargs) + async with self._client._tmp_session(session) as tmp_session: + cursor = ( + await self._command(conn, cmd, read_preference=read_preference, session=tmp_session) + )["cursor"] + cmd_cursor = AsyncCommandCursor( + coll, + cursor, + conn.address, + session=tmp_session, + comment=cmd.get("comment"), + ) + await cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + async def _list_collections_helper( + self, + session: Optional[AsyncClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`. + + .. 
versionadded:: 3.6 + """ + if filter is not None: + kwargs["filter"] = filter + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + if comment is not None: + kwargs["comment"] = comment + + async def _cmd( + session: Optional[AsyncClientSession], + _server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + return await self._list_collections( + conn, session, read_preference=read_preference, **kwargs + ) + + return await self._client._retryable_read( + _cmd, read_pref, session, operation=_Op.LIST_COLLECTIONS + ) + + async def list_collections( + self, + session: Optional[AsyncClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await database.list_collections() as cursor: + async for collection in cursor: + print(collection) + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`. + + .. versionadded:: 3.6 + """ + return await self._list_collections_helper(session, filter, comment, **kwargs) + + async def _list_collection_names( + self, + session: Optional[AsyncClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + if filter is None: + kwargs["nameOnly"] = True + + else: + # The enumerate collections spec states that "drivers MUST NOT set + # nameOnly if a filter specifies any keys other than name." + common.validate_is_mapping("filter", filter) + kwargs["filter"] = filter + if not filter or (len(filter) == 1 and "name" in filter): + kwargs["nameOnly"] = True + + return [ + result["name"] + async for result in await self._list_collections_helper(session=session, **kwargs) + ] + + async def list_collection_names( + self, + session: Optional[AsyncClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Get a list of all the collection names in this database. + + For example, to list all non-system collections:: + + filter = {"name": {"$regex": r"^(?!system\\.)"}} + db.list_collection_names(filter=filter) + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. 
+ :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + .. versionchanged:: 3.8 + Added the ``filter`` and ``**kwargs`` parameters. + + .. versionadded:: 3.6 + """ + return await self._list_collection_names(session, filter, comment, **kwargs) + + async def _drop_helper( + self, name: str, session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None + ) -> dict[str, Any]: + command = {"drop": name} + if comment is not None: + command["comment"] = comment + + async with await self._client._conn_for_writes(session, operation=_Op.DROP) as connection: + return await self._command( + connection, + command, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + @_csot.apply + async def drop_collection( + self, + name_or_collection: Union[str, AsyncCollection[_DocumentTypeArg]], + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> dict[str, Any]: + """Drop a collection. + + :param name_or_collection: the name of a collection to drop or the + collection object itself + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } + + + .. note:: The :attr:`~pymongo.asynchronous.database.AsyncDatabase.write_concern` of + this database is automatically applied to this operation. + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this database's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + name = name_or_collection + if isinstance(name, AsyncCollection): + name = name.name + + if not isinstance(name, str): + raise TypeError(f"name_or_collection must be an instance of str, not {type(name)}") + encrypted_fields = await self._get_encrypted_fields( + {"encryptedFields": encrypted_fields}, + name, + True, + ) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + await self._drop_helper( + _esc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + await self._drop_helper( + _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + + return await self._drop_helper(name, session, comment) + + async def validate_collection( + self, + name_or_collection: Union[str, AsyncCollection[_DocumentTypeArg]], + scandata: bool = False, + full: bool = False, + session: Optional[AsyncClientSession] = None, + background: Optional[bool] = None, + comment: Optional[Any] = None, + ) -> dict[str, Any]: + """Validate a collection. + + Returns a dict of validation info. 
Raises CollectionInvalid if + validation fails. + + See also the MongoDB documentation on the `validate command`_. + + :param name_or_collection: An AsyncCollection object or the name of a + collection to validate. + :param scandata: Do extra checks beyond checking the overall + structure of the collection. + :param full: Have the server do a more thorough scan of the + collection. Use with `scandata` for a thorough scan + of the structure of the collection and the individual + documents. + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param background: A boolean flag that determines whether + the command runs in the background. Requires MongoDB 4.4+. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.11 + Added ``background`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ + """ + name = name_or_collection + if isinstance(name, AsyncCollection): + name = name.name + + if not isinstance(name, str): + raise TypeError( + f"name_or_collection must be an instance of str or AsyncCollection, not {type(name)}" + ) + cmd = {"validate": name, "scandata": scandata, "full": full} + if comment is not None: + cmd["comment"] = comment + + if background is not None: + cmd["background"] = background + + result = await self.command(cmd, session=session) + + valid = True + # Pre 1.9 results + if "result" in result: + info = result["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + # Sharded results + elif "raw" in result: + for _, res in result["raw"].items(): + if "result" in res: + info = res["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + elif not res.get("valid", False): + valid = False + break + # Post 1.9 non-sharded results. + elif not result.get("valid", False): + valid = False + + if not valid: + raise CollectionInvalid(f"{name} invalid: {result!r}") + + return result + + async def dereference( + self, + dbref: DBRef, + session: Optional[AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Optional[_DocumentType]: + """Dereference a :class:`~bson.dbref.DBRef`, getting the + document it points to. + + Raises :class:`TypeError` if `dbref` is not an instance of + :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if + the reference does not point to a valid document. Raises + :class:`ValueError` if `dbref` has a database specified that + is different from the current database. + + :param dbref: the reference + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: any additional keyword arguments + are the same as the arguments to + :meth:`~pymongo.asynchronous.collection.AsyncCollection.find`. + + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. 
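+
+        For example (a minimal sketch; ``db`` is assumed and ``some_id`` is a
+        hypothetical stored ``_id``)::
+
+            from bson.dbref import DBRef
+
+            ref = DBRef("users", some_id)
+            doc = await db.dereference(ref)
+            # Equivalent, per the implementation below, to:
+            # await db["users"].find_one({"_id": some_id})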
+ """ + if not isinstance(dbref, DBRef): + raise TypeError("cannot dereference a %s" % type(dbref)) + if dbref.database is not None and dbref.database != self._name: + raise ValueError( + "trying to dereference a DBRef that points to " + f"another database ({dbref.database!r} not {self._name!r})" + ) + return await self[dbref.collection].find_one( + {"_id": dbref.id}, session=session, comment=comment, **kwargs + ) diff --git a/pymongo/asynchronous/encryption.py b/pymongo/asynchronous/encryption.py new file mode 100644 index 0000000000..4dfd36aa49 --- /dev/null +++ b/pymongo/asynchronous/encryption.py @@ -0,0 +1,1283 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for explicit client-side field level encryption.""" +from __future__ import annotations + +import asyncio +import contextlib +import enum +import socket +import time as time # noqa: PLC0414 # needed in sync version +import uuid +import weakref +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + AsyncGenerator, + Dict, + Generic, + Iterator, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +try: + from pymongocrypt.asynchronous.auto_encrypter import AsyncAutoEncrypter # type:ignore[import] + from pymongocrypt.asynchronous.explicit_encrypter import ( # type:ignore[import] + AsyncExplicitEncrypter, + ) + from pymongocrypt.asynchronous.state_machine import ( # type:ignore[import] + AsyncMongoCryptCallback, + ) + from pymongocrypt.errors import MongoCryptError # type:ignore[import] + from pymongocrypt.mongocrypt import MongoCryptOptions # type:ignore[import] + + _HAVE_PYMONGOCRYPT = True +except ImportError: + _HAVE_PYMONGOCRYPT = False + AsyncMongoCryptCallback = object + +from bson import _dict_to_bson, decode, encode +from bson.binary import STANDARD, UUID_SUBTYPE, Binary +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson +from pymongo import _csot +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.common import CONNECT_TIMEOUT +from pymongo.daemon import _spawn_daemon +from pymongo.encryption_options import ( + AutoEncryptionOpts, + RangeOpts, + TextOpts, + check_min_pymongocrypt, +) +from pymongo.errors import ( + ConfigurationError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + NetworkTimeout, + ServerSelectionTimeoutError, +) +from pymongo.helpers_shared import _get_timeout_details +from pymongo.network_layer import async_socket_sendall +from pymongo.operations import UpdateOne +from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + _async_configured_socket, + _raise_connection_failure, +) +from pymongo.read_concern import ReadConcern +from pymongo.results import 
BulkWriteResult, DeleteResult +from pymongo.ssl_support import BLOCKING_IO_ERRORS, get_ssl_context +from pymongo.typings import _DocumentType, _DocumentTypeArg +from pymongo.uri_parser_shared import _parse_kms_tls_options, parse_host +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongocrypt.mongocrypt import MongoCryptKmsContext + + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + + +_IS_SYNC = False + +_HTTPS_PORT = 443 +_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT +_MONGOCRYPTD_TIMEOUT_MS = 10000 + +_DATA_KEY_OPTS: CodecOptions[dict[str, Any]] = CodecOptions( + document_class=Dict[str, Any], uuid_representation=STANDARD +) +# Use RawBSONDocument codec options to avoid needlessly decoding +# documents from the key vault. +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) + + +async def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: + try: + return await _async_configured_socket(address, opts) + except Exception as exc: + _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) + + +@contextlib.contextmanager +def _wrap_encryption_errors() -> Iterator[None]: + """Context manager to wrap encryption related errors.""" + try: + yield + except BSONError: + # BSON encoding/decoding errors are unrelated to encryption so + # we should propagate them unchanged. + raise + except Exception as exc: + raise EncryptionError(exc) from exc + + +class _EncryptionIO(AsyncMongoCryptCallback): # type: ignore[misc] + def __init__( + self, + client: Optional[AsyncMongoClient[_DocumentTypeArg]], + key_vault_coll: AsyncCollection[_DocumentTypeArg], + mongocryptd_client: Optional[AsyncMongoClient[_DocumentTypeArg]], + opts: AutoEncryptionOpts, + ): + """Internal class to perform I/O on behalf of pymongocrypt.""" + self.client_ref: Any + # Use a weak ref to break reference cycle. + if client is not None: + self.client_ref = weakref.ref(client) + else: + self.client_ref = None + self.key_vault_coll: Optional[AsyncCollection[RawBSONDocument]] = cast( + AsyncCollection[RawBSONDocument], + key_vault_coll.with_options( + codec_options=_KEY_VAULT_OPTS, + read_concern=ReadConcern(level="majority"), + write_concern=WriteConcern(w="majority"), + ), + ) + self.mongocryptd_client = mongocryptd_client + self.opts = opts + self._spawned = False + self._kms_ssl_contexts = opts._kms_ssl_contexts(_IS_SYNC) + + async def kms_request(self, kms_context: MongoCryptKmsContext) -> None: + """Complete a KMS request. + + :param kms_context: A :class:`MongoCryptKmsContext`. + + :return: None + """ + endpoint = kms_context.endpoint + message = kms_context.message + provider = kms_context.kms_provider + ctx = self._kms_ssl_contexts.get(provider) + if ctx is None: + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, # disable_ocsp_endpoint_check + _IS_SYNC, + ) + # CSOT: set timeout for socket creation. 
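+        # Assumed semantics, for orientation: _csot.clamp_remaining(limit)
+        # caps the timeout at both `limit` and the operation's remaining
+        # client-side (CSOT) budget, and the max(..., 0.001) below keeps the
+        # socket timeout strictly positive even when that budget is nearly
+        # exhausted.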
+ connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) + opts = PoolOptions( + connect_timeout=connect_timeout, + socket_timeout=connect_timeout, + ssl_context=ctx, + ) + address = parse_host(endpoint, _HTTPS_PORT) + sleep_u = kms_context.usleep + if sleep_u: + sleep_sec = float(sleep_u) / 1e6 + await asyncio.sleep(sleep_sec) + try: + conn = await _connect_kms(address, opts) + try: + await async_socket_sendall(conn, message) + while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data: memoryview | bytes + if _IS_SYNC: + data = conn.recv(kms_context.bytes_needed) + else: + from pymongo.network_layer import ( # type: ignore[attr-defined] + async_receive_data_socket, + ) + + data = await async_receive_data_socket(conn, kms_context.bytes_needed) + if not data: + raise OSError("KMS connection closed") + kms_context.feed(data) + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + # Wrap I/O errors in PyMongo exceptions. + if isinstance(exc, BLOCKING_IO_ERRORS): + exc = socket.timeout("timed out") + # Async raises an OSError instead of returning empty bytes. + if isinstance(exc, OSError): + msg_prefix = "KMS connection closed" + else: + msg_prefix = None + _raise_connection_failure( + address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) + ) + finally: + conn.close() + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + remaining = _csot.remaining() + if isinstance(exc, NetworkTimeout) or (remaining is not None and remaining <= 0): + raise + # Mark this attempt as failed and defer to libmongocrypt to retry. + try: + kms_context.fail() + except MongoCryptError as final_err: + exc = MongoCryptError( + f"{final_err}, last attempt failed with: {exc}", final_err.code + ) + raise exc from final_err + + async def collection_info(self, database: str, filter: bytes) -> Optional[list[bytes]]: + """Get the collection info for a namespace. + + The returned collection info is passed to libmongocrypt which reads + the JSON schema. + + :param database: The database on which to run listCollections. + :param filter: The filter to pass to listCollections. + + :return: All documents from the listCollections command response as BSON. + """ + async with await self.client_ref()[database].list_collections( + filter=RawBSONDocument(filter) + ) as cursor: + return [_dict_to_bson(doc, False, _DATA_KEY_OPTS) async for doc in cursor] + + def spawn(self) -> None: + """Spawn mongocryptd. + + Note this method is thread safe; at most one mongocryptd will start + successfully. + """ + self._spawned = True + args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] + args.extend(self.opts._mongocryptd_spawn_args) + _spawn_daemon(args) + + async def mark_command(self, database: str, cmd: bytes) -> bytes | memoryview: + """Mark a command for encryption. + + :param database: The database on which to run this command. + :param cmd: The BSON command to run. + + :return: The marked command response from mongocryptd. + """ + if not self._spawned and not self.opts._mongocryptd_bypass_spawn: + self.spawn() + # AsyncDatabase.command only supports mutable mappings so we need to decode + # the raw BSON command first. 
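+        # For orientation (assumed behavior): for payload = encode({"ping": 1}),
+        # _inflate_bson(payload, DEFAULT_RAW_BSON_OPTIONS) yields a mutable
+        # top-level mapping equivalent to {"ping": 1}, which command() accepts.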
+ inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) + assert self.mongocryptd_client is not None + try: + res = await self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + except ServerSelectionTimeoutError: + if self.opts._mongocryptd_bypass_spawn: + raise + self.spawn() + res = await self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + return res.raw + + async def fetch_keys(self, filter: bytes) -> AsyncGenerator[bytes | memoryview, None]: + """Yields one or more keys from the key vault. + + :param filter: The filter to pass to find. + + :return: A generator which yields the requested keys from the key vault. + """ + assert self.key_vault_coll is not None + async with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: + async for key in cursor: + yield key.raw + + async def insert_data_key(self, data_key: bytes) -> Binary: + """Insert a data key into the key vault. + + :param data_key: The data key document to insert. + + :return: The _id of the inserted data key document. + """ + raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) + data_key_id = raw_doc.get("_id") + if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: + raise TypeError( + f"data_key _id must be Binary with a UUID subtype, not {type(data_key_id)}" + ) + + assert self.key_vault_coll is not None + await self.key_vault_coll.insert_one(raw_doc) + return data_key_id + + def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: + """Encode a document to BSON. + + A document can be any mapping type (like :class:`dict`). + + :param doc: mapping type representing a document + + :return: The encoded BSON bytes. + """ + return encode(doc) + + async def close(self) -> None: + """Release resources. + + Note it is not safe to call this method from __del__ or any GC hooks. + """ + self.client_ref = None + self.key_vault_coll = None + if self.mongocryptd_client: + await self.mongocryptd_client.close() + self.mongocryptd_client = None + + +class RewrapManyDataKeyResult: + """Result object returned by a :meth:`~AsyncClientEncryption.rewrap_many_data_key` operation. + + .. versionadded:: 4.2 + """ + + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: + self._bulk_write_result = bulk_write_result + + @property + def bulk_write_result(self) -> Optional[BulkWriteResult]: + """The result of the bulk write operation used to update the key vault + collection with one or more rewrapped data keys. If + :meth:`~AsyncClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, + no bulk write operation will be executed and this field will be + ``None``. + """ + return self._bulk_write_result + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._bulk_write_result!r})" + + +class _Encrypter: + """Encrypts and decrypts MongoDB commands. + + This class is used to support automatic encryption and decryption of + MongoDB commands. + """ + + def __init__(self, client: AsyncMongoClient[_DocumentTypeArg], opts: AutoEncryptionOpts): + """Create a _Encrypter for a client. + + :param client: The encrypted AsyncMongoClient. + :param opts: The encrypted client's :class:`AutoEncryptionOpts`. 
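+
+        Applications do not construct this class directly; it is created when
+        a client is configured for auto encryption. A minimal sketch of that
+        configuration (the throwaway local key is an assumption; a real
+        deployment must persist its key material)::
+
+            import os
+
+            from pymongo import AsyncMongoClient
+            from pymongo.encryption_options import AutoEncryptionOpts
+
+            opts = AutoEncryptionOpts(
+                kms_providers={"local": {"key": os.urandom(96)}},
+                key_vault_namespace="encryption.__keyVault",
+            )
+            client = AsyncMongoClient(auto_encryption_opts=opts)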
+ """ + if opts._schema_map is None: + schema_map = None + else: + schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) + self._bypass_auto_encryption = opts._bypass_auto_encryption + self._internal_client = None + # parsing kms_ssl_contexts here so that parsing errors will be raised before internal clients are created + opts._kms_ssl_contexts(_IS_SYNC) + + def _get_internal_client( + encrypter: _Encrypter, mongo_client: AsyncMongoClient[_DocumentTypeArg] + ) -> AsyncMongoClient[_DocumentTypeArg]: + if mongo_client.options.pool_options.max_pool_size is None: + # Unlimited pool size, use the same client. + return mongo_client + # Else - limited pool size, use an internal client. + if encrypter._internal_client is not None: + return encrypter._internal_client + internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) + encrypter._internal_client = internal_client + return internal_client + + if opts._key_vault_client is not None: + key_vault_client = opts._key_vault_client + else: + key_vault_client = _get_internal_client(self, client) + + if opts._bypass_auto_encryption: + metadata_client = None + else: + metadata_client = _get_internal_client(self, client) + + db, coll = opts._key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + mongocryptd_client: AsyncMongoClient[Mapping[str, Any]] = AsyncMongoClient( + opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS + ) + + io_callbacks = _EncryptionIO( # type:ignore[misc] + metadata_client, + key_vault_coll, # type:ignore[arg-type] + mongocryptd_client, + opts, + ) + self._auto_encrypter = AsyncAutoEncrypter( + io_callbacks, + _create_mongocrypt_options( + kms_providers=opts._kms_providers, + schema_map=schema_map, + crypt_shared_lib_path=opts._crypt_shared_lib_path, + crypt_shared_lib_required=opts._crypt_shared_lib_required, + bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, + bypass_query_analysis=opts._bypass_query_analysis, + key_expiration_ms=opts._key_expiration_ms, + ), + ) + self._closed = False + + async def encrypt( + self, database: str, cmd: Mapping[str, Any], codec_options: CodecOptions[_DocumentTypeArg] + ) -> dict[str, Any]: + """Encrypt a MongoDB command. + + :param database: The database for this command. + :param cmd: A command document. + :param codec_options: The CodecOptions to use while encoding `cmd`. + + :return: The encrypted command to execute. + """ + self._check_closed() + encoded_cmd = _dict_to_bson(cmd, False, codec_options) + with _wrap_encryption_errors(): + encrypted_cmd = await self._auto_encrypter.encrypt(database, encoded_cmd) + # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. + return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) + + async def decrypt(self, response: bytes | memoryview) -> Optional[bytes]: + """Decrypt a MongoDB command response. + + :param response: A MongoDB command response as BSON. + + :return: The decrypted command response. 
+ """ + self._check_closed() + with _wrap_encryption_errors(): + return cast(bytes, await self._auto_encrypter.decrypt(response)) + + def _check_closed(self) -> None: + if self._closed: + raise InvalidOperation("Cannot use AsyncMongoClient after close") + + async def close(self) -> None: + """Cleanup resources.""" + self._closed = True + await self._auto_encrypter.close() + if self._internal_client: + await self._internal_client.close() + self._internal_client = None + + +class Algorithm(str, enum.Enum): + """An enum that defines the supported encryption algorithms.""" + + AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" + AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" + INDEXED = "Indexed" + """Indexed. + + .. versionadded:: 4.2 + """ + UNINDEXED = "Unindexed" + """Unindexed. + + .. versionadded:: 4.2 + """ + RANGE = "Range" + """Range. + + .. versionadded:: 4.9 + """ + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - RangePreview. + + .. note:: Support for RangePreview is deprecated. Use :attr:`Algorithm.RANGE` instead. + + .. versionadded:: 4.4 + """ + TEXTPREVIEW = "TextPreview" + """**BETA** - TextPreview. + + .. versionadded:: 4.15 + """ + + +class QueryType(str, enum.Enum): + """An enum that defines the supported values for explicit encryption query_type. + + .. versionadded:: 4.2 + """ + + EQUALITY = "equality" + """Used to encrypt a value for an equality query.""" + + RANGE = "range" + """Used to encrypt a value for a range query. + + .. versionadded:: 4.9 + """ + + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - Used to encrypt a value for a rangePreview query. + + .. note:: Support for RangePreview is deprecated. Use :attr:`QueryType.RANGE` instead. + + .. versionadded:: 4.4 + """ + + PREFIXPREVIEW = "prefixPreview" + """**BETA** - Used to encrypt a value for a prefixPreview query. + + .. versionadded:: 4.15 + """ + + SUFFIXPREVIEW = "suffixPreview" + """**BETA** - Used to encrypt a value for a suffixPreview query. + + .. versionadded:: 4.15 + """ + + SUBSTRINGPREVIEW = "substringPreview" + """**BETA** - Used to encrypt a value for a substringPreview query. + + .. versionadded:: 4.15 + """ + + +def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: + # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. + if kwargs.get("key_expiration_ms") is None: + kwargs.pop("key_expiration_ms", None) + return MongoCryptOptions(**kwargs, enable_multiple_collinfo=True) + + +class AsyncClientEncryption(Generic[_DocumentType]): + """Explicit client-side field level encryption.""" + + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: AsyncMongoClient[_DocumentTypeArg], + codec_options: CodecOptions[_DocumentTypeArg], + kms_tls_options: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, + ) -> None: + """Explicit client-side field level encryption. + + The AsyncClientEncryption class encapsulates explicit operations on a key + vault collection that cannot be done directly on an AsyncMongoClient. Similar + to configuring auto encryption on an AsyncMongoClient, it is constructed with + an AsyncMongoClient (to a MongoDB cluster containing the key vault + collection), KMS provider configuration, and keyVaultNamespace. 
It + provides an API for explicitly encrypting and decrypting values, and + creating data keys. It does not provide an API to query keys from the + key vault collection, as this can be done directly on the AsyncMongoClient. + + See `explicit client-side encryption `_ for an example. + + :param kms_providers: Map of KMS provider options. The `kms_providers` + map values differ by provider: + + - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. + These are the AWS access key ID and AWS secret access key used + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. + + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support `CSFLE on-demand credentials `_. + :param key_vault_namespace: The namespace for the key vault collection. + The key vault collection contains all data keys used for encryption + and decryption. Data keys are stored as documents in this MongoDB + collection. Data keys are protected with encryption by a KMS + provider. + :param key_vault_client: An AsyncMongoClient connected to a MongoDB cluster + containing the `key_vault_namespace` collection. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` to use when encoding a + value for encryption and decoding the decrypted BSON value. This + should be the same CodecOptions instance configured on the + AsyncMongoClient, AsyncDatabase, or AsyncCollection used to access application + data. + :param kms_tls_options: A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.AsyncMongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None`` which defers to libmongocrypt's default which is currently 60000. + Set to 0 to disable key expiration. + + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. + + .. 
versionadded:: 3.9 + """ + if not _HAVE_PYMONGOCRYPT: + raise ConfigurationError( + "client-side field level encryption requires the pymongocrypt " + "library: install a compatible version with: " + "python -m pip install --upgrade 'pymongo[encryption]'" + ) + + check_min_pymongocrypt() + + if not isinstance(codec_options, CodecOptions): + raise TypeError( + f"codec_options must be an instance of bson.codec_options.CodecOptions, not {type(codec_options)}" + ) + + if not isinstance(key_vault_client, AsyncMongoClient): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any( + cls.__name__ == "AsyncMongoClient" for cls in type(key_vault_client).__mro__ + ): + raise TypeError( + f"AsyncMongoClient required but given {type(key_vault_client).__name__}" + ) + + self._kms_providers = kms_providers + self._key_vault_namespace = key_vault_namespace + self._key_vault_client = key_vault_client + self._codec_options = codec_options + + db, coll = key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + opts = AutoEncryptionOpts( + kms_providers, + key_vault_namespace, + kms_tls_options=kms_tls_options, + key_expiration_ms=key_expiration_ms, + ) + self._kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( + None, key_vault_coll, None, opts + ) + self._encryption = AsyncExplicitEncrypter( + self._io_callbacks, + _create_mongocrypt_options( + kms_providers=kms_providers, schema_map=None, key_expiration_ms=key_expiration_ms + ), + ) + # Use the same key vault collection as the callback. + assert self._io_callbacks.key_vault_coll is not None + self._key_vault_coll = self._io_callbacks.key_vault_coll + + async def create_encrypted_collection( + self, + database: AsyncDatabase[_DocumentTypeArg], + name: str, + encrypted_fields: Mapping[str, Any], + kms_provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> tuple[AsyncCollection[_DocumentTypeArg], Mapping[str, Any]]: + """Create a collection with encryptedFields. + + .. warning:: + This function does not update the encryptedFieldsMap in the client's + AutoEncryptionOpts, thus the user must create a new client after calling this function with + the encryptedFields returned. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.EncryptionError` will be + raised if the collection already exists. + + :param database: the database to create the collection + :param name: the name of the collection to create + :param encrypted_fields: Document that describes the encrypted fields for + Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example: + + .. code-block: python + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + + :param kms_provider: the KMS provider to be used + :param master_key: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. 
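+
+        For context, the surrounding :class:`AsyncClientEncryption` might be
+        constructed as in this sketch (``key_vault_client`` is an assumed,
+        already-connected :class:`AsyncMongoClient`)::
+
+            import os
+
+            from bson.codec_options import CodecOptions
+
+            client_encryption = AsyncClientEncryption(
+                kms_providers={"local": {"key": os.urandom(96)}},
+                key_vault_namespace="encryption.__keyVault",
+                key_vault_client=key_vault_client,
+                codec_options=CodecOptions(),
+            )
+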
+ :param kwargs: additional keyword arguments are the same as "create_collection". + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. + See the documentation for :meth:`~pymongo.asynchronous.database.AsyncDatabase.create_collection` for all valid options. + + :raises: - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. + + .. versionadded:: 4.4 + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + + """ + if not isinstance(database, AsyncDatabase): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "AsyncDatabase" for cls in type(database).__mro__): + raise TypeError(f"AsyncDatabase required but given {type(database).__name__}") + + encrypted_fields = deepcopy(encrypted_fields) + for i, field in enumerate(encrypted_fields["fields"]): + if isinstance(field, dict) and field.get("keyId") is None: + try: + encrypted_fields["fields"][i]["keyId"] = await self.create_data_key( + kms_provider=kms_provider, # type:ignore[arg-type] + master_key=master_key, + ) + except EncryptionError as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + kwargs["encryptedFields"] = encrypted_fields + kwargs["check_exists"] = False + try: + return ( + await database.create_collection(name=name, **kwargs), + encrypted_fields, + ) + except Exception as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + + async def create_data_key( + self, + kms_provider: str, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, + ) -> Binary: + """Create and insert a new data key into the key vault collection. + + :param kms_provider: The KMS provider to use. Supported values are + "aws", "azure", "gcp", "kmip", "local", or a named provider like + "kmip:name". + :param master_key: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + + If the `kms_provider` type is "aws" it is required and has the + following fields:: + + - `region` (string): Required. The AWS region, e.g. "us-east-1". + - `key` (string): Required. The Amazon Resource Name (ARN) to + the AWS customer. + - `endpoint` (string): Optional. An alternate host to send KMS + requests to. May include port number, e.g. + "kms.us-east-1.amazonaws.com:443". + + If the `kms_provider` type is "azure" it is required and has the + following fields:: + + - `keyVaultEndpoint` (string): Required. Host with optional + port, e.g. "example.vault.azure.net". + - `keyName` (string): Required. Key name in the key vault. + - `keyVersion` (string): Optional. Version of the key to use. + + If the `kms_provider` type is "gcp" it is required and has the + following fields:: + + - `projectId` (string): Required. The Google cloud project ID. + - `location` (string): Required. The GCP location, e.g. "us-east1". + - `keyRing` (string): Required. Name of the key ring that contains + the key to use. + - `keyName` (string): Required. Name of the key to use. + - `keyVersion` (string): Optional. Version of the key to use. + - `endpoint` (string): Optional. Host with optional port. + Defaults to "cloudkms.googleapis.com". + + If the `kms_provider` type is "kmip" it is optional and has the + following fields:: + + - `keyId` (string): Optional. 
`keyId` is the KMIP Unique + Identifier to a 96 byte KMIP Secret Data managed object. If + keyId is omitted, the driver creates a random 96 byte KMIP + Secret Data managed object. + - `endpoint` (string): Optional. Host with optional + port, e.g. "example.vault.azure.net:". + - `delegated` (bool): Optional. If True (recommended), the + KMIP server will perform encryption and decryption. If + delegated is not provided, defaults to false. + + :param key_alt_names: An optional list of string alternate + names used to reference a key. If a key is created with alternate + names, then encryption may refer to the key by the unique alternate + name instead of by ``key_id``. The following example shows creating + and referring to a data key by alternate name:: + + client_encryption.create_data_key("local", key_alt_names=["name1"]) + # reference the key with the alternate name + client_encryption.encrypt("457-55-5462", key_alt_name="name1", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + :param key_material: Sets the custom key material to be used + by the data key for encryption and decryption. + + :return: The ``_id`` of the created data key document as a + :class:`~bson.binary.Binary` with subtype + :data:`~bson.binary.UUID_SUBTYPE`. + + .. versionchanged:: 4.2 + Added the `key_material` parameter. + """ + self._check_closed() + with _wrap_encryption_errors(): + return cast( + Binary, + await self._encryption.create_data_key( + kms_provider, + master_key=master_key, + key_alt_names=key_alt_names, + key_material=key_material, + ), + ) + + async def _encrypt_helper( + self, + value: Any, + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + is_expression: bool = False, + text_opts: Optional[TextOpts] = None, + ) -> Any: + self._check_closed() + if isinstance(key_id, uuid.UUID): + key_id = Binary.from_uuid(key_id) + if key_id is not None and not ( + isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE + ): + raise TypeError("key_id must be a bson.binary.Binary with subtype 4") + + doc = encode( + {"v": value}, + codec_options=self._codec_options, + ) + range_opts_bytes = None + if range_opts: + range_opts_bytes = encode( + range_opts.document, + codec_options=self._codec_options, + ) + text_opts_bytes = None + if text_opts: + text_opts_bytes = encode( + text_opts.document, + codec_options=self._codec_options, + ) + with _wrap_encryption_errors(): + encrypted_doc = await self._encryption.encrypt( + value=doc, + algorithm=algorithm, + key_id=key_id, + key_alt_name=key_alt_name, + query_type=query_type, + contention_factor=contention_factor, + range_opts=range_opts_bytes, + is_expression=is_expression, + # For compatibility with pymongocrypt < 1.16: + **{"text_opts": text_opts_bytes} if text_opts_bytes else {}, + ) + return decode(encrypted_doc)["v"] + + async def encrypt( + self, + value: Any, + algorithm: str, + key_id: Optional[Union[Binary, uuid.UUID]] = None, + key_alt_name: Optional[str] = None, + query_type: Optional[str] = None, + contention_factor: Optional[int] = None, + range_opts: Optional[RangeOpts] = None, + text_opts: Optional[TextOpts] = None, + ) -> Binary: + """Encrypt a BSON value with a given key and algorithm. + + Note that exactly one of ``key_id`` or ``key_alt_name`` must be + provided. + + :param value: The BSON value to encrypt. 
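+
+        An illustrative async sketch of the flow documented here
+        (``client_encryption`` is assumed to be an open
+        :class:`AsyncClientEncryption` with a "local" KMS provider)::
+
+            key_id = await client_encryption.create_data_key("local")
+            ciphertext = await client_encryption.encrypt(
+                "457-55-5462",
+                Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
+                key_id=key_id,
+            )
+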
+        :param algorithm: (string) The encryption algorithm to use. See
+            :class:`Algorithm` for some valid options.
+        :param key_id: Identifies a data key by ``_id`` which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: Identifies a key vault document by 'keyAltName'.
+        :param query_type: (str) The query type to execute. See :class:`QueryType` for valid options.
+        :param contention_factor: (int) The contention factor to use
+            when the algorithm is :attr:`Algorithm.INDEXED`. An integer value
+            *must* be given when the :attr:`Algorithm.INDEXED` algorithm is
+            used.
+        :param range_opts: Index options for `range` queries. See
+            :class:`RangeOpts` for some valid options.
+        :param text_opts: Index options for `textPreview` queries. See
+            :class:`TextOpts` for some valid options.
+
+        :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6.
+
+        .. versionchanged:: 4.15
+           Added the `text_opts` parameter.
+
+        .. versionchanged:: 4.9
+           Added the `range_opts` parameter.
+
+        .. versionchanged:: 4.7
+           ``key_id`` can now be passed in as a :class:`uuid.UUID`.
+
+        .. versionchanged:: 4.2
+           Added the `query_type` and `contention_factor` parameters.
+        """
+        return cast(
+            Binary,
+            await self._encrypt_helper(
+                value=value,
+                algorithm=algorithm,
+                key_id=key_id,
+                key_alt_name=key_alt_name,
+                query_type=query_type,
+                contention_factor=contention_factor,
+                range_opts=range_opts,
+                is_expression=False,
+                text_opts=text_opts,
+            ),
+        )
+
+    async def encrypt_expression(
+        self,
+        expression: Mapping[str, Any],
+        algorithm: str,
+        key_id: Optional[Union[Binary, uuid.UUID]] = None,
+        key_alt_name: Optional[str] = None,
+        query_type: Optional[str] = None,
+        contention_factor: Optional[int] = None,
+        range_opts: Optional[RangeOpts] = None,
+    ) -> RawBSONDocument:
+        """Encrypt a BSON expression with a given key and algorithm.
+
+        Note that exactly one of ``key_id`` or ``key_alt_name`` must be
+        provided.
+
+        :param expression: The BSON aggregate or match expression to encrypt.
+        :param algorithm: (string) The encryption algorithm to use. See
+            :class:`Algorithm` for some valid options.
+        :param key_id: Identifies a data key by ``_id`` which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: Identifies a key vault document by 'keyAltName'.
+        :param query_type: (str) The query type to execute. See
+            :class:`QueryType` for valid options.
+        :param contention_factor: (int) The contention factor to use
+            when the algorithm is :attr:`Algorithm.INDEXED`. An integer value
+            *must* be given when the :attr:`Algorithm.INDEXED` algorithm is
+            used.
+        :param range_opts: Index options for `range` queries. See
+            :class:`RangeOpts` for some valid options.
+
+        :return: The encrypted expression, a :class:`~bson.RawBSONDocument`.
+
+        .. versionchanged:: 4.9
+           Added the `range_opts` parameter.
+
+        .. versionchanged:: 4.7
+           ``key_id`` can now be passed in as a :class:`uuid.UUID`.
+
+        .. versionadded:: 4.4
+        """
+        return cast(
+            RawBSONDocument,
+            await self._encrypt_helper(
+                value=expression,
+                algorithm=algorithm,
+                key_id=key_id,
+                key_alt_name=key_alt_name,
+                query_type=query_type,
+                contention_factor=contention_factor,
+                range_opts=range_opts,
+                is_expression=True,
+            ),
+        )
+
+    async def decrypt(self, value: Binary) -> Any:
+        """Decrypt an encrypted value.
+
+        :param value: (Binary) The encrypted value, a
+            :class:`~bson.binary.Binary` with subtype 6.
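+
+        Continuing the sketch from :meth:`encrypt` above (``ciphertext`` as
+        produced there)::
+
+            plaintext = await client_encryption.decrypt(ciphertext)
+            assert plaintext == "457-55-5462"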
+
+        :return: The decrypted BSON value.
+        """
+        self._check_closed()
+        if not (isinstance(value, Binary) and value.subtype == 6):
+            raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6")
+
+        with _wrap_encryption_errors():
+            doc = encode({"v": value})
+            decrypted_doc = await self._encryption.decrypt(doc)
+            return decode(decrypted_doc, codec_options=self._codec_options)["v"]
+
+    async def get_key(self, id: Binary) -> Optional[RawBSONDocument]:
+        """Get a data key by id.
+
+        :param id: (Binary) The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+
+        :return: The key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.find_one({"_id": id})
+
+    def get_keys(self) -> AsyncCursor[RawBSONDocument]:
+        """Get all of the data keys.
+
+        :return: An instance of :class:`~pymongo.asynchronous.cursor.AsyncCursor` over the data key
+            documents.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return self._key_vault_coll.find({})
+
+    async def delete_key(self, id: Binary) -> DeleteResult:
+        """Delete a key document in the key vault collection that has the given ``id``.
+
+        :param id: (Binary) The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+
+        :return: The delete result.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.delete_one({"_id": id})
+
+    async def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any:
+        """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``id``.
+
+        :param id: The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: The key alternate name to add.
+
+        :return: The previous version of the key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        update = {"$addToSet": {"keyAltNames": key_alt_name}}
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.find_one_and_update({"_id": id}, update)
+
+    async def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]:
+        """Get a key document in the key vault collection that has the given ``key_alt_name``.
+
+        :param key_alt_name: (str) The key alternate name of the key to get.
+
+        :return: The key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return await self._key_vault_coll.find_one({"keyAltNames": key_alt_name})
+
+    async def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]:
+        """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``.
+
+        Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty.
+
+        :param id: The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: The key alternate name to remove.
+
+        :return: The previous version of the key document.
+
+        ..
versionadded:: 4.2 + """ + self._check_closed() + pipeline = [ + { + "$set": { + "keyAltNames": { + "$cond": [ + {"$eq": ["$keyAltNames", [key_alt_name]]}, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": {"$ne": ["$$this", key_alt_name]}, + } + }, + ] + } + } + } + ] + assert self._key_vault_coll is not None + return await self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) + + async def rewrap_many_data_key( + self, + filter: Mapping[str, Any], + provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + ) -> RewrapManyDataKeyResult: + """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. + + :param filter: A document used to filter the data keys. + :param provider: The new KMS provider to use to encrypt the data keys, + or ``None`` to use the current KMS provider(s). + :param master_key: The master key fields corresponding to the new KMS + provider when ``provider`` is not ``None``. + + :return: A :class:`RewrapManyDataKeyResult`. + + This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. + Note that this does *not* require re-encrypting any of the data in your encrypted collections, + but rather refreshes the key that protects the keys that encrypt the data: + + .. code-block:: python + + client_encryption.rewrap_many_data_key( + filter={"keyAltNames": "optional filter for which keys you want to update"}, + master_key={ + "provider": "azure", # replace with your cloud provider + "master_key": { + # put the rest of your master_key options here + "key": "" + }, + }, + ) + + .. versionadded:: 4.2 + """ + if master_key is not None and provider is None: + raise ConfigurationError("A provider must be given if a master_key is given") + self._check_closed() + with _wrap_encryption_errors(): + raw_result = await self._encryption.rewrap_many_data_key(filter, provider, master_key) + if raw_result is None: + return RewrapManyDataKeyResult() + + raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) + replacements = [] + for key in raw_doc["v"]: + update_model = { + "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, + "$currentDate": {"updateDate": True}, + } + op = UpdateOne({"_id": key["_id"]}, update_model) + replacements.append(op) + if not replacements: + return RewrapManyDataKeyResult() + assert self._key_vault_coll is not None + result = await self._key_vault_coll.bulk_write(replacements) + return RewrapManyDataKeyResult(result) + + async def __aenter__(self) -> AsyncClientEncryption[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + def _check_closed(self) -> None: + if self._encryption is None: + raise InvalidOperation("Cannot use closed AsyncClientEncryption") + + async def close(self) -> None: + """Release resources. + + Note that using this class in a with-statement will automatically call + :meth:`close`:: + + with AsyncClientEncryption(...) as client_encryption: + encrypted = client_encryption.encrypt(value, ...) 
+ decrypted = client_encryption.decrypt(encrypted) + + """ + if self._io_callbacks: + await self._io_callbacks.close() + self._encryption.close() + self._io_callbacks = None + self._encryption = None diff --git a/pymongo/asynchronous/helpers.py b/pymongo/asynchronous/helpers.py new file mode 100644 index 0000000000..4a8c918133 --- /dev/null +++ b/pymongo/asynchronous/helpers.py @@ -0,0 +1,86 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Miscellaneous pieces that need to be synchronized.""" +from __future__ import annotations + +import asyncio +import socket +from typing import ( + Any, + Callable, + TypeVar, + cast, +) + +from pymongo.errors import ( + OperationFailure, +) +from pymongo.helpers_shared import _REAUTHENTICATION_REQUIRED_CODE + +_IS_SYNC = False + +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def _handle_reauth(func: F) -> F: + async def inner(*args: Any, **kwargs: Any) -> Any: + no_reauth = kwargs.pop("no_reauth", False) + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.message import _BulkWriteContext + + try: + return await func(*args, **kwargs) + except OperationFailure as exc: + if no_reauth: + raise + if exc.code == _REAUTHENTICATION_REQUIRED_CODE: + # Look for an argument that either is a AsyncConnection + # or has a connection attribute, so we can trigger + # a reauth. + conn = None + for arg in args: + if isinstance(arg, AsyncConnection): + conn = arg + break + if isinstance(arg, _BulkWriteContext): + conn = arg.conn # type: ignore[assignment] + break + if conn: + await conn.authenticate(reauthenticate=True) + else: + raise + return await func(*args, **kwargs) + raise + + return cast(F, inner) + + +async def _getaddrinfo( + host: Any, port: Any, **kwargs: Any +) -> list[ + tuple[ + socket.AddressFamily, + socket.SocketKind, + int, + str, + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], + ] +]: + if not _IS_SYNC: + loop = asyncio.get_running_loop() + return await loop.getaddrinfo(host, port, **kwargs) # type: ignore[return-value] + else: + return socket.getaddrinfo(host, port, **kwargs) diff --git a/pymongo/asynchronous/mongo_client.py b/pymongo/asynchronous/mongo_client.py new file mode 100644 index 0000000000..d9bf808d55 --- /dev/null +++ b/pymongo/asynchronous/mongo_client.py @@ -0,0 +1,2978 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Tools for connecting to MongoDB. + +.. seealso:: `Read and Write Settings `_ for examples of connecting + to replica sets or sets of mongos servers. + +To get a :class:`~pymongo.asynchronous.database.AsyncDatabase` instance from a +:class:`AsyncMongoClient` use either dictionary-style or attribute-style +access: + +.. doctest:: + + >>> from pymongo import AsyncMongoClient + >>> c = AsyncMongoClient() + >>> c.test_database + AsyncDatabase(AsyncMongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') + >>> c["test-database"] + AsyncDatabase(AsyncMongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') +""" +from __future__ import annotations + +import asyncio +import contextlib +import os +import warnings +import weakref +from collections import defaultdict +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + AsyncGenerator, + Callable, + Collection, + Coroutine, + FrozenSet, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry +from bson.timestamp import Timestamp +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.asynchronous import client_session, database, uri_parser +from pymongo.asynchronous.change_stream import AsyncChangeStream, AsyncClusterChangeStream +from pymongo.asynchronous.client_bulk import _AsyncClientBulk +from pymongo.asynchronous.client_session import _EmptyServerSession +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology, _ErrorContext +from pymongo.client_options import ClientOptions +from pymongo.driver_info import DriverInfo +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ClientBulkWriteException, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, +) +from pymongo.lock import ( + _HAS_REGISTER_AT_FORK, + _async_create_lock, + _release_locks, +) +from pymongo.logger import ( + _CLIENT_LOGGER, + _COMMAND_LOGGER, + _debug_log, + _log_client_error, + _log_or_warn, +) +from pymongo.message import _CursorAddress, _GetMore, _Query +from pymongo.monitoring import ConnectionClosedReason, _EventListeners +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, + _Op, +) +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ClientBulkWriteResult +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription +from pymongo.typings import ( + ClusterTime, + _Address, + _CollationIn, + _DocumentType, + _DocumentTypeArg, + _Pipeline, +) +from pymongo.uri_parser_shared import ( + SRV_SCHEME, + _check_options, + _handle_option_deprecations, + _handle_security_options, + _normalize_options, + _validate_uri, + split_hosts, +) +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern + +if TYPE_CHECKING: + from types import TracebackType + + from bson.objectid import ObjectId + from pymongo.asynchronous.bulk import 
_AsyncBulk + from pymongo.asynchronous.client_session import AsyncClientSession, _ServerSession + from pymongo.asynchronous.cursor import _ConnectionManager + from pymongo.asynchronous.encryption import _Encrypter + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.asynchronous.server import Server + from pymongo.read_concern import ReadConcern + from pymongo.response import Response + from pymongo.server_selectors import Selection + + +T = TypeVar("T") + +_WriteCall = Callable[ + [Optional["AsyncClientSession"], "AsyncConnection", bool], Coroutine[Any, Any, T] +] +_ReadCall = Callable[ + [Optional["AsyncClientSession"], "Server", "AsyncConnection", _ServerMode], + Coroutine[Any, Any, T], +] + +_IS_SYNC = False + +_WriteOp = Union[ + InsertOne, # type: ignore[type-arg] + DeleteOne, + DeleteMany, + ReplaceOne, # type: ignore[type-arg] + UpdateOne, + UpdateMany, +] + + +class AsyncMongoClient(common.BaseObject, Generic[_DocumentType]): + HOST = "localhost" + PORT = 27017 + # Define order to retrieve options from ClientOptions for __repr__. + # No host/port; these are retrieved from TopologySettings. + _constructor_args = ("document_class", "tz_aware", "connect") + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() # type: ignore[type-arg] + + def __init__( + self, + host: Optional[Union[str, Sequence[str]]] = None, + port: Optional[int] = None, + document_class: Optional[Type[_DocumentType]] = None, + tz_aware: Optional[bool] = None, + connect: Optional[bool] = None, + type_registry: Optional[TypeRegistry] = None, + **kwargs: Any, + ) -> None: + """Client for a MongoDB instance, a replica set, or a set of mongoses. + + .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of + False instead of None. + For more details, see the relevant section of the PyMongo 4.x migration guide: + :ref:`pymongo4-migration-direct-connection`. + + The client object is thread-safe and has connection-pooling built in. + If an operation fails because of a network error, + :class:`~pymongo.errors.ConnectionFailure` is raised and the client + reconnects in the background. Application code should handle this + exception (recognizing that the operation failed) and then continue to + execute. + + Best practice is to call :meth:`AsyncMongoClient.close` when the client is no longer needed, + or use the client in a with statement:: + + async with AsyncMongoClient(url) as client: + # Use client here. + + The `host` parameter can be a full `mongodb URI + `_, in addition to + a simple hostname. It can also be a list of hostnames but no more + than one URI. Any port specified in the host string(s) will override + the `port` parameter. For username and + passwords reserved characters like ':', '/', '+' and '@' must be + percent encoded following RFC 2396:: + + from urllib.parse import quote_plus + + uri = "mongodb://%s:%s@%s" % ( + quote_plus(user), quote_plus(password), host) + client = AsyncMongoClient(uri) + + Unix domain sockets are also supported. The socket path must be percent + encoded in the URI:: + + uri = "mongodb://%s:%s@%s" % ( + quote_plus(user), quote_plus(password), quote_plus(socket_path)) + client = AsyncMongoClient(uri) + + But not when passed as a simple hostname:: + + client = AsyncMongoClient('/tmp/mongodb-27017.sock') + + Starting with version 3.6, PyMongo supports mongodb+srv:// URIs. The + URI must include one, and only one, hostname. 
The hostname will be
+        resolved to one or more DNS `SRV records
+        `_ which will be used
+        as the seed list for connecting to the MongoDB deployment. When using
+        SRV URIs, the `authSource` and `replicaSet` configuration options can
+        be specified using `TXT records
+        `_. See the
+        `Initial DNS Seedlist Discovery spec
+        `_
+        for more details. Note that the use of SRV URIs implicitly enables
+        TLS support. Pass tls=false in the URI to override.
+
+        .. note:: AsyncMongoClient creation will block waiting for answers from
+          DNS when mongodb+srv:// URIs are used.
+
+        .. note:: Starting with version 3.0 the :class:`AsyncMongoClient`
+          constructor no longer blocks while connecting to the server or
+          servers, and it no longer raises
+          :class:`~pymongo.errors.ConnectionFailure` if they are
+          unavailable, nor :class:`~pymongo.errors.ConfigurationError`
+          if the user's credentials are wrong. Instead, the constructor
+          returns immediately and launches the connection process on
+          background threads. You can check if the server is available
+          like this::
+
+            from pymongo.errors import ConnectionFailure
+            client = AsyncMongoClient()
+            try:
+                # The ping command is cheap and does not require auth.
+                await client.admin.command('ping')
+            except ConnectionFailure:
+                print("Server not available")
+
+        .. warning:: When using PyMongo in a multiprocessing context, please
+          read `PyMongo multiprocessing `_ first.
+
+        .. note:: Many of the following options can be passed using a MongoDB
+          URI or keyword parameters. If the same option is passed in a URI and
+          as a keyword parameter the keyword parameter takes precedence.
+
+        :param host: hostname or IP address or Unix domain socket
+            path of a single mongod or mongos instance to connect to, or a
+            mongodb URI, or a list of hostnames (but no more than one mongodb
+            URI). If `host` is an IPv6 literal it must be enclosed in '['
+            and ']' characters
+            following the RFC 2732 URL syntax (e.g. '[::1]' for localhost).
+            Multihomed and round robin DNS addresses are **not** supported.
+        :param port: port number on which to connect
+        :param document_class: default class to use for
+            documents returned from queries on this client
+        :param tz_aware: if ``True``,
+            :class:`~datetime.datetime` instances returned as values
+            in a document by this :class:`AsyncMongoClient` will be timezone
+            aware (otherwise they will be naive)
+        :param connect: **Not supported by AsyncMongoClient**.
+        :param type_registry: instance of
+            :class:`~bson.codec_options.TypeRegistry` to enable encoding
+            and decoding of custom types.
+        :param kwargs: **Additional optional parameters available as keyword arguments:**
+
+        - `datetime_conversion` (optional): Specifies how UTC datetimes should be decoded
+          within BSON. Valid options include 'datetime_ms' to return as a
+          DatetimeMS, 'datetime' to return as a datetime.datetime, raising
+          a ValueError for out-of-range values, 'datetime_auto' to
+          return DatetimeMS objects when the underlying datetime is
+          out-of-range and 'datetime_clamp' to clamp to the minimum and
+          maximum possible datetimes. Defaults to 'datetime'. See
+          `handling out of range datetimes `_ for details.
+        - `directConnection` (optional): if ``True``, forces this client to
+          connect directly to the specified MongoDB host as a standalone.
+          If ``False``, the client connects to the entire replica set of
+          which the given MongoDB host(s) is a part. If this is ``True``
+          and a mongodb+srv:// URI or a URI containing multiple seeds is
+          provided, an exception will be raised.
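+
+          As a minimal sketch (the host name below is illustrative, not part
+          of this codebase), a direct connection to one replica set member::
+
+            from pymongo import AsyncMongoClient
+
+            # Connect to a single member only; do not discover the set.
+            client = AsyncMongoClient(
+                "mongodb://rs-member-1.example.net:27017",
+                directConnection=True,
+            )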
+ - `maxPoolSize` (optional): The maximum allowable number of + concurrent connections to each connected server. Requests to a + server will block if there are `maxPoolSize` outstanding + connections to the requested server. Defaults to 100. Can be + either 0 or None, in which case there is no limit on the number + of concurrent connections. + - `minPoolSize` (optional): The minimum required number of concurrent + connections that the pool will maintain to each connected server. + Default is 0. + - `maxIdleTimeMS` (optional): The maximum number of milliseconds that + a connection can remain idle in the pool before being removed and + replaced. Defaults to `None` (no limit). + - `maxConnecting` (optional): The maximum number of connections that + each pool can establish concurrently. Defaults to `2`. + - `timeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait when executing an operation + (including retry attempts) before raising a timeout error. + ``0`` or ``None`` means no timeout. + - `socketTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait for a response after sending an + ordinary (non-monitoring) database operation before concluding that + a network error has occurred. ``0`` or ``None`` means no timeout. + Defaults to ``None`` (no timeout). + - `connectTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait during server monitoring when + connecting a new socket to a server before concluding the server + is unavailable. ``0`` or ``None`` means no timeout. + Defaults to ``20000`` (20 seconds). + - `server_selector`: (callable or None) Optional, user-provided + function that augments server selection rules. The function should + accept as an argument a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + - `serverSelectionTimeoutMS`: (integer) Controls how long (in + milliseconds) the driver will wait to find an available, + appropriate server to carry out a database operation; while it is + waiting, multiple server monitoring operations may be carried out, + each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30 + seconds). + - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) + a thread will wait for a socket from the pool if the pool has no + free sockets. Defaults to ``None`` (no timeout). + - `heartbeatFrequencyMS`: (optional) The number of milliseconds + between periodic server checks, or None to accept the default + frequency of 10 seconds. + - `serverMonitoringMode`: (optional) The server monitoring mode to use. + Valid values are the strings: "auto", "stream", "poll". Defaults to "auto". + - `appname`: (string or None) The name of the application that + created this AsyncMongoClient instance. The server will log this value + upon establishing each connection. It is also recorded in the slow + query log and profile collections. + - `driver`: (pair or None) A driver implemented on top of PyMongo can + pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, + version, and platform to the message printed in the server log when + establishing a connection. + - `event_listeners`: a list or tuple of event listeners. See + :mod:`~pymongo.monitoring` for details. + - `retryWrites`: (boolean) Whether supported write operations + executed within this AsyncMongoClient will be retried once after a + network error. 
Defaults to ``True``.
+          The supported write operations are:
+
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, as long as
+            :class:`~pymongo.asynchronous.operations.UpdateMany` or
+            :class:`~pymongo.asynchronous.operations.DeleteMany` are not included.
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.delete_one`
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.insert_one`
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.insert_many`
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.replace_one`
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.update_one`
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_delete`
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_replace`
+          - :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one_and_update`
+
+          Unsupported write operations include, but are not limited to,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` using the ``$out``
+          pipeline operator and any operation with an unacknowledged write
+          concern (e.g. {w: 0}). See
+          https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md
+        - `retryReads`: (boolean) Whether supported read operations
+          executed within this AsyncMongoClient will be retried once after a
+          network error. Defaults to ``True``.
+          The supported read operations are:
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.find`,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.find_one`,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.aggregate` without ``$out``,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.distinct`,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.count`,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.estimated_document_count`,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.count_documents`,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.watch`,
+          :meth:`~pymongo.asynchronous.collection.AsyncCollection.list_indexes`,
+          :meth:`~pymongo.asynchronous.database.AsyncDatabase.watch`,
+          :meth:`~pymongo.asynchronous.database.AsyncDatabase.list_collections`,
+          :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.watch`,
+          and :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.list_databases`.
+
+          Unsupported read operations include, but are not limited to,
+          :meth:`~pymongo.asynchronous.database.AsyncDatabase.command` and any getMore
+          operation on a cursor.
+
+          Enabling retryable reads makes applications more resilient to
+          transient errors such as network failures, database upgrades, and
+          replica set failovers. For an exact definition of which errors
+          trigger a retry, see the `retryable reads specification
+          `_.
+
+        - `compressors`: Comma separated list of compressors for wire
+          protocol compression. The list is used to negotiate a compressor
+          with the server. Currently supported options are "snappy", "zlib"
+          and "zstd". Support for snappy requires the
+          `python-snappy `_ package.
+          zlib support requires the Python standard library zlib module. zstd
+          requires the `zstandard `_
+          package. By default no compression is used. Compression support
+          must also be enabled on the server. MongoDB 3.6+ supports snappy
+          and zlib compression. MongoDB 4.2+ adds support for zstd.
+          See `compress network traffic `_ for details.
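+
+          For example (a sketch; assumes the optional zstandard package is
+          installed and the server was started with zstd support enabled)::
+
+            # Negotiate zstd first, falling back to zlib.
+            client = AsyncMongoClient(
+                "mongodb://localhost:27017/?compressors=zstd,zlib"
+            )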
+ - `zlibCompressionLevel`: (int) The zlib compression level to use + when zlib is used as the wire protocol compressor. Supported values + are -1 through 9. -1 tells the zlib library to use its default + compression level (usually 6). 0 means no compression. 1 is best + speed. 9 is best compression. Defaults to -1. + - `uuidRepresentation`: The BSON representation to use when encoding + from and decoding to instances of :class:`~uuid.UUID`. Valid + values are the strings: "standard", "pythonLegacy", "javaLegacy", + "csharpLegacy", and "unspecified" (the default). New applications + should consider setting this to "standard" for cross language + compatibility. See `handling UUID data `_ for details. + - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + - `srvServiceName`: (string) The SRV service name to use for + "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: + + AsyncMongoClient("mongodb+srv://example.com/?srvServiceName=customname") + - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will + connect to. More specifically, when a "mongodb+srv://" connection string + resolves to more than srvMaxHosts number of hosts, the client will randomly + choose an srvMaxHosts sized subset of hosts. + + + | **Write Concern options:** + | (Only set if passed. No default values.) + + - `w`: (integer or string) If this is a replica set, write operations + will block until they have been replicated to the specified number + or tagged set of servers. `w=` always includes the replica set + primary (e.g. w=3 means write to the primary and wait until + replicated to **two** secondaries). Passing w=0 **disables write + acknowledgement** and all other write concern options. + - `wTimeoutMS`: **DEPRECATED** (integer) Used in conjunction with `w`. + Specify a value in milliseconds to control how long to wait for write propagation + to complete. If replication does not complete in the given + timeframe, a timeout exception is raised. Passing wTimeoutMS=0 + will cause **write operations to wait indefinitely**. + - `journal`: If ``True`` block until write operations have been + committed to the journal. Cannot be used in combination with + `fsync`. Write operations will fail with an exception if this + option is used when the server is running without journaling. + - `fsync`: If ``True`` and the server is running without journaling, + blocks until the server has synced all data files to disk. If the + server is running with journaling, this acts the same as the `j` + option, blocking until write operations have been committed to the + journal. Cannot be used in combination with `j`. + + | **Replica set keyword arguments for connecting with a replica set + - either directly or via a mongos:** + + - `replicaSet`: (string or None) The name of the replica set to + connect to. The driver will verify that all servers it connects to + match this name. Implies that the hosts specified are a seed list + and the driver should attempt to find all members of the set. + Defaults to ``None``. + + | **Read Preference:** + + - `readPreference`: The replica set read preference for this client. + One of ``primary``, ``primaryPreferred``, ``secondary``, + ``secondaryPreferred``, or ``nearest``. Defaults to ``primary``. 
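+
+          A hedged sketch (the replica set name and address are illustrative)
+          that prefers reads from a secondary when one is available::
+
+            # Reads go to a secondary if possible, otherwise the primary.
+            client = AsyncMongoClient(
+                "mongodb://localhost:27017/?replicaSet=rs0",
+                readPreference="secondaryPreferred",
+            )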
+ - `readPreferenceTags`: Specifies a tag set as a comma-separated list + of colon-separated key-value pairs. For example ``dc:ny,rack:1``. + Defaults to ``None``. + - `maxStalenessSeconds`: (integer) The maximum estimated + length of time a replica set secondary can fall behind the primary + in replication before it will no longer be selected for operations. + Defaults to ``-1``, meaning no maximum. If maxStalenessSeconds + is set, it must be a positive integer greater than or equal to + 90 seconds. + + .. seealso:: `Customize Server Selection `_ + + | **Authentication:** + + - `username`: A string. + - `password`: A string. + + Although username and password must be percent-escaped in a MongoDB + URI, they must not be percent-escaped when passed as parameters. In + this example, both the space and slash special characters are passed + as-is:: + + AsyncMongoClient(username="user name", password="pass/word") + + - `authSource`: The database to authenticate on. Defaults to the + database specified in the URI, if provided, or to "admin". + - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. + If no mechanism is specified, PyMongo automatically negotiates the + mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) with the MongoDB server. + - `authMechanismProperties`: Used to specify authentication mechanism + specific options. To specify the service name for GSSAPI + authentication pass authMechanismProperties='SERVICE_NAME:'. + To specify the session token for MONGODB-AWS authentication pass + ``authMechanismProperties='AWS_SESSION_TOKEN:'``. + + .. seealso:: `Authentication `_ + + | **TLS/SSL configuration:** + + - `tls`: (boolean) If ``True``, create the connection to the server + using transport layer security. Defaults to ``False``. + - `tlsInsecure`: (boolean) Specify whether TLS constraints should be + relaxed as much as possible. Setting ``tlsInsecure=True`` implies + ``tlsAllowInvalidCertificates=True`` and + ``tlsAllowInvalidHostnames=True``. Defaults to ``False``. Think + very carefully before setting this to ``True`` as it dramatically + reduces the security of TLS. + - `tlsAllowInvalidCertificates`: (boolean) If ``True``, continues + the TLS handshake regardless of the outcome of the certificate + verification process. If this is ``False``, and a value is not + provided for ``tlsCAFile``, PyMongo will attempt to load system + provided CA certificates. If the python version in use does not + support loading system CA certificates then the ``tlsCAFile`` + parameter must point to a file of CA certificates. + ``tlsAllowInvalidCertificates=False`` implies ``tls=True``. + Defaults to ``False``. Think very carefully before setting this + to ``True`` as that could make your application vulnerable to + on-path attackers. + - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS + hostname verification. ``tlsAllowInvalidHostnames=False`` implies + ``tls=True``. Defaults to ``False``. Think very carefully before + setting this to ``True`` as that could make your application + vulnerable to on-path attackers. + - `tlsCAFile`: A file containing a single or a bundle of + "certification authority" certificates, which are used to validate + certificates passed from the other end of the connection. + Implies ``tls=True``. Defaults to ``None``. + - `tlsCertificateKeyFile`: A file containing the client certificate + and private key. Implies ``tls=True``. Defaults to ``None``. + - `tlsCRLFile`: A file containing a PEM or DER formatted + certificate revocation list. 
Implies ``tls=True``. Defaults to
+          ``None``.
+        - `tlsCertificateKeyFilePassword`: The password or passphrase for
+          decrypting the private key in ``tlsCertificateKeyFile``. Only
+          necessary if the private key is encrypted. Defaults to ``None``.
+        - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables
+          certificate revocation status checking via the OCSP responder
+          specified on the server certificate.
+          ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``.
+          Defaults to ``False``.
+        - `ssl`: (boolean) Alias for ``tls``.
+
+        | **Read Concern options:**
+        | (If not set explicitly, this will use the server default)
+
+        - `readConcernLevel`: (string) The read concern level specifies the
+          level of isolation for read operations. For example, a read
+          operation using a read concern level of ``majority`` will only
+          return data that has been written to a majority of nodes. If the
+          level is left unspecified, the server default will be used.
+
+        | **Client side encryption options:**
+        | (If not set explicitly, client side encryption will not be enabled.)
+
+        - `auto_encryption_opts`: A
+          :class:`~pymongo.encryption_options.AutoEncryptionOpts` which
+          configures this client to automatically encrypt collection commands
+          and automatically decrypt results. See
+          `client-side field level encryption `_ for an example.
+          If a :class:`AsyncMongoClient` is configured with
+          ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a
+          separate internal ``AsyncMongoClient`` is created if any of the
+          following are true:
+
+          - A ``key_vault_client`` is not passed to
+            :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+          - ``bypass_auto_encryption=False`` is passed to
+            :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+
+        | **Stable API options:**
+        | (If not set explicitly, Stable API will not be enabled.)
+
+        - `server_api`: A
+          :class:`~pymongo.server_api.ServerApi` which configures this
+          client to use Stable API. See `versioned API `_ for
+          details.
+
+        .. seealso:: The MongoDB documentation on `connections `_.
+
+        .. versionchanged:: 4.5
+           Added the ``serverMonitoringMode`` keyword argument.
+
+        .. versionchanged:: 4.2
+           Added the ``timeoutMS`` keyword argument.
+
+        .. versionchanged:: 4.0
+
+           - Removed the fsync, unlock, is_locked, database_names, and
+             close_cursor methods.
+             See the :ref:`pymongo4-migration-guide`.
+           - Removed the ``waitQueueMultiple`` and ``socketKeepAlive``
+             keyword arguments.
+           - The default for `uuidRepresentation` was changed from
+             ``pythonLegacy`` to ``unspecified``.
+           - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and
+             keyword arguments.
+
+        .. versionchanged:: 3.12
+           Added the ``server_api`` keyword argument.
+           The following keyword arguments were deprecated:
+
+           - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor
+             of ``tlsCertificateKeyFile``.
+
+        .. versionchanged:: 3.11
+           Added the following keyword arguments and URI options:
+
+           - ``tlsDisableOCSPEndpointCheck``
+           - ``directConnection``
+
+        .. versionchanged:: 3.9
+           Added the ``retryReads`` keyword argument and URI option.
+           Added the ``tlsInsecure`` keyword argument and URI option.
+           The following keyword arguments and URI options were deprecated:
+
+           - ``wTimeout`` was deprecated in favor of ``wTimeoutMS``.
+           - ``j`` was deprecated in favor of ``journal``.
+           - ``ssl_cert_reqs`` was deprecated in favor of
+             ``tlsAllowInvalidCertificates``.
+           - ``ssl_match_hostname`` was deprecated in favor of
+             ``tlsAllowInvalidHostnames``.
+ - ``ssl_ca_certs`` was deprecated in favor of ``tlsCAFile``. + - ``ssl_certfile`` was deprecated in favor of + ``tlsCertificateKeyFile``. + - ``ssl_crlfile`` was deprecated in favor of ``tlsCRLFile``. + - ``ssl_pem_passphrase`` was deprecated in favor of + ``tlsCertificateKeyFilePassword``. + + .. versionchanged:: 3.9 + ``retryWrites`` now defaults to ``True``. + + .. versionchanged:: 3.8 + Added the ``server_selector`` keyword argument. + Added the ``type_registry`` keyword argument. + + .. versionchanged:: 3.7 + Added the ``driver`` keyword argument. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + Added the ``retryWrites`` keyword argument and URI option. + + .. versionchanged:: 3.5 + Add ``username`` and ``password`` options. Document the + ``authSource``, ``authMechanism``, and ``authMechanismProperties`` + options. + Deprecated the ``socketKeepAlive`` keyword argument and URI option. + ``socketKeepAlive`` now defaults to ``True``. + + .. versionchanged:: 3.0 + :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` is now the one and only + client class for a standalone server, mongos, or replica set. + It includes the functionality that had been split into + :class:`~pymongo.asynchronous.mongo_client.MongoReplicaSetClient`: it can connect + to a replica set, discover all its members, and monitor the set for + stepdowns, elections, and reconfigs. + + The :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` constructor no + longer blocks while connecting to the server or servers, and it no + longer raises :class:`~pymongo.errors.ConnectionFailure` if they + are unavailable, nor :class:`~pymongo.errors.ConfigurationError` + if the user's credentials are wrong. Instead, the constructor + returns immediately and launches the connection process on + background threads. + + Therefore the ``alive`` method is removed since it no longer + provides meaningful information; even if the client is disconnected, + it may discover a server in time to fulfill the next operation. + + In PyMongo 2.x, :class:`~pymongo.asynchronous.AsyncMongoClient` accepted a list of + standalone MongoDB servers and used the first it could connect to:: + + AsyncMongoClient(['host1.com:27017', 'host2.com:27017']) + + A list of multiple standalones is no longer supported; if multiple + servers are listed they must be members of the same replica set, or + mongoses in the same sharded cluster. + + The behavior for a list of mongoses is changed from "high + availability" to "load balancing". Before, the client connected to + the lowest-latency mongos in the list, and used it until a network + error prompted it to re-evaluate all mongoses' latencies and + reconnect to one of them. In PyMongo 3, the client monitors its + network latency to all the mongoses continuously, and distributes + operations evenly among those with the lowest latency. See + `load balancing `_ for more information. + + The ``connect`` option is added. + + The ``start_request``, ``in_request``, and ``end_request`` methods + are removed, as well as the ``auto_start_request`` option. + + The ``copy_database`` method is removed, see + `Copy and Clone Databases `_ for alternatives. + + The :meth:`AsyncMongoClient.disconnect` method is removed; it was a + synonym for :meth:`~pymongo.asynchronous.AsyncMongoClient.close`. + + :class:`~pymongo.asynchronous.mongo_client.AsyncMongoClient` no longer returns an + instance of :class:`~pymongo.asynchronous.database.AsyncDatabase` for attribute names + with leading underscores. 
You must use dict-style lookups instead:: + + client['__my_database__'] + + Not:: + + client.__my_database__ + + .. versionchanged:: 4.7 + Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + + .. versionchanged:: 4.9 + The default value of ``connect`` is changed to ``False`` when running in a + Function-as-a-service environment. + """ + doc_class = document_class or dict + self._init_kwargs: dict[str, Any] = { + "host": host, + "port": port, + "document_class": doc_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } + + if host is None: + host = self.HOST + if isinstance(host, str): + host = [host] + if port is None: + port = self.PORT + if not isinstance(port, int): + raise TypeError(f"port must be an instance of int, not {type(port)}") + self._host = host + self._port = port + self._topology: Topology = None # type: ignore[assignment] + self._timeout: float | None = None + self._topology_settings: TopologySettings = None # type: ignore[assignment] + self._event_listeners: _EventListeners | None = None + + # _pool_class, _monitor_class, and _condition_class are for deep + # customization of PyMongo, e.g. Motor. + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) + + # Parse options passed as kwargs. + keyword_opts = common._CaseInsensitiveDictionary(kwargs) + keyword_opts["document_class"] = doc_class + self._resolve_srv_info: dict[str, Any] = {"keyword_opts": keyword_opts} + + self._seeds = set() + is_srv = False + username = None + password = None + dbase = None + opts = common._CaseInsensitiveDictionary() + fqdn = None + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + if len([h for h in self._host if "/" in h]) > 1: + raise ConfigurationError("host must not contain multiple MongoDB URIs") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + res = _validate_uri( + entity, + port, + validate=True, + warn=True, + normalize=False, + srv_max_hosts=srv_max_hosts, + ) + is_srv = entity.startswith(SRV_SCHEME) + self._seeds.update(res["nodelist"]) + username = res["username"] or username + password = res["password"] or password + dbase = res["database"] or dbase + opts = res["options"] + fqdn = res["fqdn"] + else: + self._seeds.update(split_hosts(entity, self._port)) + if not self._seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in self._seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + if type_registry is not None: + keyword_opts["type_registry"] = type_registry + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. 
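+            # A FaaS runtime may fork the worker after import; connecting
+            # lazily avoids sharing sockets and monitors across that fork.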
+ from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, self._seeds) + + # Username and password passed as kwargs override user info in URI. + username = opts.get("username", username) + password = opts.get("password", password) + self._options = ClientOptions(username, password, dbase, opts, _IS_SYNC) + + self._default_database_name = dbase + self._lock = _async_create_lock() + self._kill_cursors_queue: list = [] # type: ignore[type-arg] + + self._encrypter: Optional[_Encrypter] = None + + self._resolve_srv_info.update( + { + "is_srv": is_srv, + "username": username, + "password": password, + "dbase": dbase, + "seeds": self._seeds, + "fqdn": fqdn, + "srv_service_name": srv_service_name, + "pool_class": pool_class, + "monitor_class": monitor_class, + "condition_class": condition_class, + } + ) + + super().__init__( + self._options.codec_options, + self._options.read_preference, + self._options.write_concern, + self._options.read_concern, + ) + + self._init_based_on_options(self._seeds, srv_max_hosts, srv_service_name) + + self._opened = False + self._closed = False + self._loop: Optional[asyncio.AbstractEventLoop] = None + if not is_srv: + self._init_background() + + if _IS_SYNC and connect: + self._get_topology() # type: ignore[unused-coroutine] + + async def _resolve_srv(self) -> None: + keyword_opts = self._resolve_srv_info["keyword_opts"] + seeds = set() + opts = common._CaseInsensitiveDictionary() + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + # Determine connection timeout from kwargs. + timeout = keyword_opts.get("connecttimeoutms") + if timeout is not None: + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) + res = await uri_parser._parse_srv( + entity, + self._port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) + seeds.update(res["nodelist"]) + opts = res["options"] + else: + seeds.update(split_hosts(entity, self._port)) + + if not seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + tz_aware = keyword_opts["tz_aware"] + connect = keyword_opts["connect"] + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. 
+ from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, seeds) + + # Username and password passed as kwargs override user info in URI. + username = opts.get("username", self._resolve_srv_info["username"]) + password = opts.get("password", self._resolve_srv_info["password"]) + self._options = ClientOptions( + username, password, self._resolve_srv_info["dbase"], opts, _IS_SYNC + ) + + self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + + def _init_based_on_options( + self, seeds: Collection[tuple[str, int]], srv_max_hosts: Any, srv_service_name: Any + ) -> None: + self._event_listeners = self._options.pool_options._event_listeners + self._topology_settings = TopologySettings( + seeds=seeds, + replica_set_name=self._options.replica_set_name, + pool_class=self._resolve_srv_info["pool_class"], + pool_options=self._options.pool_options, + monitor_class=self._resolve_srv_info["monitor_class"], + condition_class=self._resolve_srv_info["condition_class"], + local_threshold_ms=self._options.local_threshold_ms, + server_selection_timeout=self._options.server_selection_timeout, + server_selector=self._options.server_selector, + heartbeat_frequency=self._options.heartbeat_frequency, + fqdn=self._resolve_srv_info["fqdn"], + direct_connection=self._options.direct_connection, + load_balanced=self._options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=self._options.server_monitoring_mode, + topology_id=self._topology_settings._topology_id if self._topology_settings else None, + ) + if self._options.auto_encryption_opts: + from pymongo.asynchronous.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) + self._timeout = self._options.timeout + + def _normalize_and_validate_options( + self, opts: common._CaseInsensitiveDictionary, seeds: set[tuple[str, int | None]] + ) -> common._CaseInsensitiveDictionary: + # Handle security-option conflicts in combined options. + opts = _handle_security_options(opts) + # Normalize combined options. + opts = _normalize_options(opts) + _check_options(seeds, opts) + return opts + + def _validate_kwargs_and_update_opts( + self, + keyword_opts: common._CaseInsensitiveDictionary, + opts: common._CaseInsensitiveDictionary, + ) -> common._CaseInsensitiveDictionary: + # Handle deprecated options in kwarg options. + keyword_opts = _handle_option_deprecations(keyword_opts) + # Validate kwarg options. + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) + # Override connection string options with kwarg options. + opts.update(keyword_opts) + return opts + + async def aconnect(self) -> None: + """Explicitly connect to MongoDB asynchronously instead of on the first operation.""" + await self._get_topology() + + def _init_background(self, old_pid: Optional[int] = None) -> None: + self._topology = Topology(self._topology_settings) + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. 
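+            # (After a fork, each registered client's topology is reset in
+            # the child; see _after_fork below.)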
+            AsyncMongoClient._clients[self._topology._topology_id] = self
+            # Seed the topology with the old one's pid so we can detect clients
+            # that are opened before a fork and used after.
+            self._topology._pid = old_pid
+
+        async def target() -> bool:
+            client = self_ref()
+            if client is None:
+                return False  # Stop the executor.
+            await AsyncMongoClient._process_periodic_tasks(client)
+            return True
+
+        executor = periodic_executor.AsyncPeriodicExecutor(
+            interval=common.KILL_CURSOR_FREQUENCY,
+            min_interval=common.MIN_HEARTBEAT_INTERVAL,
+            target=target,
+            name="pymongo_kill_cursors_thread",
+        )
+
+        # We strongly reference the executor and it weakly references us via
+        # this closure. When the client is freed, stop the executor soon.
+        self_ref: Any = weakref.ref(self, executor.close)
+        self._kill_cursors_executor = executor
+        self._opened = False
+
+    def append_metadata(self, driver_info: DriverInfo) -> None:
+        """Appends the given metadata to existing driver metadata.
+
+        :param driver_info: a :class:`~pymongo.driver_info.DriverInfo`
+
+        .. versionadded:: 4.14
+        """
+
+        if not isinstance(driver_info, DriverInfo):
+            raise TypeError(
+                f"driver_info must be an instance of DriverInfo, not {type(driver_info)}"
+            )
+        self._options.pool_options._update_metadata(driver_info)
+
+    def _should_pin_cursor(self, session: Optional[AsyncClientSession]) -> Optional[bool]:
+        return self._options.load_balanced and not (session and session.in_transaction)
+
+    def _after_fork(self) -> None:
+        """Resets topology in a child after successfully forking."""
+        self._init_background(self._topology._pid)
+        # Reset the session pool to avoid duplicate sessions in the child process.
+        self._topology._session_pool.reset()
+
+    def _duplicate(self, **kwargs: Any) -> AsyncMongoClient:  # type: ignore[type-arg]
+        args = self._init_kwargs.copy()
+        args.update(kwargs)
+        return AsyncMongoClient(**args)
+
+    async def watch(
+        self,
+        pipeline: Optional[_Pipeline] = None,
+        full_document: Optional[str] = None,
+        resume_after: Optional[Mapping[str, Any]] = None,
+        max_await_time_ms: Optional[int] = None,
+        batch_size: Optional[int] = None,
+        collation: Optional[_CollationIn] = None,
+        start_at_operation_time: Optional[Timestamp] = None,
+        session: Optional[client_session.AsyncClientSession] = None,
+        start_after: Optional[Mapping[str, Any]] = None,
+        comment: Optional[Any] = None,
+        full_document_before_change: Optional[str] = None,
+        show_expanded_events: Optional[bool] = None,
+    ) -> AsyncChangeStream[_DocumentType]:
+        """Watch changes on this cluster.
+
+        Performs an aggregation with an implicit initial ``$changeStream``
+        stage and returns a
+        :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` cursor which
+        iterates over changes on all databases on this cluster.
+
+        Introduced in MongoDB 4.0.
+
+        .. code-block:: python
+
+            async with await client.watch() as stream:
+                async for change in stream:
+                    print(change)
+
+        The :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` iterable
+        blocks until the next change document is returned or an error is
+        raised. If the
+        :meth:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream.next` method
+        encounters a network error when retrieving a batch from the server,
+        it will automatically attempt to recreate the cursor such that no
+        change events are missed. Any error encountered during the resume
+        attempt indicates there may be an outage and will be raised.
+
+        .. code-block:: python
+
+            try:
+                async with await client.watch(
+                    [{"$match": {"operationType": "insert"}}]
+                ) as stream:
+                    async for insert_change in stream:
+                        print(insert_change)
+            except pymongo.errors.PyMongoError:
+                # The AsyncChangeStream encountered an unrecoverable error or the
+                # resume attempt failed to recreate the cursor.
+                logging.error("...")
+
+        For a precise description of the resume process see the
+        `change streams specification`_.
+
+        :param pipeline: A list of aggregation pipeline stages to
+            append to an initial ``$changeStream`` stage. Not all
+            pipeline stages are valid after a ``$changeStream`` stage, see the
+            MongoDB documentation on change streams for the supported stages.
+        :param full_document: The fullDocument to pass as an option
+            to the ``$changeStream`` stage. Allowed values: 'updateLookup',
+            'whenAvailable', 'required'. When set to 'updateLookup', the
+            change notification for partial updates will include both a delta
+            describing the changes to the document, as well as a copy of the
+            entire document that was changed from some time after the change
+            occurred.
+        :param full_document_before_change: Allowed values: 'whenAvailable'
+            and 'required'. Change events may now result in a
+            'fullDocumentBeforeChange' response field.
+        :param resume_after: A resume token. If provided, the
+            change stream will start returning changes that occur directly
+            after the operation specified in the resume token. A resume token
+            is the _id value of a change document.
+        :param max_await_time_ms: The maximum time in milliseconds
+            for the server to wait for changes before responding to a getMore
+            operation.
+        :param batch_size: The maximum number of documents to return
+            per batch.
+        :param collation: The :class:`~pymongo.collation.Collation`
+            to use for the aggregation.
+        :param start_at_operation_time: If provided, the resulting
+            change stream will only return changes that occurred at or after
+            the specified :class:`~bson.timestamp.Timestamp`. Requires
+            MongoDB >= 4.0.
+        :param session: a
+            :class:`~pymongo.asynchronous.client_session.AsyncClientSession`.
+        :param start_after: The same as `resume_after` except that
+            `start_after` can resume notifications after an invalidate event.
+            This option and `resume_after` are mutually exclusive.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`.
+
+        :return: A :class:`~pymongo.asynchronous.change_stream.AsyncClusterChangeStream` cursor.
+
+        .. versionchanged:: 4.3
+           Added `show_expanded_events` parameter.
+
+        .. versionchanged:: 4.2
+           Added ``full_document_before_change`` parameter.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionchanged:: 3.9
+           Added the ``start_after`` parameter.
+
+        .. versionadded:: 3.7
+
+        .. seealso:: The MongoDB documentation on `changeStreams `_.
+
+        .. _change streams specification:
+            https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md
+        """
+        change_stream = AsyncClusterChangeStream(
+            self.admin,
+            pipeline,
+            full_document,
+            resume_after,
+            max_await_time_ms,
+            batch_size,
+            collation,
+            start_at_operation_time,
+            session,
+            start_after,
+            comment,
+            full_document_before_change,
+            show_expanded_events=show_expanded_events,
+        )
+
+        await change_stream._initialize_cursor()
+        return change_stream
+
+    @property
+    def topology_description(self) -> TopologyDescription:
+        """The description of the connected MongoDB deployment.
+
+        >>> client.topology_description
+        <TopologyDescription id: ..., topology_type: ReplicaSetWithPrimary, servers: [<ServerDescription ...>, <ServerDescription ...>, <ServerDescription ...>]>
+        >>> client.topology_description.topology_type_name
+        'ReplicaSetWithPrimary'
+
+        Note that the description is periodically updated in the background
+        but the returned object itself is immutable. Access this property again
+        to get a more recent
+        :class:`~pymongo.topology_description.TopologyDescription`.
+
+        :return: An instance of
+            :class:`~pymongo.topology_description.TopologyDescription`.
+
+        .. versionadded:: 4.0
+        """
+        if self._topology is None:
+            servers = {(host, port): ServerDescription((host, port)) for host, port in self._seeds}
+            return TopologyDescription(
+                TOPOLOGY_TYPE.Unknown,
+                servers,
+                None,
+                None,
+                None,
+                self._topology_settings,
+            )
+        return self._topology.description
+
+    @property
+    def nodes(self) -> FrozenSet[_Address]:
+        """Set of all currently connected servers.
+
+        .. warning:: When connected to a replica set the value of :attr:`nodes`
+          can change over time as :class:`AsyncMongoClient`'s view of the replica
+          set changes. :attr:`nodes` can also be an empty set when
+          :class:`AsyncMongoClient` is first instantiated and hasn't yet connected
+          to any servers, or a network partition causes it to lose connection
+          to all servers.
+        """
+        if self._topology is None:
+            return frozenset()
+        description = self._topology.description
+        return frozenset(s.address for s in description.known_servers)
+
+    @property
+    def options(self) -> ClientOptions:
+        """The configuration options for this client.
+
+        :return: An instance of :class:`~pymongo.client_options.ClientOptions`.
+
+        .. versionadded:: 4.0
+        """
+        return self._options
+
+    def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]:
+        return (
+            tuple(sorted(self._resolve_srv_info["seeds"])),
+            self._options.replica_set_name,
+            self._resolve_srv_info["fqdn"],
+            self._resolve_srv_info["srv_service_name"],
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if isinstance(other, self.__class__):
+            return self.eq_props() == other.eq_props()
+        return NotImplemented
+
+    def __ne__(self, other: Any) -> bool:
+        return not self == other
+
+    def __hash__(self) -> int:
+        return hash(self.eq_props())
+
+    def _repr_helper(self) -> str:
+        def option_repr(option: str, value: Any) -> str:
+            """Fix options whose __repr__ isn't usable in a constructor."""
+            if option == "document_class":
+                if value is dict:
+                    return "document_class=dict"
+                else:
+                    return f"document_class={value.__module__}.{value.__name__}"
+            if option in common.TIMEOUT_OPTIONS and value is not None:
+                return f"{option}={int(value * 1000)}"
+
+            return f"{option}={value!r}"
+
+        # Host first...
+        if self._topology is None:
+            options = [f"host='mongodb+srv://{self._resolve_srv_info['fqdn']}'"]
+        else:
+            options = [
+                "host=%r"
+                % [
+                    "%s:%d" % (host, port) if port is not None else host
+                    for host, port in self._topology_settings.seeds
+                ]
+            ]
+        # ... then everything in self._constructor_args...
+        options.extend(
+            option_repr(key, self._options._options[key]) for key in self._constructor_args
+        )
+        # ... then everything else.
+        options.extend(
+            option_repr(key, self._options._options[key])
+            for key in self._options._options
+            if key not in set(self._constructor_args) and key != "username" and key != "password"
+        )
+        return ", ".join(options)
+
+    def __repr__(self) -> str:
+        return f"{type(self).__name__}({self._repr_helper()})"
+
+    def __getattr__(self, name: str) -> database.AsyncDatabase[_DocumentType]:
+        """Get a database by name.
+ + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" database, use client[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> database.AsyncDatabase[_DocumentType]: + """Get a database by name. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + return database.AsyncDatabase(self, name) + + def __del__(self) -> None: + """Check that this AsyncMongoClient has been closed and issue a warning if not.""" + try: + if self._opened and not self._closed: + warnings.warn( + ( + f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" + f"Call {type(self).__name__}.close() to safely shut down your client and free up resources." + ), + ResourceWarning, + stacklevel=2, + ) + except (AttributeError, TypeError): + # Ignore errors at interpreter exit. + pass + + def _close_cursor_soon( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Request that a cursor and/or connection be cleaned up soon.""" + self._kill_cursors_queue.append((address, cursor_id, conn_mgr)) + + def _start_session(self, implicit: bool, **kwargs: Any) -> AsyncClientSession: + server_session = _EmptyServerSession() + opts = client_session.SessionOptions(**kwargs) + return client_session.AsyncClientSession(self, server_session, opts, implicit) + + def start_session( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.AsyncClientSession: + """Start a logical session. + + This method takes the same parameters as + :class:`~pymongo.asynchronous.client_session.SessionOptions`. See the + :mod:`~pymongo.asynchronous.client_session` module for details and examples. + + A :class:`~pymongo.asynchronous.client_session.AsyncClientSession` may only be used with + the AsyncMongoClient that started it. :class:`AsyncClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`AsyncClientSession` cannot be used + to run multiple operations concurrently. + + :return: An instance of :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + + .. versionadded:: 3.6 + """ + return self._start_session( + False, + causal_consistency=causal_consistency, + default_transaction_options=default_transaction_options, + snapshot=snapshot, + ) + + def _ensure_session( + self, session: Optional[AsyncClientSession] = None + ) -> Optional[AsyncClientSession]: + """If provided session is None, lend a temporary session.""" + if session: + return session + + try: + # Don't make implicit sessions causally consistent. Applications + # should always opt-in. + return self._start_session(True, causal_consistency=False) + except (ConfigurationError, InvalidOperation): + # Sessions not supported. 
+ return None + + def _send_cluster_time( + self, command: MutableMapping[str, Any], session: Optional[AsyncClientSession] + ) -> None: + topology_time = self._topology.max_cluster_time() + session_time = session.cluster_time if session else None + if topology_time and session_time: + if topology_time["clusterTime"] > session_time["clusterTime"]: + cluster_time: Optional[ClusterTime] = topology_time + else: + cluster_time = session_time + else: + cluster_time = topology_time or session_time + if cluster_time: + command["$clusterTime"] = cluster_time + + def get_default_database( + self, + default: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.AsyncDatabase[_DocumentType]: + """Get the database named in the MongoDB connection URI. + + >>> uri = 'mongodb://host/my_database' + >>> client = AsyncMongoClient(uri) + >>> db = client.get_default_database() + >>> assert db.name == 'my_database' + >>> db = client.get_database() + >>> assert db.name == 'my_database' + + Useful in scripts where you want to choose which database to use + based only on the URI in a configuration file. + + :param default: the database name to use if no database name + was provided in the URI. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncMongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncMongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncMongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncMongoClient` is + used. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.8 + Undeprecated. Added the ``default``, ``codec_options``, + ``read_preference``, ``write_concern`` and ``read_concern`` + parameters. + + .. versionchanged:: 3.5 + Deprecated, use :meth:`get_database` instead. + """ + if self._default_database_name is None and default is None: + raise ConfigurationError("No default database name defined or provided.") + + name = cast(str, self._default_database_name or default) + return database.AsyncDatabase( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def get_database( + self, + name: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.AsyncDatabase[_DocumentType]: + """Get a :class:`~pymongo.asynchronous.database.AsyncDatabase` with the given name and + options. + + Useful for creating a :class:`~pymongo.asynchronous.database.AsyncDatabase` with + different codec options, read preference, and/or write concern from + this :class:`AsyncMongoClient`. 
+ + >>> client.read_preference + Primary() + >>> db1 = client.test + >>> db1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> db2 = client.get_database( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> db2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the database - a string. If ``None`` + (the default) the database named in the MongoDB connection URI is + returned. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`AsyncMongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`AsyncMongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`AsyncMongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`AsyncMongoClient` is + used. + + .. versionchanged:: 3.5 + The `name` parameter is now optional, defaulting to the database + named in the MongoDB connection URI. + """ + if name is None: + if self._default_database_name is None: + raise ConfigurationError("No default database defined") + name = self._default_database_name + + return database.AsyncDatabase( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def _database_default_options(self, name: str) -> database.AsyncDatabase: # type: ignore[type-arg] + """Get a AsyncDatabase instance with the default settings.""" + return self.get_database( + name, + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + ) + + async def __aenter__(self) -> AsyncMongoClient[_DocumentType]: + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + await self.close() + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'AsyncMongoClient' object is not iterable") + + next = __next__ + + async def _server_property(self, attr_name: str) -> Any: + """An attribute of the current server's description. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + Not threadsafe if used multiple times in a single method, since + the server may change. In such cases, store a local reference to a + ServerDescription first, then use its properties. + """ + server = await (await self._get_topology()).select_server( + writable_server_selector, _Op.TEST + ) + + return getattr(server.description, attr_name) + + @property + async def address(self) -> Optional[tuple[str, int]]: + """(host, port) of the current standalone, primary, or mongos, or None. + + Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if + the client is load-balancing among mongoses, since there is no single + address. Use :attr:`nodes` instead. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + .. 
versionadded:: 3.0 + """ + if self._topology is None: + await self._get_topology() + topology_type = self._topology._description.topology_type + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): + raise InvalidOperation( + 'Cannot use "address" property when load balancing among' + ' mongoses, use "nodes" instead.' + ) + return await self._server_property("address") + + @property + async def primary(self) -> Optional[tuple[str, int]]: + """The (host, port) of the current primary of the replica set. + + Returns ``None`` if this client is not connected to a replica set, + there is no primary, or this client was created without the + `replicaSet` option. + + .. versionadded:: 3.0 + AsyncMongoClient gained this property in version 3.0. + """ + if self._topology is None: + await self._get_topology() + return await self._topology.get_primary() # type: ignore[return-value] + + @property + async def secondaries(self) -> set[_Address]: + """The secondary members known to this client. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no visible secondaries, or this + client was created without the `replicaSet` option. + + .. versionadded:: 3.0 + AsyncMongoClient gained this property in version 3.0. + """ + if self._topology is None: + await self._get_topology() + return await self._topology.get_secondaries() + + @property + async def arbiters(self) -> set[_Address]: + """Arbiters in the replica set. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no arbiters, or this client was + created without the `replicaSet` option. + """ + if self._topology is None: + await self._get_topology() + return await self._topology.get_arbiters() + + @property + async def is_primary(self) -> bool: + """If this client is connected to a server that can accept writes. + + True if the current server is a standalone, mongos, or the primary of + a replica set. If the client is not connected, this will block until a + connection is established or raise ServerSelectionTimeoutError if no + server is available. + """ + return await self._server_property("is_writable") + + @property + async def is_mongos(self) -> bool: + """If this client is connected to mongos. If the client is not + connected, this will block until a connection is established or raise + ServerSelectionTimeoutError if no server is available. + """ + return await self._server_property("server_type") == SERVER_TYPE.Mongos + + async def _end_sessions(self, session_ids: list[_ServerSession]) -> None: + """Send endSessions command(s) with the given session ids.""" + try: + # Use AsyncConnection.command directly to avoid implicitly creating + # another session. + async with await self._conn_for_reads( + ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS + ) as ( + conn, + read_pref, + ): + if not conn.supports_sessions: + return + + for i in range(0, len(session_ids), common._MAX_END_SESSIONS): + spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} + await conn.command("admin", spec, read_preference=read_pref, client=self) + except PyMongoError: + # Drivers MUST ignore any errors returned by the endSessions + # command. + pass + + async def close(self) -> None: + """Cleanup client resources and disconnect from MongoDB. + + End all server sessions created by this client by sending one or more + endSessions commands. 
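+
+        A minimal shutdown sketch (the ping and try/finally pairing are
+        illustrative)::
+
+            client = AsyncMongoClient()
+            try:
+                await client.admin.command("ping")
+            finally:
+                # Always release sessions, sockets, and monitors.
+                await client.close()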
+ + Close all sockets in the connection pools and stop the monitor threads. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. + + .. versionchanged:: 3.6 + End all server sessions created by this client. + """ + if self._topology is None: + return + session_ids = self._topology.pop_all_sessions() + if session_ids: + await self._end_sessions(session_ids) + # Stop the periodic task thread and then send pending killCursor + # requests before closing the topology. + self._kill_cursors_executor.close() + await self._process_kill_cursors() + await self._topology.close() + if self._encrypter: + # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. + await self._encrypter.close() + self._closed = True + if not _IS_SYNC: + await asyncio.gather( + self._topology.cleanup_monitors(), # type: ignore[func-returns-value] + self._kill_cursors_executor.join(), # type: ignore[func-returns-value] + return_exceptions=True, + ) + + if not _IS_SYNC: + # Add support for contextlib.aclosing. + aclose = close + + async def _get_topology(self) -> Topology: + """Get the internal :class:`~pymongo.asynchronous.topology.Topology` object. + + If this client was created with "connect=False", calling _get_topology + launches the connection process in the background. + """ + if not _IS_SYNC: + if self._loop is None: + self._loop = asyncio.get_running_loop() + elif self._loop != asyncio.get_running_loop(): + raise RuntimeError( + "Cannot use AsyncMongoClient in different event loop. AsyncMongoClient uses low-level asyncio APIs that bind it to the event loop it was created on." + ) + if not self._opened: + if self._resolve_srv_info["is_srv"]: + await self._resolve_srv() + self._init_background() + await self._topology.open() + async with self._lock: + self._kill_cursors_executor.open() + self._opened = True + return self._topology + + @contextlib.asynccontextmanager + async def _checkout( + self, server: Server, session: Optional[AsyncClientSession] + ) -> AsyncGenerator[AsyncConnection, None]: + in_txn = session and session.in_transaction + async with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) + yield session._pinned_connection + return + async with await server.checkout(handler=err_handler) as conn: + # Pin this session to the selected server or connection. + if ( + in_txn + and session + and server.description.server_type + in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + ): + session._pin(server, conn) + err_handler.contribute_socket(conn) + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and conn.max_wire_version < 8 + ): + raise ConfigurationError( + "Auto-encryption requires a minimum MongoDB version of 4.2" + ) + yield conn + + async def _select_server( + self, + server_selector: Callable[[Selection], Selection], + session: Optional[AsyncClientSession], + operation: str, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Select a server to run an operation on this client. + + :Parameters: + - `server_selector`: The server selector to use if the session is + not pinned and no address is given. + - `session`: The AsyncClientSession for the next operation, or None. 
May + be pinned to a mongos server address. + - `address` (optional): Address when sending a message + to a specific server, used for getMore. + """ + try: + topology = await self._get_topology() + if session and not session.in_transaction: + await session._transaction.reset() + if not address and session: + address = session._pinned_address + if address: + # We're running a getMore or this session is pinned to a mongos. + server = await topology.select_server_by_address( + address, operation, operation_id=operation_id + ) + if not server: + raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 + else: + server = await topology.select_server( + server_selector, + operation, + deprioritized_servers=deprioritized_servers, + operation_id=operation_id, + ) + return server + except PyMongoError as exc: + # Server selection errors in a transaction are transient. + if session and session.in_transaction: + exc._add_error_label("TransientTransactionError") + await session._unpin() + raise + + async def _conn_for_writes( + self, session: Optional[AsyncClientSession], operation: str + ) -> AsyncContextManager[AsyncConnection]: + server = await self._select_server(writable_server_selector, session, operation) + return self._checkout(server, session) + + @contextlib.asynccontextmanager + async def _conn_from_server( + self, read_preference: _ServerMode, server: Server, session: Optional[AsyncClientSession] + ) -> AsyncGenerator[tuple[AsyncConnection, _ServerMode], None]: + assert read_preference is not None, "read_preference must not be None" + # Get a connection for a server matching the read preference, and yield + # conn with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. + # Thread safe: if the type is single it cannot change. + # NOTE: We already opened the Topology when selecting a server so there's no need + # to call _get_topology() again. + single = self._topology.description.topology_type == TOPOLOGY_TYPE.Single + async with self._checkout(server, session) as conn: + if single: + if conn.is_repl and not (session and session.in_transaction): + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif conn.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield conn, read_preference + + async def _conn_for_reads( + self, + read_preference: _ServerMode, + session: Optional[AsyncClientSession], + operation: str, + ) -> AsyncContextManager[tuple[AsyncConnection, _ServerMode]]: + assert read_preference is not None, "read_preference must not be None" + server = await self._select_server(read_preference, session, operation) + return self._conn_from_server(read_preference, server, session) + + @_csot.apply + async def _run_operation( + self, + operation: Union[_Query, _GetMore], + unpack_res: Callable, # type: ignore[type-arg] + address: Optional[_Address] = None, + ) -> Response: + """Run a _Query/_GetMore operation and return a Response. + + :param operation: a _Query or _GetMore object. + :param unpack_res: A callable that decodes the wire protocol response. + :param address: Optional address when sending a message + to a specific server, used for getMore. 
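+
+        When ``operation.conn_mgr`` is set (an exhaust cursor pinned to a
+        connection), the operation runs directly on that connection; otherwise
+        it is dispatched through the retryable-read machinery below.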
+ """ + if operation.conn_mgr: + server = await self._select_server( + operation.read_preference, + operation.session, # type: ignore[arg-type] + operation.name, + address=address, + ) + + async with operation.conn_mgr._lock: + async with _MongoClientErrorHandler(self, server, operation.session) as err_handler: # type: ignore[arg-type] + err_handler.contribute_socket(operation.conn_mgr.conn) + return await server.run_operation( + operation.conn_mgr.conn, + operation, + operation.read_preference, + self._event_listeners, + unpack_res, + self, + ) + + async def _cmd( + _session: Optional[AsyncClientSession], + server: Server, + conn: AsyncConnection, + read_preference: _ServerMode, + ) -> Response: + operation.reset() # Reset op in case of retry. + return await server.run_operation( + conn, + operation, + read_preference, + self._event_listeners, + unpack_res, + self, + ) + + return await self._retryable_read( + _cmd, + operation.read_preference, + operation.session, # type: ignore[arg-type] + address=address, + retryable=isinstance(operation, _Query), + operation=operation.name, + ) + + async def _retry_with_session( + self, + retryable: bool, + func: _WriteCall[T], + session: Optional[AsyncClientSession], + bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]], + operation: str, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with at most one consecutive retries + + Returns func()'s return value on success. On error retries the same + command. + + Re-raises any exception thrown by func(). + """ + # Ensure that the options supports retry_writes and there is a valid session not in + # transaction, otherwise, we will not support retry behavior for this txn. + retryable = bool( + retryable and self.options.retry_writes and session and not session.in_transaction + ) + return await self._retry_internal( + func=func, + session=session, + bulk=bulk, + operation=operation, + retryable=retryable, + operation_id=operation_id, + ) + + @_csot.apply + async def _retry_internal( + self, + func: _WriteCall[T] | _ReadCall[T], + session: Optional[AsyncClientSession], + bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]], + operation: str, + is_read: bool = False, + address: Optional[_Address] = None, + read_pref: Optional[_ServerMode] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ) -> T: + """Internal retryable helper for all client transactions. 
+
+        :param func: Callback function we want to retry
+        :param session: Client session on which the operation should occur
+        :param bulk: Abstraction to handle bulk write operations
+        :param operation: The name of the operation that the server is being selected for
+        :param is_read: If this is a read operation, defaults to False
+        :param address: Server address, defaults to None
+        :param read_pref: The read preference for the operation, defaults to None
+        :param retryable: If the operation should be retried once, defaults to False
+
+        :return: Output of the calling func()
+        """
+        return await _ClientConnectionRetryable(
+            mongo_client=self,
+            func=func,
+            bulk=bulk,
+            operation=operation,
+            is_read=is_read,
+            session=session,
+            read_pref=read_pref,
+            address=address,
+            retryable=retryable,
+            operation_id=operation_id,
+        ).run()
+
+    async def _retryable_read(
+        self,
+        func: _ReadCall[T],
+        read_pref: _ServerMode,
+        session: Optional[AsyncClientSession],
+        operation: str,
+        address: Optional[_Address] = None,
+        retryable: bool = True,
+        operation_id: Optional[int] = None,
+    ) -> T:
+        """Execute a read operation, retrying if possible.
+
+        Returns func()'s return value on success. On a retryable error,
+        retries the same command.
+
+        Re-raises any exception thrown by func().
+
+        :param func: Read call we want to execute
+        :param read_pref: The read preference for the operation
+        :param session: Client session we should use to execute the operation
+        :param operation: The name of the operation that the server is being selected for
+        :param address: Optional address when sending a message, defaults to None
+        :param retryable: if we should attempt retries
+            (may not always be supported even if supplied), defaults to True
+        """
+
+        # Ensure that the client supports retrying reads and that there is no
+        # session in a transaction; otherwise retry behavior is not supported
+        # for this call.
+        retryable = bool(
+            retryable and self.options.retry_reads and not (session and session.in_transaction)
+        )
+        async with self._tmp_session(session) as s:
+            return await self._retry_internal(
+                func,
+                s,
+                None,
+                operation,
+                is_read=True,
+                address=address,
+                read_pref=read_pref,
+                retryable=retryable,
+                operation_id=operation_id,
+            )
+
+    async def _retryable_write(
+        self,
+        retryable: bool,
+        func: _WriteCall[T],
+        session: Optional[AsyncClientSession],
+        operation: str,
+        bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]] = None,
+        operation_id: Optional[int] = None,
+    ) -> T:
+        """Execute a write operation, retrying if possible.
+
+        Returns func()'s return value on success. On a retryable error,
+        retries the same command.
+
+        Re-raises any exception thrown by func().
+
+        :param retryable: if we should attempt retries (may not always be supported)
+        :param func: write call we want to execute during a session
+        :param session: Client session we will use to execute the write operation
+        :param operation: The name of the operation that the server is being selected for
+        :param bulk: bulk abstraction to execute operations in bulk, defaults to None
+        """
+        async with self._tmp_session(session) as s:
+            return await self._retry_with_session(retryable, func, s, bulk, operation, operation_id)
+
+    def _cleanup_cursor_no_lock(
+        self,
+        cursor_id: int,
+        address: Optional[_CursorAddress],
+        conn_mgr: _ConnectionManager,
+        session: Optional[AsyncClientSession],
+    ) -> None:
+        """Cleanup a cursor from __del__ without locking.
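+
+        This must stay synchronous and lock-free because it can run from
+        ``__del__`` during garbage collection; the real cleanup is deferred
+        via ``_close_cursor_soon``.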
+ + This method handles cleanup for Cursors/CommandCursors including any + pinned connection attached at the time the cursor + was garbage collected. + + :param cursor_id: The cursor id which may be 0. + :param address: The _CursorAddress. + :param conn_mgr: The _ConnectionManager for the pinned connection or None. + """ + # The cursor will be closed later in a different session. + if cursor_id or conn_mgr: + self._close_cursor_soon(cursor_id, address, conn_mgr) + if session and session._implicit and not session._leave_alive: + session._end_implicit_session() + + async def _cleanup_cursor_lock( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: _ConnectionManager, + session: Optional[AsyncClientSession], + ) -> None: + """Cleanup a cursor from cursor.close() using a lock. + + This method handles cleanup for Cursors/CommandCursors including any + pinned connection or implicit session attached at the time the cursor + was closed or garbage collected. + + :param cursor_id: The cursor id which may be 0. + :param address: The _CursorAddress. + :param conn_mgr: The _ConnectionManager for the pinned connection or None. + :param session: The cursor's session. + """ + if cursor_id: + if conn_mgr and conn_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + assert conn_mgr.conn is not None + await conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + else: + await self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) + if conn_mgr: + await conn_mgr.close() + if session and session._implicit and not session._leave_alive: + session._end_implicit_session() + + async def _close_cursor_now( + self, + cursor_id: int, + address: Optional[_CursorAddress], + session: Optional[AsyncClientSession] = None, + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Send a kill cursors message with the given id. + + The cursor is closed synchronously on the current thread. + """ + if not isinstance(cursor_id, int): + raise TypeError(f"cursor_id must be an instance of int, not {type(cursor_id)}") + + try: + if conn_mgr: + async with conn_mgr._lock: + # Cursor is pinned to LB outside of a transaction. + assert address is not None + assert conn_mgr.conn is not None + await self._kill_cursor_impl([cursor_id], address, session, conn_mgr.conn) + else: + await self._kill_cursors([cursor_id], address, await self._get_topology(), session) + except PyMongoError: + # Make another attempt to kill the cursor later. + self._close_cursor_soon(cursor_id, address) + + async def _kill_cursors( + self, + cursor_ids: Sequence[int], + address: Optional[_CursorAddress], + topology: Topology, + session: Optional[AsyncClientSession], + ) -> None: + """Send a kill cursors message with the given ids.""" + if address: + # address could be a tuple or _CursorAddress, but + # select_server_by_address needs (host, port). + server = await topology.select_server_by_address(tuple(address), _Op.KILL_CURSORS) # type: ignore[arg-type] + else: + # Application called close_cursor() with no address. 
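+            # Fall back to any writable server (primary, standalone, or
+            # mongos); killCursors is best-effort cleanup.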
+ server = await topology.select_server(writable_server_selector, _Op.KILL_CURSORS) + + async with self._checkout(server, session) as conn: + assert address is not None + await self._kill_cursor_impl(cursor_ids, address, session, conn) + + async def _kill_cursor_impl( + self, + cursor_ids: Sequence[int], + address: _CursorAddress, + session: Optional[AsyncClientSession], + conn: AsyncConnection, + ) -> None: + namespace = address.namespace + db, coll = namespace.split(".", 1) + spec = {"killCursors": coll, "cursors": cursor_ids} + await conn.command(db, spec, session=session, client=self) + + async def _process_kill_cursors(self) -> None: + """Process any pending kill cursors requests.""" + address_to_cursor_ids = defaultdict(list) + pinned_cursors = [] + + # Other threads or the GC may append to the queue concurrently. + while True: + try: + address, cursor_id, conn_mgr = self._kill_cursors_queue.pop() + except IndexError: + break + + if conn_mgr: + pinned_cursors.append((address, cursor_id, conn_mgr)) + else: + address_to_cursor_ids[address].append(cursor_id) + + for address, cursor_id, conn_mgr in pinned_cursors: + try: + await self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + # Raise the exception when client is closed so that it + # can be caught in _process_periodic_tasks + raise + else: + _log_client_error() + + # Don't re-open topology if it's closed and there's no pending cursors. + if address_to_cursor_ids: + topology = await self._get_topology() + for address, cursor_ids in address_to_cursor_ids.items(): + try: + await self._kill_cursors(cursor_ids, address, topology, session=None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + raise + else: + _log_client_error() + + # This method is run periodically by a background thread. + async def _process_periodic_tasks(self) -> None: + """Process any pending kill cursors requests and + maintain connection pool parameters. + """ + try: + await self._process_kill_cursors() + await self._topology.update_pool() + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + return + else: + _log_client_error() + + def _return_server_session( + self, server_session: Union[_ServerSession, _EmptyServerSession] + ) -> None: + """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return None + return self._topology.return_server_session(server_session) + + @contextlib.asynccontextmanager + async def _tmp_session( + self, session: Optional[client_session.AsyncClientSession] + ) -> AsyncGenerator[Optional[client_session.AsyncClientSession], None]: + """If provided session is None, lend a temporary session.""" + if session is not None: + if not isinstance(session, client_session.AsyncClientSession): + raise ValueError( + f"'session' argument must be an AsyncClientSession or None, not {type(session)}" + ) + # Don't call end_session. + yield session + return + + s = self._ensure_session(session) + if s: + try: + yield s + except Exception as exc: + if isinstance(exc, ConnectionFailure): + s._server_session.mark_dirty() + + # Always call end_session on error. + await s.end_session() + raise + finally: + # Call end_session when we exit this scope. 
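+                # ...unless a cursor has taken ownership of this implicit
+                # session and will end it itself.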
+ if not s._attached_to_cursor: + await s.end_session() + else: + yield None + + async def _process_response( + self, reply: Mapping[str, Any], session: Optional[AsyncClientSession] + ) -> None: + await self._topology.receive_cluster_time(reply.get("$clusterTime")) + if session is not None: + session._process_response(reply) + + async def server_info( + self, session: Optional[client_session.AsyncClientSession] = None + ) -> dict[str, Any]: + """Get information about the MongoDB server we're connected to. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return cast( # type: ignore[redundant-cast] + dict[str, Any], + await self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ), + ) + + async def _list_databases( + self, + session: Optional[client_session.AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[dict[str, Any]]: + cmd = {"listDatabases": 1} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + admin = self._database_default_options("admin") + res = await admin._retryable_read_command( + cmd, session=session, operation=_Op.LIST_DATABASES + ) + # listDatabases doesn't return a cursor (yet). Fake one. + cursor = { + "id": 0, + "firstBatch": res["databases"], + "ns": "admin.$cmd", + } + return AsyncCommandCursor(admin["$cmd"], cursor, None, comment=comment) + + async def list_databases( + self, + session: Optional[client_session.AsyncClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> AsyncCommandCursor[dict[str, Any]]: + """Get a cursor over the databases of the connected server. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`AsyncCursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + async with await client.list_databases() as cursor: + async for database in cursor: + print(database) + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listDatabases command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.asynchronous.command_cursor.AsyncCommandCursor`. + + .. versionadded:: 3.6 + """ + return await self._list_databases(session, comment, **kwargs) + + async def list_database_names( + self, + session: Optional[client_session.AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> list[str]: + """Get a list of the names of all databases on the connected server. + + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. 
versionadded:: 3.6 + """ + res = await self._list_databases(session, nameOnly=True, comment=comment) + return [doc["name"] async for doc in res] + + @_csot.apply + async def drop_database( + self, + name_or_database: Union[str, database.AsyncDatabase[_DocumentTypeArg]], + session: Optional[client_session.AsyncClientSession] = None, + comment: Optional[Any] = None, + ) -> None: + """Drop a database. + + Raises :class:`TypeError` if `name_or_database` is not an instance of + :class:`str` or :class:`~pymongo.asynchronous.database.AsyncDatabase`. + + :param name_or_database: the name of a database to drop, or a + :class:`~pymongo.asynchronous.database.AsyncDatabase` instance representing the + database to drop + :param session: a + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. note:: The :attr:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.write_concern` of + this client is automatically applied to this operation. + + .. versionchanged:: 3.4 + Apply this client's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + name = name_or_database + if isinstance(name, database.AsyncDatabase): + name = name.name + + if not isinstance(name, str): + raise TypeError( + f"name_or_database must be an instance of str or a AsyncDatabase, not {type(name)}" + ) + + async with await self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn: + await self[name]._command( + conn, + {"dropDatabase": 1, "comment": comment}, + read_preference=ReadPreference.PRIMARY, + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + @_csot.apply + async def bulk_write( + self, + models: Sequence[_WriteOp], + session: Optional[AsyncClientSession] = None, + ordered: bool = True, + verbose_results: bool = False, + bypass_document_validation: Optional[bool] = None, + comment: Optional[Any] = None, + let: Optional[Mapping[str, Any]] = None, + write_concern: Optional[WriteConcern] = None, + ) -> ClientBulkWriteResult: + """Send a batch of write operations, potentially across multiple namespaces, to the server. + + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + ... + >>> async for doc in db.coll.find({}): + ... print(doc) + ... + {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')} + ... + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> models = [InsertOne(namespace="db.test", document={'y': 1}), + ... DeleteOne(namespace="db.test", filter={'x': 1}), + ... InsertOne(namespace="db.coll", document={'y': 2}), + ... 
ReplaceOne(namespace="db.test", filter={'w': 1}, replacement={'z': 1}, upsert=True)] + >>> result = await client.bulk_write(models=models) + >>> result.inserted_count + 2 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_count + 1 + >>> async for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + ... + >>> async for doc in db.coll.find({}): + ... print(doc) + ... + {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')} + {'y': 2, '_id': ObjectId('507f1f77bcf86cd799439012')} + + :param models: A list of write operation instances. + :param session: (optional) An instance of + :class:`~pymongo.asynchronous.client_session.AsyncClientSession`. + :param ordered: If ``True`` (the default), requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False``, requests + will be still performed on the server serially, in the order provided, + but all operations will be attempted even if any errors occur. + :param verbose_results: If ``True``, detailed results for each + successful operation will be included in the returned + :class:`~pymongo.results.ClientBulkWriteResult`. Default is ``False``. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is ``False``. + :param comment: (optional) A user-provided comment to attach to this + command. + :param let: (optional) Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param write_concern: (optional) The write concern to use for this bulk write. + + :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`. + + .. seealso:: For more info, see `Client Bulk Write `_. + + .. seealso:: `Writes and ids `_ + + .. note:: requires MongoDB server version 8.0+. + + .. versionadded:: 4.9 + """ + if self._options.auto_encryption_opts: + raise InvalidOperation( + "MongoClient.bulk_write does not currently support automatic encryption" + ) + + if session and session.in_transaction: + # Inherit the transaction write concern. + if write_concern: + raise InvalidOperation("Cannot set write concern after starting a transaction") + write_concern = session._transaction.opts.write_concern # type: ignore[union-attr] + else: + # Inherit the client's write concern if none is provided. 
+ if not write_concern: + write_concern = self.write_concern + + if write_concern and not write_concern.acknowledged and verbose_results: + raise InvalidOperation( + "Cannot request unacknowledged write concern and verbose results" + ) + elif write_concern and not write_concern.acknowledged and ordered: + raise InvalidOperation("Cannot request unacknowledged write concern and ordered writes") + + common.validate_list("models", models) + + blk = _AsyncClientBulk( + self, + write_concern=write_concern, # type: ignore[arg-type] + ordered=ordered, + bypass_document_validation=bypass_document_validation, + comment=comment, + let=let, + verbose_results=verbose_results, + ) + for model in models: + try: + model._add_to_client_bulk(blk) + except AttributeError: + raise TypeError(f"{model!r} is not a valid request") from None + + return await blk.execute(session, _Op.BULK_WRITE) + + +def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]: + """Return the server response from PyMongo exception or None.""" + if isinstance(exc, (BulkWriteError, ClientBulkWriteException)): + # Check the last writeConcernError to determine if this + # BulkWriteError is retryable. + wces = exc.details["writeConcernErrors"] + return wces[-1] if wces else None + if isinstance(exc, (NotPrimaryError, OperationFailure)): + return cast(Mapping[str, Any], exc.details) + return None + + +def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mongos: bool) -> None: + doc = _retryable_error_doc(exc) + if doc: + code = doc.get("code", 0) + # retryWrites on MMAPv1 should raise an actionable error. + if code == 20 and str(exc).startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, exc.details) # type: ignore[attr-defined] + if max_wire_version >= 9: + # In MongoDB 4.4+, the server reports the error labels. + for label in doc.get("errorLabels", []): + exc._add_error_label(label) + else: + # Do not consult writeConcernError for pre-4.4 mongos. + if isinstance(exc, WriteConcernError) and is_mongos: + pass + elif code in helpers_shared._RETRYABLE_ERROR_CODES: + exc._add_error_label("RetryableWriteError") + + # AsyncConnection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError which is + # handled above. + if isinstance(exc, ClientBulkWriteException): + exc_to_check = exc.error + else: + exc_to_check = exc + if isinstance(exc_to_check, ConnectionFailure) and not isinstance( + exc_to_check, (NotPrimaryError, WaitQueueTimeoutError) + ): + exc_to_check._add_error_label("RetryableWriteError") + + +class _MongoClientErrorHandler: + """Handle errors raised when executing an operation.""" + + __slots__ = ( + "client", + "server_address", + "session", + "max_wire_version", + "sock_generation", + "completed_handshake", + "service_id", + "handled", + ) + + def __init__( + self, + client: AsyncMongoClient, # type: ignore[type-arg] + server: Server, + session: Optional[AsyncClientSession], + ): + if not isinstance(client, AsyncMongoClient): + # This is for compatibility with mocked and subclassed types, such as in Motor. 
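+            # Accept anything whose MRO contains a class literally named
+            # "AsyncMongoClient", rather than requiring a strict subclass.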
+ if not any(cls.__name__ == "AsyncMongoClient" for cls in type(client).__mro__): + raise TypeError(f"AsyncMongoClient required but given {type(client).__name__}") + + self.client = client + self.server_address = server.description.address + self.session = session + self.max_wire_version = common.MIN_WIRE_VERSION + # XXX: When get_socket fails, this generation could be out of date: + # "Note that when a network error occurs before the handshake + # completes then the error's generation number is the generation + # of the pool at the time the connection attempt was started." + self.sock_generation = server.pool.gen.get_overall() + self.completed_handshake = False + self.service_id: Optional[ObjectId] = None + self.handled = False + + def contribute_socket(self, conn: AsyncConnection, completed_handshake: bool = True) -> None: + """Provide socket information to the error handler.""" + self.max_wire_version = conn.max_wire_version + self.sock_generation = conn.generation + self.service_id = conn.service_id + self.completed_handshake = completed_handshake + + async def handle( + self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException] + ) -> None: + if self.handled or exc_val is None: + return + self.handled = True + if self.session: + if isinstance(exc_val, ClientBulkWriteException): + exc_val = exc_val.error + if isinstance(exc_val, ConnectionFailure): + if self.session.in_transaction: + exc_val._add_error_label("TransientTransactionError") + self.session._server_session.mark_dirty() + + if isinstance(exc_val, PyMongoError): + if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( + "RetryableWriteError" + ): + await self.session._unpin() + err_ctx = _ErrorContext( + exc_val, # type: ignore[arg-type] + self.max_wire_version, + self.sock_generation, + self.completed_handshake, + self.service_id, + ) + assert self.client._topology is not None + await self.client._topology.handle_error(self.server_address, err_ctx) + + async def __aenter__(self) -> _MongoClientErrorHandler: + return self + + async def __aexit__( + self, + exc_type: Optional[Type[Exception]], + exc_val: Optional[Exception], + exc_tb: Optional[TracebackType], + ) -> None: + return await self.handle(exc_type, exc_val) + + +class _ClientConnectionRetryable(Generic[T]): + """Responsible for executing retryable connections on read or write operations""" + + def __init__( + self, + mongo_client: AsyncMongoClient, # type: ignore[type-arg] + func: _WriteCall[T] | _ReadCall[T], + bulk: Optional[Union[_AsyncBulk, _AsyncClientBulk]], + operation: str, + is_read: bool = False, + session: Optional[AsyncClientSession] = None, + read_pref: Optional[_ServerMode] = None, + address: Optional[_Address] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ): + self._last_error: Optional[Exception] = None + self._retrying = False + self._multiple_retries = _csot.get_timeout() is not None + self._client = mongo_client + + self._func = func + self._bulk = bulk + self._session = session + self._is_read = is_read + self._retryable = retryable + self._read_pref = read_pref + self._server_selector: Callable[[Selection], Selection] = ( + read_pref if is_read else writable_server_selector # type: ignore + ) + self._address = address + self._server: Server = None # type: ignore + self._deprioritized_servers: list[Server] = [] + self._operation = operation + self._operation_id = operation_id + self._attempt_number = 0 + + async def run(self) -> T: + """Runs the supplied func() and 
attempts a retry.
+
+        :raises: self._last_error: Last exception raised
+
+        :return: Result of the func() call
+        """
+        # Increment the transaction id up front to ensure any retry attempt
+        # will use the proper txnNumber, even if server or socket selection
+        # fails before the command can be sent.
+        if self._is_session_state_retryable() and self._retryable and not self._is_read:
+            self._session._start_retryable_write()  # type: ignore
+            if self._bulk:
+                self._bulk.started_retryable_write = True
+
+        while True:
+            self._check_last_error(check_csot=True)
+            try:
+                return await self._read() if self._is_read else await self._write()
+            except ServerSelectionTimeoutError:
+                # The application may think the write was never attempted
+                # if we raise ServerSelectionTimeoutError on the retry
+                # attempt. Raise the original exception instead.
+                self._check_last_error()
+                # A ServerSelectionTimeoutError indicates that there may
+                # be a persistent outage. Attempting to retry in this case will
+                # most likely be a waste of time.
+                raise
+            except PyMongoError as exc:
+                # Execute specialized catch on read
+                if self._is_read:
+                    if isinstance(exc, (ConnectionFailure, OperationFailure)):
+                        # ConnectionFailures do not supply a code property
+                        exc_code = getattr(exc, "code", None)
+                        if self._is_not_eligible_for_retry() or (
+                            isinstance(exc, OperationFailure)
+                            and exc_code not in helpers_shared._RETRYABLE_ERROR_CODES
+                        ):
+                            raise
+                        self._retrying = True
+                        self._last_error = exc
+                        self._attempt_number += 1
+                    else:
+                        raise
+
+                # Specialized catch on write operation
+                if not self._is_read:
+                    if not self._retryable:
+                        raise
+                    if isinstance(exc, ClientBulkWriteException) and exc.error:
+                        retryable_write_error_exc = isinstance(
+                            exc.error, PyMongoError
+                        ) and exc.error.has_error_label("RetryableWriteError")
+                    else:
+                        retryable_write_error_exc = exc.has_error_label("RetryableWriteError")
+                    if retryable_write_error_exc:
+                        assert self._session
+                        await self._session._unpin()
+                    if not retryable_write_error_exc or self._is_not_eligible_for_retry():
+                        if exc.has_error_label("NoWritesPerformed") and self._last_error:
+                            raise self._last_error from exc
+                        else:
+                            raise
+                    self._attempt_number += 1
+                    if self._bulk:
+                        self._bulk.retrying = True
+                    else:
+                        self._retrying = True
+                    if not exc.has_error_label("NoWritesPerformed"):
+                        self._last_error = exc
+                    if self._last_error is None:
+                        self._last_error = exc
+
+                if self._client.topology_description.topology_type == TOPOLOGY_TYPE.Sharded:
+                    self._deprioritized_servers.append(self._server)
+
+    def _is_not_eligible_for_retry(self) -> bool:
+        """Checks if the exchange is not eligible for retry"""
+        return not self._retryable or (self._is_retrying() and not self._multiple_retries)
+
+    def _is_retrying(self) -> bool:
+        """Checks if the exchange is currently undergoing a retry"""
+        return self._bulk.retrying if self._bulk else self._retrying
+
+    def _is_session_state_retryable(self) -> bool:
+        """Checks if the provided session is eligible for retry
+
+        reads: Make sure there is no ongoing transaction (if provided a session)
+        writes: Make sure there is a session without an active transaction
+        """
+        if self._is_read:
+            return not (self._session and self._session.in_transaction)
+        return bool(self._session and not self._session.in_transaction)
+
+    def _check_last_error(self, check_csot: bool = False) -> None:
+        """Checks if the ongoing client exchange experienced an exception previously.
+
+        If so, raise the last error.
+
+        :param check_csot: Check CSOT to ensure we are retrying with time remaining, defaults to False
+        """
+        if self._is_retrying():
+            remaining = _csot.remaining()
+            if not check_csot or (remaining is not None and remaining <= 0):
+                assert self._last_error is not None
+                raise self._last_error
+
+    async def _get_server(self) -> Server:
+        """Retrieves a server object based on the provided object context
+
+        :return: Abstraction to connect to server
+        """
+        return await self._client._select_server(
+            self._server_selector,
+            self._session,
+            self._operation,
+            address=self._address,
+            deprioritized_servers=self._deprioritized_servers,
+            operation_id=self._operation_id,
+        )
+
+    async def _write(self) -> T:
+        """Wrapper method for write-type retryable client executions
+
+        :return: Output for func()'s call
+        """
+        try:
+            max_wire_version = 0
+            is_mongos = False
+            self._server = await self._get_server()
+            async with self._client._checkout(self._server, self._session) as conn:
+                max_wire_version = conn.max_wire_version
+                sessions_supported = (
+                    self._session
+                    and self._server.description.retryable_writes_supported
+                    and conn.supports_sessions
+                )
+                is_mongos = conn.is_mongos
+                if not sessions_supported:
+                    # A retry is not possible because this server does
+                    # not support sessions; raise the last error.
+                    self._check_last_error()
+                    self._retryable = False
+                if self._retrying:
+                    _debug_log(
+                        _COMMAND_LOGGER,
+                        message=f"Retrying write attempt number {self._attempt_number}",
+                        clientId=self._client._topology_settings._topology_id,
+                        commandName=self._operation,
+                        operationId=self._operation_id,
+                    )
+                return await self._func(self._session, conn, self._retryable)  # type: ignore
+        except PyMongoError as exc:
+            if not self._retryable:
+                raise
+            # Add the RetryableWriteError label, if applicable.
+            _add_retryable_write_error(exc, max_wire_version, is_mongos)
+            raise
+
+    async def _read(self) -> T:
+        """Wrapper method for read-type retryable client executions
+
+        :return: Output for func()'s call
+        """
+        self._server = await self._get_server()
+        assert self._read_pref is not None, "Read Preference required on read calls"
+        async with self._client._conn_from_server(self._read_pref, self._server, self._session) as (
+            conn,
+            read_pref,
+        ):
+            if self._retrying and not self._retryable:
+                self._check_last_error()
+            if self._retrying:
+                _debug_log(
+                    _COMMAND_LOGGER,
+                    message=f"Retrying read attempt number {self._attempt_number}",
+                    clientId=self._client._topology_settings._topology_id,
+                    commandName=self._operation,
+                    operationId=self._operation_id,
+                )
+            return await self._func(self._session, self._server, conn, read_pref)  # type: ignore
+
+
+def _after_fork_child() -> None:
+    """Releases the locks in the child process and resets the
+    topologies in all MongoClients.
+    """
+    # Reinitialize locks
+    _release_locks()
+
+    # Perform cleanup in clients (i.e. get rid of the topology)
+    for _, client in AsyncMongoClient._clients.items():
+        client._after_fork()
+
+
+def _detect_external_db(entity: str) -> bool:
+    """Detects external database hosts and logs an informational message at the INFO level."""
+    entity = entity.lower()
+    cosmos_db_hosts = [".cosmos.azure.com"]
+    document_db_hosts = [".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"]
+
+    for host in cosmos_db_hosts:
+        if entity.endswith(host):
+            _log_or_warn(
+                _CLIENT_LOGGER,
+                "You appear to be connected to a CosmosDB cluster. 
For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb", + ) + return True + for host in document_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a DocumentDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/documentdb", + ) + return True + return False + + +if _HAS_REGISTER_AT_FORK: + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/asynchronous/monitor.py b/pymongo/asynchronous/monitor.py new file mode 100644 index 0000000000..45c12b219f --- /dev/null +++ b/pymongo/asynchronous/monitor.py @@ -0,0 +1,545 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Class to monitor a MongoDB server on a background thread.""" + +from __future__ import annotations + +import asyncio +import atexit +import logging +import time +import weakref +from typing import TYPE_CHECKING, Any, Optional + +from pymongo import common, periodic_executor +from pymongo._csot import MovingMinimum +from pymongo.asynchronous.srv_resolver import _SrvResolver +from pymongo.errors import NetworkTimeout, _OperationCancelled +from pymongo.hello import Hello +from pymongo.lock import _async_create_lock +from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage +from pymongo.periodic_executor import _shutdown_executors +from pymongo.pool_options import _is_faas +from pymongo.read_preferences import MovingAverage +from pymongo.server_description import ServerDescription + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import ( # type: ignore[attr-defined] + AsyncConnection, + Pool, + _CancellationContext, + ) + from pymongo.asynchronous.settings import TopologySettings + from pymongo.asynchronous.topology import Topology + +_IS_SYNC = False + + +def _sanitize(error: Exception) -> None: + """PYTHON-2433 Clear error traceback info.""" + error.__traceback__ = None + error.__context__ = None + error.__cause__ = None + + +def _monotonic_duration(start: float) -> float: + """Return the duration since the given start time. + + Accounts for buggy platforms where time.monotonic() is not monotonic. + See PYTHON-4600. + """ + return max(0.0, time.monotonic() - start) + + +class MonitorBase: + def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): + """Base class to do periodic work on a background thread. + + The background thread is signaled to stop when the Topology or + this instance is freed. + """ + + # We strongly reference the executor and it weakly references us via + # this closure. When the monitor is freed, stop the executor soon. 
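+        # Note: ``self_ref`` is assigned after the executor is created below;
+        # the closure resolves it lazily at call time, so this is safe.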
+ async def target() -> bool: + monitor = self_ref() + if monitor is None: + return False # Stop the executor. + await monitor._run() # type:ignore[attr-defined] + return True + + executor = periodic_executor.AsyncPeriodicExecutor( + interval=interval, min_interval=min_interval, target=target, name=name + ) + + self._executor = executor + + def _on_topology_gc(dummy: Optional[Topology] = None) -> None: + # This prevents GC from waiting 10 seconds for hello to complete + # See test_cleanup_executors_on_client_del. + monitor = self_ref() + if monitor: + monitor.gc_safe_close() + + # Avoid cycles. When self or topology is freed, stop executor soon. + self_ref = weakref.ref(self, executor.close) + self._topology = weakref.proxy(topology, _on_topology_gc) + _register(self) + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + self._executor.open() + + def gc_safe_close(self) -> None: + """GC safe close.""" + self._executor.close() + + async def close(self) -> None: + """Close and stop monitoring. + + open() restarts the monitor after closing. + """ + self.gc_safe_close() + + async def join(self) -> None: + """Wait for the monitor to stop.""" + await self._executor.join() + + def request_check(self) -> None: + """If the monitor is sleeping, wake it soon.""" + self._executor.wake() + + +class Monitor(MonitorBase): + def __init__( + self, + server_description: ServerDescription, + topology: Topology, + pool: Pool, + topology_settings: TopologySettings, + ): + """Class to monitor a MongoDB server on a background thread. + + Pass an initial ServerDescription, a Topology, a Pool, and + TopologySettings. + + The Topology is weakly referenced. The Pool must be exclusive to this + Monitor. + """ + super().__init__( + topology, + "pymongo_server_monitor_task", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + self._server_description = server_description + self._pool = pool + self._settings = topology_settings + self._listeners = self._settings._pool_options._event_listeners + self._publish = self._listeners is not None and self._listeners.enabled_for_server_heartbeat + self._cancel_context: Optional[_CancellationContext] = None + self._conn_id: Optional[int] = None + self._rtt_monitor = _RttMonitor( + topology, + topology_settings, + topology._create_pool_for_monitor(server_description.address), + ) + if topology_settings.server_monitoring_mode == "stream": + self._stream = True + elif topology_settings.server_monitoring_mode == "poll": + self._stream = False + else: + self._stream = not _is_faas() + + def cancel_check(self) -> None: + """Cancel any concurrent hello check. + + Note: this is called from a weakref.proxy callback and MUST NOT take + any locks. + """ + context = self._cancel_context + if context: + # Note: we cannot close the socket because doing so may cause + # concurrent reads/writes to hang until a timeout occurs + # (depending on the platform). + context.cancel() + + async def _start_rtt_monitor(self) -> None: + """Start an _RttMonitor that periodically runs ping.""" + # If this monitor is closed directly before (or during) this open() + # call, the _RttMonitor will not be closed. Checking if this monitor + # was closed directly after resolves the race. 
+ self._rtt_monitor.open() + if self._executor._stopped: + await self._rtt_monitor.close() + + def gc_safe_close(self) -> None: + self._executor.close() + self._rtt_monitor.gc_safe_close() + self.cancel_check() + + async def join(self) -> None: + await asyncio.gather( + self._executor.join(), self._rtt_monitor.join(), return_exceptions=True + ) # type: ignore[func-returns-value] + + async def close(self) -> None: + self.gc_safe_close() + await self._rtt_monitor.close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + await self._reset_connection() + + async def _reset_connection(self) -> None: + # Clear our pooled connection. + await self._pool.reset() + + async def _run(self) -> None: + try: + prev_sd = self._server_description + try: + self._server_description = await self._check_server() + except _OperationCancelled as exc: + _sanitize(exc) + # Already closed the connection, wait for the next check. + self._server_description = ServerDescription( + self._server_description.address, error=exc + ) + if prev_sd.is_server_type_known: + # Immediately retry since we've already waited 500ms to + # discover that we've been cancelled. + self._executor.skip_sleep() + return + + # Update the Topology and clear the server pool on error. + await self._topology.on_change( + self._server_description, + reset_pool=self._server_description.error, + interrupt_connections=isinstance(self._server_description.error, NetworkTimeout), + ) + + if self._stream and ( + self._server_description.is_server_type_known + and self._server_description.topology_version + ): + await self._start_rtt_monitor() + # Immediately check for the next streaming response. + self._executor.skip_sleep() + + if self._server_description.error and prev_sd.is_server_type_known: + # Immediately retry on network errors. + self._executor.skip_sleep() + except ReferenceError: + # Topology was garbage-collected. + await self.close() + finally: + if self._executor._stopped: + await self._rtt_monitor.close() + + async def _check_server(self) -> ServerDescription: + """Call hello or read the next streaming response. + + Returns a ServerDescription. + """ + self._conn_id = None + start = time.monotonic() + try: + return await self._check_once() + except ReferenceError: + raise + except Exception as error: + _sanitize(error) + sd = self._server_description + address = sd.address + duration = _monotonic_duration(start) + awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_FAIL, + topologyId=self._topology._topology_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=duration * 1000, + failure=error, + driverConnectionId=self._conn_id, + ) + await self._reset_connection() + if isinstance(error, _OperationCancelled): + raise + await self._rtt_monitor.reset() + # Server type defaults to Unknown. + return ServerDescription(address, error=error) + + async def _check_once(self) -> ServerDescription: + """A single attempt to call hello. + + Returns a ServerDescription, or raises an exception. 
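+
+        The check runs on the monitor's dedicated pooled connection; if a
+        previous check was cancelled, that connection is reset first.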
+ """ + address = self._server_description.address + sd = self._server_description + + # XXX: "awaited" could be incorrectly set to True in the rare case + # the pool checkout closes and recreates a connection. + awaited = bool( + self._pool.conns and self._stream and sd.is_server_type_known and sd.topology_version + ) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_started(address, awaited) + + if self._cancel_context and self._cancel_context.cancelled: + await self._reset_connection() + async with self._pool.checkout() as conn: + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_START, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + ) + + self._cancel_context = conn.cancel_context + # Record the connection id so we can later attach it to the failed log message. + self._conn_id = conn.id + response, round_trip_time = await self._check_with_socket(conn) + if not response.awaitable: + await self._rtt_monitor.add_sample(round_trip_time) + + avg_rtt, min_rtt = await self._rtt_monitor.get() + sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_succeeded( + address, round_trip_time, response, response.awaitable + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=round_trip_time * 1000, + reply=response.document, + ) + return sd + + async def _check_with_socket(self, conn: AsyncConnection) -> tuple[Hello, float]: # type: ignore[type-arg] + """Return (Hello, round_trip_time). + + Can raise ConnectionFailure or OperationFailure. + """ + start = time.monotonic() + if conn.more_to_come: + # Read the next streaming hello (MongoDB 4.4+). + response = Hello(await conn._next_reply(), awaitable=True) + elif ( + self._stream and conn.performed_handshake and self._server_description.topology_version + ): + # Initiate streaming hello (MongoDB 4.4+). + response = await conn._hello( + self._server_description.topology_version, + self._settings.heartbeat_frequency, + ) + else: + # New connection handshake or polling hello (MongoDB <4.4). + response = await conn._hello(None, None) + duration = _monotonic_duration(start) + return response, duration + + +class SrvMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings): + """Class to poll SRV records on a background thread. + + Pass a Topology and a TopologySettings. + + The Topology is weakly referenced. 
+ """ + super().__init__( + topology, + "pymongo_srv_polling_thread", + common.MIN_SRV_RESCAN_INTERVAL, + topology_settings.heartbeat_frequency, + ) + self._settings = topology_settings + self._seedlist = self._settings._seeds + assert isinstance(self._settings.fqdn, str) + self._fqdn: str = self._settings.fqdn + self._startup_time = time.monotonic() + + async def _run(self) -> None: + # Don't poll right after creation, wait 60 seconds first + if time.monotonic() < self._startup_time + common.MIN_SRV_RESCAN_INTERVAL: + return + seedlist = await self._get_seedlist() + if seedlist: + self._seedlist = seedlist + try: + await self._topology.on_srv_update(self._seedlist) + except ReferenceError: + # Topology was garbage-collected. + await self.close() + + async def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: + """Poll SRV records for a seedlist. + + Returns a list of ServerDescriptions. + """ + try: + resolver = _SrvResolver( + self._fqdn, + self._settings.pool_options.connect_timeout, + self._settings.srv_service_name, + ) + seedlist, ttl = await resolver.get_hosts_and_min_ttl() + if len(seedlist) == 0: + # As per the spec: this should be treated as a failure. + raise Exception + except Exception as exc: + # As per the spec, upon encountering an error: + # - An error must not be raised + # - SRV records must be rescanned every heartbeatFrequencyMS + # - Topology must be left unchanged + self.request_check() + _debug_log(_SDAM_LOGGER, message="SRV monitor check failed", failure=repr(exc)) + return None + else: + self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) + return seedlist + + +class _RttMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings, pool: Pool): + """Maintain round trip times for a server. + + The Topology is weakly referenced. + """ + super().__init__( + topology, + "pymongo_server_rtt_task", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + + self._pool = pool + self._moving_average = MovingAverage() + self._moving_min = MovingMinimum() + self._lock = _async_create_lock() + + async def close(self) -> None: + self.gc_safe_close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + await self._pool.reset() + + async def add_sample(self, sample: float) -> None: + """Add a RTT sample.""" + async with self._lock: + self._moving_average.add_sample(sample) + self._moving_min.add_sample(sample) + + async def get(self) -> tuple[Optional[float], float]: + """Get the calculated average, or None if no samples yet and the min.""" + async with self._lock: + return self._moving_average.get(), self._moving_min.get() + + async def reset(self) -> None: + """Reset the average RTT.""" + async with self._lock: + self._moving_average.reset() + self._moving_min.reset() + + async def _run(self) -> None: + try: + # NOTE: This thread is only run when using the streaming + # heartbeat protocol (MongoDB 4.4+). + # XXX: Skip check if the server is unknown? + rtt = await self._ping() + await self.add_sample(rtt) + except ReferenceError: + # Topology was garbage-collected. 
+ await self.close() + except Exception: + await self._pool.reset() + + async def _ping(self) -> float: + """Run a "hello" command and return the RTT.""" + async with self._pool.checkout() as conn: + if self._executor._stopped: + raise Exception("_RttMonitor closed") + start = time.monotonic() + await conn.hello() + return _monotonic_duration(start) + + +# Close monitors to cancel any in progress streaming checks before joining +# executor threads. For an explanation of how this works see the comment +# about _EXECUTORS in periodic_executor.py. +_MONITORS = set() + + +def _register(monitor: MonitorBase) -> None: + ref = weakref.ref(monitor, _unregister) + _MONITORS.add(ref) + + +def _unregister(monitor_ref: weakref.ReferenceType[MonitorBase]) -> None: + _MONITORS.remove(monitor_ref) + + +def _shutdown_monitors() -> None: + if _MONITORS is None: + return + + # Copy the set. Closing monitors removes them. + monitors = list(_MONITORS) + + # Close all monitors. + for ref in monitors: + monitor = ref() + if monitor: + monitor.gc_safe_close() + + monitor = None + + +def _shutdown_resources() -> None: + # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown. + shutdown = _shutdown_monitors + if shutdown: # type:ignore[truthy-function] + shutdown() + shutdown = _shutdown_executors + if shutdown: # type:ignore[truthy-function] + shutdown() + + +if _IS_SYNC: + atexit.register(_shutdown_resources) diff --git a/pymongo/asynchronous/network.py b/pymongo/asynchronous/network.py new file mode 100644 index 0000000000..5a5dc7fa2c --- /dev/null +++ b/pymongo/asynchronous/network.py @@ -0,0 +1,298 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Internal network layer helper methods."""
+from __future__ import annotations
+
+import datetime
+import logging
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Mapping,
+ MutableMapping,
+ Optional,
+ Sequence,
+ Union,
+ cast,
+)
+
+from bson import _decode_all_selective
+from pymongo import _csot, helpers_shared, message
+from pymongo.compression_support import _NO_COMPRESSION
+from pymongo.errors import (
+ NotPrimaryError,
+ OperationFailure,
+)
+from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log
+from pymongo.message import _OpMsg
+from pymongo.monitoring import _is_speculative_authenticate
+from pymongo.network_layer import (
+ async_receive_message,
+ async_sendall,
+)
+
+if TYPE_CHECKING:
+ from bson import CodecOptions
+ from pymongo.asynchronous.client_session import AsyncClientSession
+ from pymongo.asynchronous.mongo_client import AsyncMongoClient
+ from pymongo.asynchronous.pool import AsyncConnection
+ from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext
+ from pymongo.monitoring import _EventListeners
+ from pymongo.read_concern import ReadConcern
+ from pymongo.read_preferences import _ServerMode
+ from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType
+ from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = False
+
+
+async def command(
+ conn: AsyncConnection,
+ dbname: str,
+ spec: MutableMapping[str, Any],
+ is_mongos: bool,
+ read_preference: Optional[_ServerMode],
+ codec_options: CodecOptions[_DocumentType],
+ session: Optional[AsyncClientSession],
+ client: Optional[AsyncMongoClient[Any]],
+ check: bool = True,
+ allowable_errors: Optional[Sequence[Union[str, int]]] = None,
+ address: Optional[_Address] = None,
+ listeners: Optional[_EventListeners] = None,
+ max_bson_size: Optional[int] = None,
+ read_concern: Optional[ReadConcern] = None,
+ parse_write_concern_error: bool = False,
+ collation: Optional[_CollationIn] = None,
+ compression_ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None,
+ use_op_msg: bool = False,
+ unacknowledged: bool = False,
+ user_fields: Optional[Mapping[str, Any]] = None,
+ exhaust_allowed: bool = False,
+ write_concern: Optional[WriteConcern] = None,
+) -> _DocumentType:
+ """Execute a command over the socket, or raise socket.error.
+
+ :param conn: an AsyncConnection instance
+ :param dbname: name of the database on which to run the command
+ :param spec: a command document as an ordered dict type, e.g. SON.
+ :param is_mongos: are we connected to a mongos?
+ :param read_preference: a read preference
+ :param codec_options: a CodecOptions instance
+ :param session: optional AsyncClientSession instance.
+ :param client: optional AsyncMongoClient instance for updating $clusterTime.
+ :param check: raise OperationFailure if there are errors
+ :param allowable_errors: errors to ignore if `check` is True
+ :param address: the (host, port) of `conn`
+ :param listeners: An instance of :class:`~pymongo.monitoring.EventListeners`
+ :param max_bson_size: The maximum encoded bson size for this server
+ :param read_concern: The read concern for this command.
+ :param parse_write_concern_error: Whether to parse the ``writeConcernError``
+ field in the command response.
+ :param collation: The collation for this command.
+ :param compression_ctx: optional compression Context.
+ :param use_op_msg: True if we should use OP_MSG.
+ :param unacknowledged: True if this is an unacknowledged command.
+ :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + :param exhaust_allowed: True if we should enable OP_MSG exhaustAllowed. + """ + name = next(iter(spec)) + ns = dbname + ".$cmd" + speculative_hello = False + + # Publish the original command document, perhaps with lsid and $clusterTime. + orig = spec + if is_mongos and not use_op_msg: + assert read_preference is not None + spec = message._maybe_add_read_preference(spec, read_preference) + if read_concern and not (session and session.in_transaction): + if read_concern.level: + spec["readConcern"] = read_concern.document + if session: + session._update_read_concern(spec, conn) + if collation is not None: + spec["collation"] = collation + + publish = listeners is not None and listeners.enabled_for_commands + start = datetime.datetime.now() + if publish: + speculative_hello = _is_speculative_authenticate(name, spec) + + if compression_ctx and name.lower() in _NO_COMPRESSION: + compression_ctx = None + + if client and client._encrypter and not client._encrypter._bypass_auto_encryption: + spec = orig = await client._encrypter.encrypt(dbname, spec, codec_options) + + # Support CSOT + if client: + conn.apply_timeout(client, spec) + _csot.apply_write_concern(spec, write_concern) + + if use_op_msg: + flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 + flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 + request_id, msg, size, max_doc_size = message._op_msg( + flags, spec, dbname, read_preference, codec_options, ctx=compression_ctx + ) + # If this is an unacknowledged write then make sure the encoded doc(s) + # are small enough, otherwise rely on the server to return an error. + if unacknowledged and max_bson_size is not None and max_doc_size > max_bson_size: + message._raise_document_too_large(name, size, max_bson_size) + else: + request_id, msg, size = message._query( + 0, ns, 0, -1, spec, None, codec_options, compression_ctx + ) + + if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: + message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=spec, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_start( + orig, + dbname, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + await async_sendall(conn.conn.get_conn, msg) + if use_op_msg and unacknowledged: + # Unacknowledged, fake a successful command response. 
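+ # (With OP_MSG the moreToCome flag tells the server not to send
+ # a reply, so there is nothing to read off the socket.)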
+ reply = None + response_doc: _DocumentOut = {"ok": 1} + else: + reply = await async_receive_message(conn, request_id) + conn.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response( + codec_options=codec_options, user_fields=user_fields + ) + + response_doc = unpacked_docs[0] + if not conn.ready: + cluster_time = response_doc.get("$clusterTime") + if cluster_time: + conn._cluster_time = cluster_time + if client: + await client._process_response(response_doc, session) + if check: + helpers_shared._check_command_response( + response_doc, + conn.max_wire_version, + allowable_errors, + parse_write_concern_error=parse_write_concern_error, + ) + except Exception as exc: + duration = datetime.datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = message._convert_exception(exc) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_failure( + duration, + failure, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbname, + ) + raise + duration = datetime.datetime.now() - start + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=response_doc, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + speculative_authenticate="speculativeAuthenticate" in orig, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_success( + duration, + response_doc, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + speculative_hello=speculative_hello, + database_name=dbname, + ) + + if client and client._encrypter and reply: + decrypted = await client._encrypter.decrypt(reply.raw_command_response()) + response_doc = cast( + "_DocumentOut", _decode_all_selective(decrypted, codec_options, user_fields)[0] + ) + + return response_doc # type: ignore[return-value] diff --git a/pymongo/asynchronous/pool.py b/pymongo/asynchronous/pool.py new file mode 100644 index 0000000000..f521091e3c --- /dev/null +++ b/pymongo/asynchronous/pool.py @@ -0,0 +1,1479 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. 
You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +from __future__ import annotations + +import asyncio +import collections +import contextlib +import logging +import os +import sys +import time +import weakref +from typing import ( + TYPE_CHECKING, + Any, + AsyncGenerator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import DEFAULT_CODEC_OPTIONS +from pymongo import _csot, helpers_shared +from pymongo.asynchronous.client_session import _validate_session_write_concern +from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.asynchronous.network import command +from pymongo.common import ( + MAX_BSON_SIZE, + MAX_MESSAGE_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + ORDERED_TYPES, +) +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConfigurationError, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details +from pymongo.lock import ( + _async_cond_wait, + _async_create_condition, + _async_create_lock, +) +from pymongo.logger import ( + _CONNECTION_LOGGER, + _ConnectionStatusMessage, + _debug_log, + _verbose_connection_error_reason, +) +from pymongo.monitoring import ( + ConnectionCheckOutFailedReason, + ConnectionClosedReason, +) +from pymongo.network_layer import AsyncNetworkingInterface, async_receive_message, async_sendall +from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + SSLErrors, + _CancellationContext, + _configured_protocol_interface, + _raise_connection_failure, +) +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import _add_to_command +from pymongo.server_type import SERVER_TYPE +from pymongo.socket_checker import SocketChecker + +if TYPE_CHECKING: + from bson import CodecOptions + from bson.objectid import ObjectId + from pymongo.asynchronous.auth import _AuthContext + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.mongo_client import AsyncMongoClient, _MongoClientErrorHandler + from pymongo.compression_support import ( + SnappyContext, + ZlibContext, + ZstdContext, + ) + from pymongo.message import _OpMsg, _OpReply + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import _ServerMode + from pymongo.typings import _Address, _CollationIn + from pymongo.write_concern import WriteConcern + +try: + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + + def _set_non_inheritable_non_atomic(fd: int) -> None: + """Set the close-on-exec flag on the given file descriptor.""" + flags = fcntl(fd, F_GETFD) + fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + +except ImportError: + # Windows, various platforms we don't claim to support + # (Jython, IronPython, ..), systems that don't provide + # everything we need from fcntl, etc. 
+ def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001
+ """Dummy function for platforms that don't provide fcntl."""
+
+
+_IS_SYNC = False
+
+
+class AsyncConnection:
+ """Store a connection with some metadata.
+
+ :param conn: a raw connection object
+ :param pool: a Pool instance
+ :param address: the server's (host, port)
+ :param id: the id of this socket in its pool
+ :param is_sdam: SDAM connections do not call hello on creation
+ """
+
+ def __init__(
+ self,
+ conn: AsyncNetworkingInterface,
+ pool: Pool,
+ address: tuple[str, int],
+ id: int,
+ is_sdam: bool,
+ ):
+ self.pool_ref = weakref.ref(pool)
+ self.conn = conn
+ self.address = address
+ self.id = id
+ self.is_sdam = is_sdam
+ self.closed = False
+ self.last_checkin_time = time.monotonic()
+ self.performed_handshake = False
+ self.is_writable: bool = False
+ self.max_wire_version = MAX_WIRE_VERSION
+ self.max_bson_size = MAX_BSON_SIZE
+ self.max_message_size = MAX_MESSAGE_SIZE
+ self.max_write_batch_size = MAX_WRITE_BATCH_SIZE
+ self.supports_sessions = False
+ self.hello_ok: bool = False
+ self.is_mongos = False
+ self.op_msg_enabled = False
+ self.listeners = pool.opts._event_listeners
+ self.enabled_for_cmap = pool.enabled_for_cmap
+ self.enabled_for_logging = pool.enabled_for_logging
+ self.compression_settings = pool.opts._compression_settings
+ self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None
+ self.socket_checker: SocketChecker = SocketChecker()
+ self.oidc_token_gen_id: Optional[int] = None
+ # Support for mechanism negotiation on the initial handshake.
+ self.negotiated_mechs: Optional[list[str]] = None
+ self.auth_ctx: Optional[_AuthContext] = None
+
+ # The pool's generation changes with each reset() so we can close
+ # sockets created before the last reset.
+ self.pool_gen = pool.gen
+ self.generation = self.pool_gen.get_overall()
+ self.ready = False
+ self.cancel_context: _CancellationContext = _CancellationContext()
+ self.opts = pool.opts
+ self.more_to_come: bool = False
+ # For load balancer support.
+ self.service_id: Optional[ObjectId] = None
+ self.server_connection_id: Optional[int] = None
+ # When executing a transaction in load balancing mode, this flag is
+ # set to true to indicate that the session now owns the connection.
+ self.pinned_txn = False
+ self.pinned_cursor = False
+ self.active = False
+ self.last_timeout = self.opts.socket_timeout
+ self.connect_rtt = 0.0
+ self._client_id = pool._client_id
+ self.creation_time = time.monotonic()
+ # For gossiping $clusterTime from the connection handshake to the client.
+ self._cluster_time = None
+
+ def set_conn_timeout(self, timeout: Optional[float]) -> None:
+ """Cache last timeout to avoid duplicate calls to conn.settimeout."""
+ if timeout == self.last_timeout:
+ return
+ self.last_timeout = timeout
+ self.conn.get_conn.settimeout(timeout)
+
+ def apply_timeout(
+ self, client: AsyncMongoClient[Any], cmd: Optional[MutableMapping[str, Any]]
+ ) -> Optional[float]:
+ # CSOT: use remaining timeout when set.
+ timeout = _csot.remaining()
+ if timeout is None:
+ # Reset the socket timeout unless we're performing a streaming monitor check.
+ if not self.more_to_come:
+ self.set_conn_timeout(self.opts.socket_timeout)
+ return None
+ # RTT validation.
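+ # maxTimeMS is computed as the remaining client-side timeout minus
+ # the expected round trip time, so the server gives up early enough
+ # for its reply to still arrive within the CSOT deadline.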
+ rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + def pin_txn(self) -> None: + self.pinned_txn = True + assert not self.pinned_cursor + + def pin_cursor(self) -> None: + self.pinned_cursor = True + assert not self.pinned_txn + + async def unpin(self) -> None: + pool = self.pool_ref() + if pool: + await pool.checkin(self) + else: + await self.close_conn(ConnectionClosedReason.STALE) + + def hello_cmd(self) -> dict[str, Any]: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or stable API mode. + if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True + return {HelloCompat.CMD: 1} + else: + return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} + + async def hello(self) -> Hello[dict[str, Any]]: + return await self._hello(None, None) + + async def _hello( + self, + topology_version: Optional[Any], + heartbeat_frequency: Optional[int], + ) -> Hello[dict[str, Any]]: + cmd = self.hello_cmd() + performing_handshake = not self.performed_handshake + awaitable = False + if performing_handshake: + self.performed_handshake = True + cmd["client"] = self.opts.metadata + if self.compression_settings: + cmd["compression"] = self.compression_settings.compressors + if self.opts.load_balanced: + cmd["loadBalanced"] = True + elif topology_version is not None: + cmd["topologyVersion"] = topology_version + assert heartbeat_frequency is not None + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) + awaitable = True + # If connect_timeout is None there is no timeout. + if self.opts.connect_timeout: + self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) + + creds = self.opts._credentials + if creds: + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." 
+ creds.username + from pymongo.asynchronous import auth + + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) + if auth_ctx: + speculative_authenticate = auth_ctx.speculate_command() + if speculative_authenticate is not None: + cmd["speculativeAuthenticate"] = speculative_authenticate + else: + auth_ctx = None + + if performing_handshake: + start = time.monotonic() + doc = await self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start + hello = Hello(doc, awaitable=awaitable) + self.is_writable = hello.is_writable + self.max_wire_version = hello.max_wire_version + self.max_bson_size = hello.max_bson_size + self.max_message_size = hello.max_message_size + self.max_write_batch_size = hello.max_write_batch_size + self.supports_sessions = ( + hello.logical_session_timeout_minutes is not None and hello.is_readable + ) + self.logical_session_timeout_minutes: Optional[int] = hello.logical_session_timeout_minutes + self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone + self.is_mongos = hello.server_type == SERVER_TYPE.Mongos + if performing_handshake and self.compression_settings: + ctx = self.compression_settings.get_compression_context(hello.compressors) + self.compression_context = ctx + + self.op_msg_enabled = True + self.server_connection_id = hello.connection_id + if creds: + self.negotiated_mechs = hello.sasl_supported_mechs + if auth_ctx: + auth_ctx.parse_response(hello) # type:ignore[arg-type] + if auth_ctx.speculate_succeeded(): + self.auth_ctx = auth_ctx + if self.opts.load_balanced: + if not hello.service_id: + raise ConfigurationError( + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) + self.service_id = hello.service_id + self.generation = self.pool_gen.get(self.service_id) + return hello + + async def _next_reply(self) -> dict[str, Any]: + reply = await self.receive_message(None) + self.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response() + response_doc = unpacked_docs[0] + helpers_shared._check_command_response(response_doc, self.max_wire_version) + return response_doc + + @_handle_reauth + async def command( + self, + dbname: str, + spec: MutableMapping[str, Any], + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[Mapping[str, Any]] = DEFAULT_CODEC_OPTIONS, # type: ignore[assignment] + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + session: Optional[AsyncClientSession] = None, + client: Optional[AsyncMongoClient[Any]] = None, + retryable_write: bool = False, + publish_events: bool = True, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + ) -> dict[str, Any]: + """Execute a command or raise an error. 
+ + :param dbname: name of the database on which to run the command + :param spec: a command document as a dict, SON, or mapping object + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern: The read concern for this command. + :param write_concern: The write concern for this command. + :param parse_write_concern_error: Whether to parse the + ``writeConcernError`` field in the command response. + :param collation: The collation for this command. + :param session: optional AsyncClientSession instance. + :param client: optional AsyncMongoClient for gossipping $clusterTime. + :param retryable_write: True if this command is a retryable write. + :param publish_events: Should we publish events for this command? + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + """ + self.validate_session(client, session) + session = _validate_session_write_concern(session, write_concern) + + # Ensure command name remains in first place. + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] + spec = dict(spec) + + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + + self.add_server_api(spec) + if session: + session._apply_to(spec, retryable_write, read_preference, self) + self.send_cluster_time(spec, session, client) + listeners = self.listeners if publish_events else None + unacknowledged = bool(write_concern and not write_concern.acknowledged) + if self.op_msg_enabled: + self._raise_if_not_writable(unacknowledged) + try: + return await command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, # type: ignore[arg-type] + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + write_concern=write_concern, + ) + except (OperationFailure, NotPrimaryError): + raise + # Catch socket.error, KeyboardInterrupt, CancelledError, etc. and close ourselves. + except BaseException as error: + await self._raise_connection_failure(error) + + async def send_message(self, message: bytes, max_doc_size: int) -> None: + """Send a raw BSON message or raise ConnectionFailure. + + If a network exception is raised, the socket is closed. + """ + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: + raise DocumentTooLarge( + "BSON document too large (%d bytes) - the connected server " + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) + + try: + await async_sendall(self.conn.get_conn, message) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as error: + await self._raise_connection_failure(error) + + async def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise ConnectionFailure. + + If any exception is raised, the socket is closed. 
+ """ + try: + return await async_receive_message(self, request_id, self.max_message_size) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as error: + await self._raise_connection_failure(error) + + def _raise_if_not_writable(self, unacknowledged: bool) -> None: + """Raise NotPrimaryError on unacknowledged write if this socket is not + writable. + """ + if unacknowledged and not self.is_writable: + # Write won't succeed, bail as if we'd received a not primary error. + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) + + async def unack_write(self, msg: bytes, max_doc_size: int) -> None: + """Send unack OP_MSG. + + Can raise ConnectionFailure or InvalidDocument. + + :param msg: bytes, an OP_MSG message. + :param max_doc_size: size in bytes of the largest document in `msg`. + """ + self._raise_if_not_writable(True) + await self.send_message(msg, max_doc_size) + + async def write_command( + self, request_id: int, msg: bytes, codec_options: CodecOptions[Mapping[str, Any]] + ) -> dict[str, Any]: + """Send "insert" etc. command, returning response as a dict. + + Can raise ConnectionFailure or OperationFailure. + + :param request_id: an int. + :param msg: bytes, the command message. + """ + await self.send_message(msg, 0) + reply = await self.receive_message(request_id) + result = reply.command_response(codec_options) + + # Raises NotPrimaryError or OperationFailure. + helpers_shared._check_command_response(result, self.max_wire_version) + return result + + async def authenticate(self, reauthenticate: bool = False) -> None: + """Authenticate to the server if needed. + + Can raise ConnectionFailure or OperationFailure. + """ + # CMAP spec says to publish the ready event only after authenticating + # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. + self.auth_ctx = None + self.ready = False + if not self.ready: + creds = self.opts._credentials + if creds: + from pymongo.asynchronous import auth + + await auth.authenticate(creds, self, reauthenticate=reauthenticate) + self.ready = True + duration = time.monotonic() - self.creation_time + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_ready(self.address, self.id, duration) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_READY, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + durationMS=duration, + ) + + def validate_session( + self, client: Optional[AsyncMongoClient[Any]], session: Optional[AsyncClientSession] + ) -> None: + """Validate this session before use with client. + + Raises error if the client is not the one that created the session. 
+ """ + if session: + if session._client is not client: + raise InvalidOperation( + "Can only use session with the AsyncMongoClient that started it" + ) + + async def close_conn(self, reason: Optional[str]) -> None: + """Close this connection with a reason.""" + if self.closed: + return + await self._close_conn() + if reason: + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + reason=_verbose_connection_error_reason(reason), + error=reason, + ) + + async def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + await self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() + + def send_cluster_time( + self, + command: MutableMapping[str, Any], + session: Optional[AsyncClientSession], + client: Optional[AsyncMongoClient[Any]], + ) -> None: + """Add $clusterTime.""" + if client: + client._send_cluster_time(command, session) + + def add_server_api(self, command: MutableMapping[str, Any]) -> None: + """Add server_api parameters.""" + if self.opts.server_api: + _add_to_command(command, self.opts.server_api) + + def update_last_checkin_time(self) -> None: + self.last_checkin_time = time.monotonic() + + def update_is_writable(self, is_writable: bool) -> None: + self.is_writable = is_writable + + def idle_time_seconds(self) -> float: + """Seconds since this socket was last checked into its pool.""" + return time.monotonic() - self.last_checkin_time + + async def _raise_connection_failure(self, error: BaseException) -> NoReturn: + # Catch *all* exceptions from socket methods and close the socket. In + # regular Python, socket operations only raise socket.error, even if + # the underlying cause was a Ctrl-C: a signal raised during socket.recv + # is expressed as an EINTR error from poll. See internal_select_ex() in + # socketmodule.c. All error codes from poll become socket.error at + # first. Eventually in PyEval_EvalFrameEx the interpreter checks for + # signals and throws KeyboardInterrupt into the current frame on the + # main thread. + # + # But in Gevent, the polling mechanism (epoll, kqueue, + # ..) is called in Python code, which experiences the signal as a + # KeyboardInterrupt from the start, rather than as an initial + # socket.error, so we catch that, close the socket, and reraise it. + # + # The connection closed event will be emitted later in checkin. + if self.ready: + reason = None + else: + reason = ConnectionClosedReason.ERROR + await self.close_conn(reason) + # SSLError from PyOpenSSL inherits directly from Exception. 
+ if isinstance(error, (IOError, OSError, *SSLErrors)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + else: + raise + + def __eq__(self, other: Any) -> bool: + return self.conn == other.conn + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self.conn) + + def __repr__(self) -> str: + return "AsyncConnection({}){} at {}".format( + repr(self.conn), + self.closed and " CLOSED" or "", + id(self), + ) + + +class _PoolClosedError(PyMongoError): + """Internal error raised when a thread tries to get a connection from a + closed pool. + """ + + +class _PoolGeneration: + def __init__(self) -> None: + # Maps service_id to generation. + self._generations: dict[ObjectId, int] = collections.defaultdict(int) + # Overall pool generation. + self._generation = 0 + + def get(self, service_id: Optional[ObjectId]) -> int: + """Get the generation for the given service_id.""" + if service_id is None: + return self._generation + return self._generations[service_id] + + def get_overall(self) -> int: + """Get the Pool's overall generation.""" + return self._generation + + def inc(self, service_id: Optional[ObjectId]) -> None: + """Increment the generation for the given service_id.""" + self._generation += 1 + if service_id is None: + for service_id in self._generations: + self._generations[service_id] += 1 + else: + self._generations[service_id] += 1 + + def stale(self, gen: int, service_id: Optional[ObjectId]) -> bool: + """Return if the given generation for a given service_id is stale.""" + return gen != self.get(service_id) + + +class PoolState: + PAUSED = 1 + READY = 2 + CLOSED = 3 + + +# Do *not* explicitly inherit from object or Jython won't call __del__ +# https://bugs.jython.org/issue1057 +class Pool: + def __init__( + self, + address: _Address, + options: PoolOptions, + is_sdam: bool = False, + client_id: Optional[ObjectId] = None, + ): + """ + :param address: a (hostname, port) tuple + :param options: a PoolOptions instance + :param is_sdam: whether to call hello for each new AsyncConnection + """ + if options.pause_enabled: + self.state = PoolState.PAUSED + else: + self.state = PoolState.READY + # Check a socket's health with socket_closed() every once in a while. + # Can override for testing: 0 to always check, None to never check. + self._check_interval_seconds = 1 + # LIFO pool. Sockets are ordered on idle time. Sockets claimed + # and returned to pool from the left side. Stale sockets removed + # from the right side. + self.conns: collections.deque[AsyncConnection] = collections.deque() + self.active_contexts: set[_CancellationContext] = set() + self.lock = _async_create_lock() + self._max_connecting_cond = _async_create_condition(self.lock) + self.active_sockets = 0 + # Monotonically increasing connection ID required for CMAP Events. + self.next_connection_id = 1 + # Track whether the sockets in this pool are writeable or not. + self.is_writable: Optional[bool] = None + + # Keep track of resets, so we notice sockets created before the most + # recent reset and close them. + # self.generation = 0 + self.gen = _PoolGeneration() + self.pid = os.getpid() + self.address = address + self.opts = options + self.is_sdam = is_sdam + # Don't publish events or logs in Monitor pools. 
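+ # (Monitor pools are created with is_sdam=True; surfacing CMAP events
+ # for internal monitoring connections would only confuse listeners.)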
+ self.enabled_for_cmap = ( + not self.is_sdam + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) + self.enabled_for_logging = not self.is_sdam + + # The first portion of the wait queue. + # Enforces: maxPoolSize + # Also used for: clearing the wait queue + self.size_cond = _async_create_condition(self.lock) + self.requests = 0 + self.max_pool_size = self.opts.max_pool_size + if not self.max_pool_size: + self.max_pool_size = float("inf") + # The second portion of the wait queue. + # Enforces: maxConnecting + # Also used for: clearing the wait queue + self._max_connecting_cond = _async_create_condition(self.lock) + self._max_connecting = self.opts.max_connecting + self._pending = 0 + self._client_id = client_id + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_created( + self.address, self.opts.non_default_options + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CREATED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + **self.opts.non_default_options, + ) + # Similar to active_sockets but includes threads in the wait queue. + self.operation_count: int = 0 + # Retain references to pinned connections to prevent the CPython GC + # from thinking that a cursor's pinned connection can be GC'd when the + # cursor is GC'd (see PYTHON-2751). + self.__pinned_sockets: set[AsyncConnection] = set() + self.ncursors = 0 + self.ntxns = 0 + + async def ready(self) -> None: + # Take the lock to avoid the race condition described in PYTHON-2699. + async with self.lock: + if self.state != PoolState.READY: + self.state = PoolState.READY + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_ready(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_READY, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + @property + def closed(self) -> bool: + return self.state == PoolState.CLOSED + + async def _reset( + self, + close: bool, + pause: bool = True, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + old_state = self.state + async with self.size_cond: + if self.closed: + return + if self.opts.pause_enabled and pause and not self.opts.load_balanced: + old_state, self.state = self.state, PoolState.PAUSED + self.gen.inc(service_id) + newpid = os.getpid() + if self.pid != newpid: + self.pid = newpid + self.active_sockets = 0 + self.operation_count = 0 + if service_id is None: + sockets, self.conns = self.conns, collections.deque() + else: + discard: collections.deque = collections.deque() # type: ignore[type-arg] + keep: collections.deque = collections.deque() # type: ignore[type-arg] + for conn in self.conns: + if conn.service_id == service_id: + discard.append(conn) + else: + keep.append(conn) + sockets = discard + self.conns = keep + + if close: + self.state = PoolState.CLOSED + # Clear the wait queue + self._max_connecting_cond.notify_all() + self.size_cond.notify_all() + + if interrupt_connections: + for context in self.active_contexts: + context.cancel() + + listeners = self.opts._event_listeners + # CMAP spec says that close() MUST close sockets before publishing the + # 
PoolClosedEvent but that reset() SHOULD close sockets *after* + # publishing the PoolClearedEvent. + if close: + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in sockets: + await conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_closed(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + else: + if old_state != PoolState.PAUSED: + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared( + self.address, + service_id=service_id, + interrupt_connections=interrupt_connections, + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CLEARED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + serviceId=service_id, + ) + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in sockets: + await conn.close_conn(ConnectionClosedReason.STALE) + + async def update_is_writable(self, is_writable: Optional[bool]) -> None: + """Updates the is_writable attribute on all sockets currently in the + Pool. + """ + self.is_writable = is_writable + async with self.lock: + for _socket in self.conns: + _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] + + async def reset( + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + ) -> None: + await self._reset( + close=False, service_id=service_id, interrupt_connections=interrupt_connections + ) + + async def reset_without_pause(self) -> None: + await self._reset(close=False, pause=False) + + async def close(self) -> None: + await self._reset(close=True) + + def stale_generation(self, gen: int, service_id: Optional[ObjectId]) -> bool: + return self.gen.stale(gen, service_id) + + async def remove_stale_sockets(self, reference_generation: int) -> None: + """Removes stale sockets then adds new ones if pool is too small and + has not been reset. The `reference_generation` argument specifies the + `generation` at the point in time this operation was requested on the + pool. + """ + # Take the lock to avoid the race condition described in PYTHON-2699. + async with self.lock: + if self.state != PoolState.READY: + return + + if self.opts.max_idle_time_seconds is not None: + close_conns = [] + async with self.lock: + while ( + self.conns + and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): + close_conns.append(self.conns.pop()) + if not _IS_SYNC: + await asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in close_conns: + await conn.close_conn(ConnectionClosedReason.IDLE) + + while True: + async with self.size_cond: + # There are enough sockets in the pool. 
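+ # (Pending requests count toward the minimum too: each one is about
+ # to create or reuse a connection, so spawning more here would
+ # overshoot minPoolSize.)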
+ if len(self.conns) + self.active_sockets >= self.opts.min_pool_size: + return + if self.requests >= self.opts.min_pool_size: + return + self.requests += 1 + incremented = False + try: + async with self._max_connecting_cond: + # If maxConnecting connections are already being created + # by this pool then try again later instead of waiting. + if self._pending >= self._max_connecting: + return + self._pending += 1 + incremented = True + conn = await self.connect() + close_conn = False + async with self.lock: + # Close connection and return if the pool was reset during + # socket creation or while acquiring the pool lock. + if self.gen.get_overall() != reference_generation: + close_conn = True + if not close_conn: + self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) + if close_conn: + await conn.close_conn(ConnectionClosedReason.STALE) + return + finally: + if incremented: + # Notify after adding the socket to the pool. + async with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + + async with self.size_cond: + self.requests -= 1 + self.size_cond.notify() + + async def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> AsyncConnection: + """Connect to Mongo and return a new AsyncConnection. + + Can raise ConnectionFailure. + + Note that the pool does not keep a reference to the socket -- you + must call checkin() when you're done with it. + """ + async with self.lock: + conn_id = self.next_connection_id + self.next_connection_id += 1 + # Use a temporary context so that interrupt_connections can cancel creating the socket. + tmp_context = _CancellationContext() + self.active_contexts.add(tmp_context) + + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_created(self.address, conn_id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CREATED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + ) + + try: + networking_interface = await _configured_protocol_interface(self.address, self.opts) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. 
+ except BaseException as error:
+ async with self.lock:
+ self.active_contexts.discard(tmp_context)
+ if self.enabled_for_cmap:
+ assert listeners is not None
+ listeners.publish_connection_closed(
+ self.address, conn_id, ConnectionClosedReason.ERROR
+ )
+ if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+ _debug_log(
+ _CONNECTION_LOGGER,
+ message=_ConnectionStatusMessage.CONN_CLOSED,
+ clientId=self._client_id,
+ serverHost=self.address[0],
+ serverPort=self.address[1],
+ driverConnectionId=conn_id,
+ reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR),
+ error=ConnectionClosedReason.ERROR,
+ )
+ if isinstance(error, (IOError, OSError, *SSLErrors)):
+ details = _get_timeout_details(self.opts)
+ _raise_connection_failure(self.address, error, timeout_details=details)
+
+ raise
+
+ conn = AsyncConnection(networking_interface, self, self.address, conn_id, self.is_sdam) # type: ignore[arg-type]
+ async with self.lock:
+ self.active_contexts.add(conn.cancel_context)
+ self.active_contexts.discard(tmp_context)
+ if tmp_context.cancelled:
+ conn.cancel_context.cancel()
+ try:
+ if not self.is_sdam:
+ await conn.hello()
+ self.is_writable = conn.is_writable
+ if handler:
+ handler.contribute_socket(conn, completed_handshake=False)
+
+ await conn.authenticate()
+ # Catch KeyboardInterrupt, CancelledError, etc. and cleanup.
+ except BaseException:
+ async with self.lock:
+ self.active_contexts.discard(conn.cancel_context)
+ await conn.close_conn(ConnectionClosedReason.ERROR)
+ raise
+
+ if handler:
+ await handler.client._topology.receive_cluster_time(conn._cluster_time)
+
+ return conn
+
+ @contextlib.asynccontextmanager
+ async def checkout(
+ self, handler: Optional[_MongoClientErrorHandler] = None
+ ) -> AsyncGenerator[AsyncConnection, None]:
+ """Get a connection from the pool. Use with an "async with" statement.
+
+ Returns an :class:`AsyncConnection` object wrapping a connected
+ :class:`socket.socket`.
+
+ This method should always be used in an async with-statement::
+
+ async with pool.checkout() as connection:
+ await connection.send_message(msg, max_doc_size)
+ data = await connection.receive_message(request_id)
+
+ Can raise ConnectionFailure or OperationFailure.
+
+ :param handler: A _MongoClientErrorHandler.
+ """
+ listeners = self.opts._event_listeners
+ checkout_started_time = time.monotonic()
+ if self.enabled_for_cmap:
+ assert listeners is not None
+ listeners.publish_connection_check_out_started(self.address)
+ if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+ _debug_log(
+ _CONNECTION_LOGGER,
+ message=_ConnectionStatusMessage.CHECKOUT_STARTED,
+ clientId=self._client_id,
+ serverHost=self.address[0],
+ serverPort=self.address[1],
+ )
+
+ conn = await self._get_conn(checkout_started_time, handler=handler)
+
+ duration = time.monotonic() - checkout_started_time
+ if self.enabled_for_cmap:
+ assert listeners is not None
+ listeners.publish_connection_checked_out(self.address, conn.id, duration)
+ if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+ _debug_log(
+ _CONNECTION_LOGGER,
+ message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED,
+ clientId=self._client_id,
+ serverHost=self.address[0],
+ serverPort=self.address[1],
+ driverConnectionId=conn.id,
+ durationMS=duration,
+ )
+ try:
+ async with self.lock:
+ self.active_contexts.add(conn.cancel_context)
+ yield conn
+ # Catch KeyboardInterrupt, CancelledError, etc. and cleanup.
+ except BaseException:
+ # Exception in caller.
Ensure the connection gets returned. + # Note that when pinned is True, the session owns the + # connection and it is responsible for checking the connection + # back into the pool. + pinned = conn.pinned_txn or conn.pinned_cursor + if handler: + # Perform SDAM error handling rules while the connection is + # still checked out. + exc_type, exc_val, _ = sys.exc_info() + await handler.handle(exc_type, exc_val) + if not pinned and conn.active: + await self.checkin(conn) + raise + if conn.pinned_txn: + async with self.lock: + self.__pinned_sockets.add(conn) + self.ntxns += 1 + elif conn.pinned_cursor: + async with self.lock: + self.__pinned_sockets.add(conn) + self.ncursors += 1 + elif conn.active: + await self.checkin(conn) + + def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: + if self.state != PoolState.READY: + if emit_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + + details = _get_timeout_details(self.opts) + _raise_connection_failure( + self.address, AutoReconnect("connection pool paused"), timeout_details=details + ) + + async def _get_conn( + self, checkout_started_time: float, handler: Optional[_MongoClientErrorHandler] = None + ) -> AsyncConnection: + """Get or create a AsyncConnection. Can raise ConnectionFailure.""" + # We use the pid here to avoid issues with fork / multiprocessing. + # See test.test_client:TestClient.test_fork for an example of + # what could go wrong otherwise + if self.pid != os.getpid(): + await self.reset_without_pause() + + if self.closed: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Connection pool was closed", + error=ConnectionCheckOutFailedReason.POOL_CLOSED, + durationMS=duration, + ) + raise _PoolClosedError( + "Attempted to check out a connection from closed connection pool" + ) + + async with self.lock: + self.operation_count += 1 + + # Get a free socket or create one. + if _csot.get_timeout(): + deadline = _csot.get_deadline() + elif self.opts.wait_queue_timeout: + deadline = time.monotonic() + self.opts.wait_queue_timeout + else: + deadline = None + + async with self.size_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=True) + while not (self.requests < self.max_pool_size): + timeout = deadline - time.monotonic() if deadline else None + if not await _async_cond_wait(self.size_cond, timeout): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. 
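+ # Without the re-notify, a waiter that timed out could swallow a
+ # wakeup meant for another thread still waiting for capacity.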
+ if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=True) + self.requests += 1 + + # We've now acquired the semaphore and must release it on error. + conn = None + incremented = False + emitted_event = False + try: + async with self.lock: + self.active_sockets += 1 + incremented = True + while conn is None: + # CMAP: we MUST wait for either maxConnecting OR for a socket + # to be checked back into the pool. + async with self._max_connecting_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=False) + while not (self.conns or self._pending < self._max_connecting): + timeout = deadline - time.monotonic() if deadline else None + if not await _async_cond_wait(self._max_connecting_cond, timeout): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.conns or self._pending < self._max_connecting: + self._max_connecting_cond.notify() + emitted_event = True + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=False) + + try: + conn = self.conns.popleft() + except IndexError: + self._pending += 1 + if conn: # We got a socket from the pool + if await self._perished(conn): + conn = None + continue + else: # We need to create a new connection + try: + conn = await self.connect(handler=handler) + finally: + async with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + if conn: + # We checked out a socket but authentication failed. + await conn.close_conn(ConnectionClosedReason.ERROR) + async with self.size_cond: + self.requests -= 1 + if incremented: + self.active_sockets -= 1 + self.size_cond.notify() + + if not emitted_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + raise + + conn.active = True + return conn + + async def checkin(self, conn: AsyncConnection) -> None: + """Return the connection to the pool, or if it's closed discard it. + + :param conn: The connection to check into the pool. 
+ """ + txn = conn.pinned_txn + cursor = conn.pinned_cursor + conn.active = False + conn.pinned_txn = False + conn.pinned_cursor = False + self.__pinned_sockets.discard(conn) + listeners = self.opts._event_listeners + async with self.lock: + self.active_contexts.discard(conn.cancel_context) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_in(self.address, conn.id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKEDIN, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + ) + if self.pid != os.getpid(): + await self.reset_without_pause() + else: + if self.closed: + await conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + elif conn.closed: + # CMAP requires the closed event be emitted after the check in. + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn.id, ConnectionClosedReason.ERROR + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + else: + close_conn = False + async with self.lock: + # Hold the lock to ensure this section does not race with + # Pool.reset(). + if self.stale_generation(conn.generation, conn.service_id): + close_conn = True + else: + conn.update_last_checkin_time() + conn.update_is_writable(bool(self.is_writable)) + self.conns.appendleft(conn) + # Notify any threads waiting to create a connection. + self._max_connecting_cond.notify() + if close_conn: + await conn.close_conn(ConnectionClosedReason.STALE) + + async with self.size_cond: + if txn: + self.ntxns -= 1 + elif cursor: + self.ncursors -= 1 + self.requests -= 1 + self.active_sockets -= 1 + self.operation_count -= 1 + self.size_cond.notify() + + async def _perished(self, conn: AsyncConnection) -> bool: + """Return True and close the connection if it is "perished". + + This side-effecty function checks if this socket has been idle for + for longer than the max idle time, or if the socket has been closed by + some external network error, or if the socket's generation is outdated. + + Checking sockets lets us avoid seeing *some* + :class:`~pymongo.errors.AutoReconnect` exceptions on server + hiccups, etc. We only check if the socket was closed by an external + error if it has been > 1 second since the socket was checked into the + pool, to keep performance reasonable - we can't avoid AutoReconnects + completely anyway. + """ + idle_time_seconds = conn.idle_time_seconds() + # If socket is idle, open a new one. 
+ if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): + await conn.close_conn(ConnectionClosedReason.IDLE) + return True + + if self._check_interval_seconds is not None and ( + self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds + ): + if conn.conn_closed(): + await conn.close_conn(ConnectionClosedReason.ERROR) + return True + + if self.stale_generation(conn.generation, conn.service_id): + await conn.close_conn(ConnectionClosedReason.STALE) + return True + + return False + + def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: + listeners = self.opts._event_listeners + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Wait queue timeout elapsed without a connection becoming available", + error=ConnectionCheckOutFailedReason.TIMEOUT, + durationMS=duration, + ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout + if self.opts.load_balanced: + other_ops = self.active_sockets - self.ncursors - self.ntxns + raise WaitQueueTimeoutError( + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + timeout, + ) + ) + raise WaitQueueTimeoutError( + "Timed out while checking out a connection from connection pool. " + f"maxPoolSize: {self.opts.max_pool_size}, timeout: {timeout}" + ) + + def __del__(self) -> None: + # Avoid ResourceWarnings in Python 3 + # Close all sockets without calling reset() or close() because it is + # not safe to acquire a lock in __del__. + if _IS_SYNC: + for conn in self.conns: + conn.close_conn(None) # type: ignore[unused-coroutine] diff --git a/pymongo/asynchronous/server.py b/pymongo/asynchronous/server.py new file mode 100644 index 0000000000..f212306174 --- /dev/null +++ b/pymongo/asynchronous/server.py @@ -0,0 +1,383 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Communicate with one MongoDB server in a topology.""" +from __future__ import annotations + +import logging +from datetime import datetime +from typing import ( + TYPE_CHECKING, + Any, + AsyncContextManager, + Callable, + Optional, + Union, +) + +from bson import _decode_all_selective +from pymongo.asynchronous.helpers import _handle_reauth +from pymongo.errors import NotPrimaryError, OperationFailure +from pymongo.helpers_shared import _check_command_response +from pymongo.logger import ( + _COMMAND_LOGGER, + _SDAM_LOGGER, + _CommandStatusMessage, + _debug_log, + _SDAMStatusMessage, +) +from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.response import PinnedResponse, Response + +if TYPE_CHECKING: + from queue import Queue + from weakref import ReferenceType + + from bson.objectid import ObjectId + from pymongo.asynchronous.mongo_client import AsyncMongoClient, _MongoClientErrorHandler + from pymongo.asynchronous.monitor import Monitor + from pymongo.asynchronous.pool import AsyncConnection, Pool + from pymongo.monitoring import _EventListeners + from pymongo.read_preferences import _ServerMode + from pymongo.server_description import ServerDescription + from pymongo.typings import _DocumentOut + +_IS_SYNC = False + +_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} + + +class Server: + def __init__( + self, + server_description: ServerDescription, + pool: Pool, + monitor: Monitor, + topology_id: Optional[ObjectId] = None, + listeners: Optional[_EventListeners] = None, + events: Optional[ReferenceType[Queue[Any]]] = None, + ) -> None: + """Represent one MongoDB server.""" + self._description = server_description + self._pool = pool + self._monitor = monitor + self._topology_id = topology_id + self._publish = listeners is not None and listeners.enabled_for_server + self._listener = listeners + self._events = None + if self._publish: + self._events = events() # type: ignore[misc] + + async def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + if not self._pool.opts.load_balanced: + self._monitor.open() + + async def reset(self, service_id: Optional[ObjectId] = None) -> None: + """Clear the connection pool.""" + await self.pool.reset(service_id) + + async def close(self) -> None: + """Clear the connection pool and stop the monitor. + + Reconnect with open(). 
+ """ + if self._publish: + assert self._listener is not None + assert self._events is not None + self._events.put( + ( + self._listener.publish_server_closed, + (self._description.address, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.STOP_SERVER, + topologyId=self._topology_id, + serverHost=self._description.address[0], + serverPort=self._description.address[1], + ) + + await self._monitor.close() + await self._pool.close() + + def request_check(self) -> None: + """Check the server's state soon.""" + self._monitor.request_check() + + async def operation_to_command( + self, operation: Union[_Query, _GetMore], conn: AsyncConnection, apply_timeout: bool = False + ) -> tuple[dict[str, Any], str]: + cmd, db = operation.as_command(conn, apply_timeout) + # Support auto encryption + if operation.client._encrypter and not operation.client._encrypter._bypass_auto_encryption: + cmd = await operation.client._encrypter.encrypt( # type: ignore[misc, assignment] + operation.db, cmd, operation.codec_options + ) + operation.update_command(cmd) + + return cmd, db + + @_handle_reauth + async def run_operation( + self, + conn: AsyncConnection, + operation: Union[_Query, _GetMore], + read_preference: _ServerMode, + listeners: Optional[_EventListeners], + unpack_res: Callable[..., list[_DocumentOut]], + client: AsyncMongoClient[Any], + ) -> Response: + """Run a _Query or _GetMore operation and return a Response object. + + This method is used only to run _Query/_GetMore operations from + cursors. + Can raise ConnectionFailure, OperationFailure, etc. + + :param conn: An AsyncConnection instance. + :param operation: A _Query or _GetMore object. + :param read_preference: The read preference to use. + :param listeners: Instance of _EventListeners or None. + :param unpack_res: A callable that decodes the wire protocol response. + :param client: An AsyncMongoClient instance. + """ + assert listeners is not None + publish = listeners.enabled_for_commands + start = datetime.now() + + use_cmd = operation.use_command(conn) + more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come + cmd, dbn = await self.operation_to_command(operation, conn, use_cmd) + if more_to_come: + request_id = 0 + else: + message = operation.get_message(read_preference, conn, use_cmd) + request_id, data, max_doc_size = self._split_message(message) + + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + + if publish: + if "$db" not in cmd: + cmd["$db"] = dbn + assert listeners is not None + listeners.publish_command_start( + cmd, + dbn, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + if more_to_come: + reply = await conn.receive_message(None) + else: + await conn.send_message(data, max_doc_size) + reply = await conn.receive_message(request_id) + + # Unpack and check for command errors. 
+ if use_cmd: + user_fields = _CURSOR_DOC_FIELDS + legacy_response = False + else: + user_fields = None + legacy_response = True + docs = unpack_res( + reply, + operation.cursor_id, + operation.codec_options, + legacy_response=legacy_response, + user_fields=user_fields, + ) + if use_cmd: + first = docs[0] + await operation.client._process_response(first, operation.session) # type: ignore[misc, arg-type] + _check_command_response(first, conn.max_wire_version, pool_opts=conn.opts) # type:ignore[has-type] + except Exception as exc: + duration = datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + listeners.publish_command_failure( + duration, + failure, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + raise + duration = datetime.now() - start + # Must publish in find / getMore / explain command response + # format. + if use_cmd: + res = docs[0] + elif operation.name == "explain": + res = docs[0] if docs else {} + else: + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] + if operation.name == "find": + res["cursor"]["firstBatch"] = docs + else: + res["cursor"]["nextBatch"] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=res, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + listeners.publish_command_success( + duration, + res, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + + # Decrypt response. + client = operation.client # type: ignore[assignment] + if client and client._encrypter: + if use_cmd: + decrypted = await client._encrypter.decrypt(reply.raw_command_response()) + docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) + + response: Response + + if client._should_pin_cursor(operation.session) or operation.exhaust: # type: ignore[arg-type] + conn.pin_cursor() + if isinstance(reply, _OpMsg): + # In OP_MSG, the server keeps sending only if the + # more_to_come flag is set. + more_to_come = reply.more_to_come + else: + # In OP_REPLY, the server keeps sending until cursor_id is 0. 
+ more_to_come = bool(operation.exhaust and reply.cursor_id) + if operation.conn_mgr: + operation.conn_mgr.update_exhaust(more_to_come) + response = PinnedResponse( + data=reply, + address=self._description.address, + conn=conn, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + more_to_come=more_to_come, + ) + else: + response = Response( + data=reply, + address=self._description.address, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + ) + + return response + + async def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> AsyncContextManager[AsyncConnection]: + return self.pool.checkout(handler) + + @property + def description(self) -> ServerDescription: + return self._description + + @description.setter + def description(self, server_description: ServerDescription) -> None: + assert server_description.address == self._description.address + self._description = server_description + + @property + def pool(self) -> Pool: + return self._pool + + def _split_message( + self, message: Union[tuple[int, Any], tuple[int, Any, int]] + ) -> tuple[int, Any, int]: + """Return request_id, data, max_doc_size. + + :param message: (request_id, data, max_doc_size) or (request_id, data) + """ + if len(message) == 3: + return message # type: ignore[return-value] + else: + # get_more and kill_cursors messages don't include BSON documents. + request_id, data = message # type: ignore[misc] + return request_id, data, 0 + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/asynchronous/settings.py b/pymongo/asynchronous/settings.py new file mode 100644 index 0000000000..9c2331971a --- /dev/null +++ b/pymongo/asynchronous/settings.py @@ -0,0 +1,175 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Represent MongoClient's configuration.""" +from __future__ import annotations + +import threading +import traceback +from typing import Any, Collection, Optional, Type, Union + +from bson.objectid import ObjectId +from pymongo import common +from pymongo.asynchronous import monitor, pool +from pymongo.asynchronous.pool import Pool +from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT +from pymongo.errors import ConfigurationError +from pymongo.pool_options import PoolOptions +from pymongo.server_description import ServerDescription +from pymongo.topology_description import TOPOLOGY_TYPE, _ServerSelector + +_IS_SYNC = False + + +class TopologySettings: + def __init__( + self, + seeds: Optional[Collection[tuple[str, int]]] = None, + replica_set_name: Optional[str] = None, + pool_class: Optional[Type[Pool]] = None, + pool_options: Optional[PoolOptions] = None, + monitor_class: Optional[Type[monitor.Monitor]] = None, + condition_class: Optional[Type[threading.Condition]] = None, + local_threshold_ms: int = LOCAL_THRESHOLD_MS, + server_selection_timeout: int = SERVER_SELECTION_TIMEOUT, + heartbeat_frequency: int = common.HEARTBEAT_FREQUENCY, + server_selector: Optional[_ServerSelector] = None, + fqdn: Optional[str] = None, + direct_connection: Optional[bool] = False, + load_balanced: Optional[bool] = None, + srv_service_name: str = common.SRV_SERVICE_NAME, + srv_max_hosts: int = 0, + server_monitoring_mode: str = common.SERVER_MONITORING_MODE, + topology_id: Optional[ObjectId] = None, + ): + """Represent MongoClient's configuration. + + Take a list of (host, port) pairs and optional replica set name. + """ + if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: + raise ConfigurationError( + "heartbeatFrequencyMS cannot be less than %d" + % (common.MIN_HEARTBEAT_INTERVAL * 1000,) + ) + + self._seeds: Collection[tuple[str, int]] = seeds or [("localhost", 27017)] + self._replica_set_name = replica_set_name + self._pool_class: Type[Pool] = pool_class or pool.Pool + self._pool_options: PoolOptions = pool_options or PoolOptions() + self._monitor_class: Type[monitor.Monitor] = monitor_class or monitor.Monitor + self._condition_class: Type[threading.Condition] = condition_class or threading.Condition + self._local_threshold_ms = local_threshold_ms + self._server_selection_timeout = server_selection_timeout + self._server_selector = server_selector + self._fqdn = fqdn + self._heartbeat_frequency = heartbeat_frequency + self._direct = direct_connection + self._load_balanced = load_balanced + self._srv_service_name = srv_service_name + self._srv_max_hosts = srv_max_hosts or 0 + self._server_monitoring_mode = server_monitoring_mode + if topology_id is not None: + self._topology_id = topology_id + else: + self._topology_id = ObjectId() + # Store the allocation traceback to catch unclosed clients in the + # test suite. 
+ self._stack = "".join(traceback.format_stack()[:-2]) + + @property + def seeds(self) -> Collection[tuple[str, int]]: + """List of server addresses.""" + return self._seeds + + @property + def replica_set_name(self) -> Optional[str]: + return self._replica_set_name + + @property + def pool_class(self) -> Type[Pool]: + return self._pool_class + + @property + def pool_options(self) -> PoolOptions: + return self._pool_options + + @property + def monitor_class(self) -> Type[monitor.Monitor]: + return self._monitor_class + + @property + def condition_class(self) -> Type[threading.Condition]: + return self._condition_class + + @property + def local_threshold_ms(self) -> int: + return self._local_threshold_ms + + @property + def server_selection_timeout(self) -> int: + return self._server_selection_timeout + + @property + def server_selector(self) -> Optional[_ServerSelector]: + return self._server_selector + + @property + def heartbeat_frequency(self) -> int: + return self._heartbeat_frequency + + @property + def fqdn(self) -> Optional[str]: + return self._fqdn + + @property + def direct(self) -> Optional[bool]: + """Connect directly to a single server, or use a set of servers? + + True if there is one seed and no replica_set_name. + """ + return self._direct + + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self._load_balanced + + @property + def srv_service_name(self) -> str: + """The srvServiceName.""" + return self._srv_service_name + + @property + def srv_max_hosts(self) -> int: + """The srvMaxHosts.""" + return self._srv_max_hosts + + @property + def server_monitoring_mode(self) -> str: + """The serverMonitoringMode.""" + return self._server_monitoring_mode + + def get_topology_type(self) -> int: + if self.load_balanced: + return TOPOLOGY_TYPE.LoadBalanced + elif self.direct: + return TOPOLOGY_TYPE.Single + elif self.replica_set_name is not None: + return TOPOLOGY_TYPE.ReplicaSetNoPrimary + else: + return TOPOLOGY_TYPE.Unknown + + def get_server_descriptions(self) -> dict[Union[tuple[str, int], Any], ServerDescription]: + """Initial dict of (address, ServerDescription) for all seeds.""" + return {address: ServerDescription(address) for address in self.seeds} diff --git a/pymongo/asynchronous/srv_resolver.py b/pymongo/asynchronous/srv_resolver.py new file mode 100644 index 0000000000..9c4d9a9d57 --- /dev/null +++ b/pymongo/asynchronous/srv_resolver.py @@ -0,0 +1,155 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations + +import ipaddress +import random +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo.common import CONNECT_TIMEOUT +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: + from dns import resolver + +_IS_SYNC = False + + +def _have_dnspython() -> bool: + try: + import dns # noqa: F401 + + return True + except ImportError: + return False + + +# dnspython can return bytes or str from various parts +# of its API depending on version. We always want str. +def maybe_decode(text: Union[str, bytes]) -> str: + if isinstance(text, bytes): + return text.decode() + return text + + +# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. +async def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: + if _IS_SYNC: + from dns import resolver + + return resolver.resolve(*args, **kwargs) + else: + from dns import asyncresolver + + return await asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] + + +_INVALID_HOST_MSG = ( + "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " + "Did you mean to use 'mongodb://'?" +) + + +class _SrvResolver: + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): + self.__fqdn = fqdn + self.__srv = srv_service_name + self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT + self.__srv_max_hosts = srv_max_hosts or 0 + # Validate the fully qualified domain name. + try: + ipaddress.ip_address(fqdn) + raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) + except ValueError: + pass + try: + split_fqdn = self.__fqdn.split(".") + self.__plist = split_fqdn[1:] if len(split_fqdn) > 2 else split_fqdn + except Exception: + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None + self.__slen = len(self.__plist) + self.nparts = len(split_fqdn) + + async def get_options(self) -> Optional[str]: + from dns import resolver + + try: + results = await _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) + except (resolver.NoAnswer, resolver.NXDOMAIN): + # No TXT records + return None + except Exception as exc: + raise ConfigurationError(str(exc)) from exc + if len(results) > 1: + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") # type: ignore[attr-defined] + + async def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: + try: + results = await _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) + except Exception as exc: + if not encapsulate_errors: + # Raise the original error. + raise + # Else, raise all errors as ConfigurationError. 
+ raise ConfigurationError(str(exc)) from exc + return results + + async def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> tuple[resolver.Answer, list[tuple[str, Any]]]: + results = await self._resolve_uri(encapsulate_errors) + + # Construct address tuples + nodes = [ + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) # type: ignore[attr-defined] + for res in results + ] + + # Validate hosts + for node in nodes: + srv_host = node[0].lower() + if self.__fqdn == srv_host and self.nparts < 3: + raise ConfigurationError( + "Invalid SRV host: return address is identical to SRV hostname" + ) + try: + nlist = srv_host.split(".")[1:][-self.__slen :] + except Exception as exc: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from exc + if self.__plist != nlist: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") + if self.__srv_max_hosts: + nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) + return results, nodes + + async def get_hosts(self) -> list[tuple[str, Any]]: + _, nodes = await self._get_srv_response_and_hosts(True) + return nodes + + async def get_hosts_and_min_ttl(self) -> tuple[list[tuple[str, Any]], int]: + results, nodes = await self._get_srv_response_and_hosts(False) + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl diff --git a/pymongo/asynchronous/topology.py b/pymongo/asynchronous/topology.py new file mode 100644 index 0000000000..283aabc690 --- /dev/null +++ b/pymongo/asynchronous/topology.py @@ -0,0 +1,1127 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Internal class to monitor a topology of one or more servers.""" + +from __future__ import annotations + +import asyncio +import logging +import os +import queue +import random +import sys +import time +import warnings +import weakref +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast + +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.asynchronous.client_session import _ServerSession, _ServerSessionPool +from pymongo.asynchronous.monitor import MonitorBase, SrvMonitor +from pymongo.asynchronous.pool import Pool +from pymongo.asynchronous.server import Server +from pymongo.errors import ( + ConnectionFailure, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteError, +) +from pymongo.hello import Hello +from pymongo.lock import ( + _async_cond_wait, + _async_create_condition, + _async_create_lock, +) +from pymongo.logger import ( + _SDAM_LOGGER, + _SERVER_SELECTION_LOGGER, + _debug_log, + _SDAMStatusMessage, + _ServerSelectionStatusMessage, +) +from pymongo.pool_options import PoolOptions +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import ( + Selection, + any_server_selector, + arbiter_server_selector, + secondary_server_selector, + writable_server_selector, +) +from pymongo.topology_description import ( + SRV_POLLING_TOPOLOGIES, + TOPOLOGY_TYPE, + TopologyDescription, + _updated_topology_description_srv_polling, + updated_topology_description, +) + +if TYPE_CHECKING: + from bson import ObjectId + from pymongo.asynchronous.settings import TopologySettings + from pymongo.typings import ClusterTime, _Address + +_IS_SYNC = False + +_pymongo_dir = str(Path(__file__).parent) + + +def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: # type: ignore[type-arg] + q = queue_ref() + if not q: + return False # Cancel PeriodicExecutor. + + while True: + try: + event = q.get_nowait() + except queue.Empty: + break + else: + fn, args = event + fn(*args) + + return True # Continue PeriodicExecutor. + + +class Topology: + """Monitor a topology of one or more servers.""" + + def __init__(self, topology_settings: TopologySettings): + self._topology_id = topology_settings._topology_id + self._listeners = topology_settings._pool_options._event_listeners + self._publish_server = self._listeners is not None and self._listeners.enabled_for_server + self._publish_tp = self._listeners is not None and self._listeners.enabled_for_topology + + # Create events queue if there are publishers. 
+ self._events = None + self.__events_executor: Any = None + + if self._publish_server or self._publish_tp: + self._events = queue.Queue(maxsize=100) + + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.START_TOPOLOGY, + topologyId=self._topology_id, + ) + + if self._publish_tp: + assert self._events is not None + self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) + self._settings = topology_settings + topology_description = TopologyDescription( + topology_settings.get_topology_type(), + topology_settings.get_server_descriptions(), + topology_settings.replica_set_name, + None, + None, + topology_settings, + ) + + self._description = topology_description + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (initial_td, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(initial_td), + newDescription=repr(self._description), + ) + + for seed in topology_settings.seeds: + if self._publish_server: + assert self._events is not None + self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.START_SERVER, + topologyId=self._topology_id, + serverHost=seed[0], + serverPort=seed[1], + ) + + # Store the seed list to help diagnose errors in _error_message(). + self._seed_addresses = list(topology_description.server_descriptions()) + self._opened = False + self._closed = False + self._lock = _async_create_lock() + self._condition = _async_create_condition( + self._lock, self._settings.condition_class if _IS_SYNC else None + ) + self._servers: dict[_Address, Server] = {} + self._pid: Optional[int] = None + self._max_cluster_time: Optional[ClusterTime] = None + self._session_pool = _ServerSessionPool() + + if self._publish_server or self._publish_tp: + assert self._events is not None + weak: weakref.ReferenceType[queue.Queue[Any]] + + async def target() -> bool: + return process_events_queue(weak) + + executor = periodic_executor.AsyncPeriodicExecutor( + interval=common.EVENTS_QUEUE_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_events_thread", + ) + + # We strongly reference the executor and it weakly references + # the queue via this closure. When the topology is freed, stop + # the executor soon. + weak = weakref.ref(self._events, executor.close) + self.__events_executor = executor + executor.open() + + self._srv_monitor = None + if self._settings.fqdn is not None and not self._settings.load_balanced: + self._srv_monitor = SrvMonitor(self, self._settings) + + # Stores all monitor tasks that need to be joined on close or server selection + self._monitor_tasks: list[MonitorBase] = [] + + async def open(self) -> None: + """Start monitoring, or restart after a fork. + + No effect if called multiple times. + + .. warning:: Topology is shared among multiple threads and is protected + by mutual exclusion. Using Topology from a process other than the one + that initialized it will emit a warning and may result in deadlock. 
To
+          prevent this from happening, AsyncMongoClient must be created after any
+          forking.
+
+        """
+        pid = os.getpid()
+        if self._pid is None:
+            self._pid = pid
+        elif pid != self._pid:
+            self._pid = pid
+            if sys.version_info[:2] >= (3, 12):
+                kwargs = {"skip_file_prefixes": (_pymongo_dir,)}
+            else:
+                kwargs = {"stacklevel": 6}
+            # Ignore B028 warning for missing stacklevel.
+            warnings.warn(  # type: ignore[call-overload] # noqa: B028
+                "AsyncMongoClient opened before fork. May not be entirely fork-safe, "
+                "proceed with caution. See PyMongo's documentation for details: "
+                "https://dochub.mongodb.org/core/pymongo-fork-deadlock",
+                **kwargs,
+            )
+            async with self._lock:
+                # Close servers and clear the pools.
+                for server in self._servers.values():
+                    await server.close()
+                # Reset the session pool to avoid duplicate sessions in
+                # the child process.
+                self._session_pool.reset()
+
+        async with self._lock:
+            await self._ensure_opened()
+
+    def get_server_selection_timeout(self) -> float:
+        # CSOT: use remaining timeout when set.
+        timeout = _csot.remaining()
+        if timeout is None:
+            return self._settings.server_selection_timeout
+        return timeout
+
+    async def select_servers(
+        self,
+        selector: Callable[[Selection], Selection],
+        operation: str,
+        server_selection_timeout: Optional[float] = None,
+        address: Optional[_Address] = None,
+        operation_id: Optional[int] = None,
+    ) -> list[Server]:
+        """Return a list of Servers matching selector, or time out.
+
+        :param selector: function that takes a list of Servers and returns
+            a subset of them.
+        :param operation: The name of the operation that the server is being selected for.
+        :param server_selection_timeout: maximum seconds to wait.
+            If not provided, the default value common.SERVER_SELECTION_TIMEOUT
+            is used.
+        :param address: optional server address to select.
+        :param operation_id: The unique id of the current operation being performed. Defaults to None if not provided.
+
+        Calls self.open() if needed.
+
+        Raises :exc:`ServerSelectionTimeoutError` after
+        `server_selection_timeout` if no matching servers are found.
+        """
+        if server_selection_timeout is None:
+            server_timeout = self.get_server_selection_timeout()
+        else:
+            server_timeout = server_selection_timeout
+
+        # Clean up any completed monitor tasks safely
+        if not _IS_SYNC and self._monitor_tasks:
+            await self.cleanup_monitors()
+
+        async with self._lock:
+            server_descriptions = await self._select_servers_loop(
+                selector, server_timeout, operation, operation_id, address
+            )
+
+            return [
+                cast(Server, self.get_server_by_address(sd.address)) for sd in server_descriptions
+            ]
+
+    async def _select_servers_loop(
+        self,
+        selector: Callable[[Selection], Selection],
+        timeout: float,
+        operation: str,
+        operation_id: Optional[int],
+        address: Optional[_Address],
+    ) -> list[ServerDescription]:
+        """select_servers() guts. Hold the lock when calling this."""
+        now = time.monotonic()
+        end_time = now + timeout
+        logged_waiting = False
+
+        if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG):
+            _debug_log(
+                _SERVER_SELECTION_LOGGER,
+                message=_ServerSelectionStatusMessage.STARTED,
+                selector=selector,
+                operation=operation,
+                operationId=operation_id,
+                topologyDescription=self.description,
+                clientId=self.description._topology_settings._topology_id,
+            )
+
+        server_descriptions = self._description.apply_selector(
+            selector, address, custom_selector=self._settings.server_selector
+        )
+
+        while not server_descriptions:
+            # No suitable servers.
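+            # Fail fast once the deadline has passed; otherwise wake the
+            # monitors, wait for a topology change (or a heartbeat interval),
+            # and re-run the selector.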
+ if timeout == 0 or now > end_time: + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.FAILED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + failure=self._error_message(selector), + ) + raise ServerSelectionTimeoutError( + f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" + ) + + if not logged_waiting: + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.WAITING, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + remainingTimeMS=int(1000 * (end_time - time.monotonic())), + ) + logged_waiting = True + + await self._ensure_opened() + self._request_check_all() + + # Release the lock and wait for the topology description to + # change, or for a timeout. We won't miss any changes that + # came after our most recent apply_selector call, since we've + # held the lock until now. + await _async_cond_wait(self._condition, common.MIN_HEARTBEAT_INTERVAL) + self._description.check_compatible() + now = time.monotonic() + server_descriptions = self._description.apply_selector( + selector, address, custom_selector=self._settings.server_selector + ) + + self._description.check_compatible() + return server_descriptions + + async def _select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + servers = await self.select_servers( + selector, operation, server_selection_timeout, address, operation_id + ) + servers = _filter_servers(servers, deprioritized_servers) + if len(servers) == 1: + return servers[0] + server1, server2 = random.sample(servers, 2) + if server1.pool.operation_count <= server2.pool.operation_count: + return server1 + else: + return server2 + + async def select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Like select_servers, but choose a random server if several match.""" + server = await self._select_server( + selector, + operation, + server_selection_timeout, + address, + deprioritized_servers, + operation_id=operation_id, + ) + if _csot.get_timeout(): + _csot.set_rtt(server.description.min_round_trip_time) + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.SUCCEEDED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + serverHost=server.description.address[0], + serverPort=server.description.address[1], + ) + return server + + async def select_server_by_address( + self, + address: _Address, + operation: str, + server_selection_timeout: Optional[int] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Return a Server for "address", reconnecting if necessary. 
+
+        If the server's type is not known, request an immediate check of all
+        servers. Time out after "server_selection_timeout" if the server
+        cannot be reached.
+
+        :param address: A (host, port) pair.
+        :param operation: The name of the operation that the server is being selected for.
+        :param server_selection_timeout: maximum seconds to wait.
+            If not provided, the default value
+            common.SERVER_SELECTION_TIMEOUT is used.
+        :param operation_id: The unique id of the current operation being performed. Defaults to None if not provided.
+
+        Calls self.open() if needed.
+
+        Raises :exc:`ServerSelectionTimeoutError` after
+        `server_selection_timeout` if no matching servers are found.
+        """
+        return await self.select_server(
+            any_server_selector,
+            operation,
+            server_selection_timeout,
+            address,
+            operation_id=operation_id,
+        )
+
+    async def _process_change(
+        self,
+        server_description: ServerDescription,
+        reset_pool: bool = False,
+        interrupt_connections: bool = False,
+    ) -> None:
+        """Process a new ServerDescription on an opened topology.
+
+        Hold the lock when calling this.
+        """
+        td_old = self._description
+        sd_old = td_old._server_descriptions[server_description.address]
+        if _is_stale_server_description(sd_old, server_description):
+            # This is a stale hello response. Ignore it.
+            return
+
+        new_td = updated_topology_description(self._description, server_description)
+        # CMAP: Ensure the pool is "ready" when the server is selectable.
+        if server_description.is_readable or (
+            server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single
+        ):
+            server = self._servers.get(server_description.address)
+            if server:
+                await server.pool.ready()
+
+        suppress_event = sd_old == server_description
+        if self._publish_server and not suppress_event:
+            assert self._events is not None
+            self._events.put(
+                (
+                    self._listeners.publish_server_description_changed,
+                    (sd_old, server_description, server_description.address, self._topology_id),
+                )
+            )
+
+        self._description = new_td
+        await self._update_servers()
+
+        if self._publish_tp and not suppress_event:
+            assert self._events is not None
+            self._events.put(
+                (
+                    self._listeners.publish_topology_description_changed,
+                    (td_old, self._description, self._topology_id),
+                )
+            )
+        if _SDAM_LOGGER.isEnabledFor(logging.DEBUG) and not suppress_event:
+            _debug_log(
+                _SDAM_LOGGER,
+                message=_SDAMStatusMessage.TOPOLOGY_CHANGE,
+                topologyId=self._topology_id,
+                previousDescription=repr(td_old),
+                newDescription=repr(self._description),
+            )
+
+        # Shut down SRV polling for unsupported cluster types.
+        # This is only applicable if the old topology was Unknown, and the
+        # new one is something other than Unknown or Sharded.
+        if self._srv_monitor and (
+            td_old.topology_type == TOPOLOGY_TYPE.Unknown
+            and self._description.topology_type not in SRV_POLLING_TOPOLOGIES
+        ):
+            await self._srv_monitor.close()
+            if not _IS_SYNC:
+                self._monitor_tasks.append(self._srv_monitor)
+
+        # Wake anything waiting in select_servers().
+        self._condition.notify_all()
+
+    async def on_change(
+        self,
+        server_description: ServerDescription,
+        reset_pool: bool = False,
+        interrupt_connections: bool = False,
+    ) -> None:
+        """Process a new ServerDescription after a hello call completes."""
+        # We do no I/O holding the lock.
+        async with self._lock:
+            # Monitors may continue working on hello calls for some time
+            # after a call to Topology.close, so this method may be called at
+            # any time. Ensure the topology is open before processing the
+            # change.
+ # Any monitored server was definitely in the topology description + # once. Check if it's still in the description or if some state- + # change removed it. E.g., we got a host list from the primary + # that didn't include this server. + if self._opened and self._description.has_server(server_description.address): + await self._process_change(server_description, reset_pool, interrupt_connections) + # Clear the pool from a failed heartbeat, done outside the lock to avoid blocking on connection close. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + await server.pool.reset(interrupt_connections=interrupt_connections) + + async def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new seedlist on an opened topology. + Hold the lock when calling this. + """ + td_old = self._description + if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: + return + self._description = _updated_topology_description_srv_polling(self._description, seedlist) + + await self._update_servers() + + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(td_old), + newDescription=repr(self._description), + ) + + async def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new list of nodes obtained from scanning SRV records.""" + # We do no I/O holding the lock. + async with self._lock: + if self._opened: + await self._process_srv_update(seedlist) + + def get_server_by_address(self, address: _Address) -> Optional[Server]: + """Get a Server or None. + + Returns the current version of the server immediately, even if it's + Unknown or absent from the topology. Only use this in unittests. + In driver code, use select_server_by_address, since then you're + assured a recent view of the server's type and wire protocol version. + """ + return self._servers.get(address) + + def has_server(self, address: _Address) -> bool: + return address in self._servers + + async def get_primary(self) -> Optional[_Address]: + """Return primary's address or None.""" + # Implemented here in Topology instead of AsyncMongoClient, so it can lock. + async with self._lock: + topology_type = self._description.topology_type + if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary: + return None + + return writable_server_selector(self._new_selection())[0].address + + async def _get_replica_set_members( + self, selector: Callable[[Selection], Selection] + ) -> set[_Address]: + """Return set of replica set member addresses.""" + # Implemented here in Topology instead of AsyncMongoClient, so it can lock. 
+ async with self._lock: + topology_type = self._description.topology_type + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ): + return set() + + return {sd.address for sd in iter(selector(self._new_selection()))} + + async def get_secondaries(self) -> set[_Address]: + """Return set of secondary addresses.""" + return await self._get_replica_set_members(secondary_server_selector) + + async def get_arbiters(self) -> set[_Address]: + """Return set of arbiter addresses.""" + return await self._get_replica_set_members(arbiter_server_selector) + + def max_cluster_time(self) -> Optional[ClusterTime]: + """Return a document, the highest seen $clusterTime.""" + return self._max_cluster_time + + def _receive_cluster_time_no_lock(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + # Driver Sessions Spec: "Whenever a driver receives a cluster time from + # a server it MUST compare it to the current highest seen cluster time + # for the deployment. If the new cluster time is higher than the + # highest seen cluster time it MUST become the new highest seen cluster + # time. Two cluster times are compared using only the BsonTimestamp + # value of the clusterTime embedded field." + if cluster_time: + # ">" uses bson.timestamp.Timestamp's comparison operator. + if ( + not self._max_cluster_time + or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] + ): + self._max_cluster_time = cluster_time + + async def receive_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + async with self._lock: + self._receive_cluster_time_no_lock(cluster_time) + + async def request_check_all(self, wait_time: int = 5) -> None: + """Wake all monitors, wait for at least one to check its server.""" + async with self._lock: + self._request_check_all() + await _async_cond_wait(self._condition, wait_time) + + def data_bearing_servers(self) -> list[ServerDescription]: + """Return a list of all data-bearing servers. + + This includes any server that might be selected for an operation. + """ + if self._description.topology_type == TOPOLOGY_TYPE.Single: + return self._description.known_servers + return self._description.readable_servers + + async def update_pool(self) -> None: + # Remove any stale sockets and add new sockets if pool is too small. + servers = [] + async with self._lock: + # Only update pools for data-bearing servers. + for sd in self.data_bearing_servers(): + server = self._servers[sd.address] + servers.append((server, server.pool.gen.get_overall())) + + for server, generation in servers: + try: + await server.pool.remove_stale_sockets(generation) + except PyMongoError as exc: + ctx = _ErrorContext(exc, 0, generation, False, None) + await self.handle_error(server.description.address, ctx) + raise + + async def close(self) -> None: + """Clear pools and terminate monitors. Topology does not reopen on + demand. Any further operations will raise + :exc:`~.errors.InvalidOperation`. + """ + async with self._lock: + old_td = self._description + for server in self._servers.values(): + await server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) + + # Mark all servers Unknown. + self._description = self._description.reset() + for address, sd in self._description.server_descriptions().items(): + if address in self._servers: + self._servers[address].description = sd + + # Stop SRV polling thread. 
+ if self._srv_monitor: + await self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) + + self._opened = False + self._closed = True + + # Publish only after releasing the lock. + if self._publish_tp: + assert self._events is not None + self._description = TopologyDescription( + TOPOLOGY_TYPE.Unknown, + {}, + self._description.replica_set_name, + self._description.max_set_version, + self._description.max_election_id, + self._description._topology_settings, + ) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + ( + old_td, + self._description, + self._topology_id, + ), + ) + ) + self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(old_td), + newDescription=repr(self._description), + ) + _debug_log( + _SDAM_LOGGER, message=_SDAMStatusMessage.STOP_TOPOLOGY, topologyId=self._topology_id + ) + + if self._publish_server or self._publish_tp: + # Make sure the events executor thread is fully closed before publishing the remaining events + self.__events_executor.close() + await self.__events_executor.join(1) + process_events_queue(weakref.ref(self._events)) # type: ignore[arg-type] + + @property + def description(self) -> TopologyDescription: + return self._description + + def pop_all_sessions(self) -> list[_ServerSession]: + """Pop all session ids from the pool.""" + return self._session_pool.pop_all() + + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: + """Start or resume a server session, or raise ConfigurationError.""" + return self._session_pool.get_server_session(session_timeout_minutes) + + def return_server_session(self, server_session: _ServerSession) -> None: + self._session_pool.return_server_session(server_session) + + def _new_selection(self) -> Selection: + """A Selection object, initially including all known servers. + + Hold the lock when calling this. + """ + return Selection.from_topology_description(self._description) + + async def _ensure_opened(self) -> None: + """Start monitors, or restart after a fork. + + Hold the lock when calling this. + """ + if self._closed: + raise InvalidOperation("Cannot use AsyncMongoClient after close") + + if not self._opened: + self._opened = True + await self._update_servers() + + # Start or restart the events publishing thread. + if self._publish_tp or self._publish_server: + self.__events_executor.open() + + # Start the SRV polling thread. + if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): + self._srv_monitor.open() + + if self._settings.load_balanced: + # Emit initial SDAM events for load balancer mode. + await self._process_change( + ServerDescription( + self._seed_addresses[0], + Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), + ) + ) + + # Ensure that the monitors are open. + for server in self._servers.values(): + await server.open() + + def _is_stale_error(self, address: _Address, err_ctx: _ErrorContext) -> bool: + server = self._servers.get(address) + if server is None: + # Another thread removed this server from the topology. + return True + + if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): + # This is an outdated error from a previous pool version. 
+ return True + + # topologyVersion check, ignore error when cur_tv >= error_tv: + cur_tv = server.description.topology_version + error = err_ctx.error + error_tv = None + if error and hasattr(error, "details"): + if isinstance(error.details, dict): + error_tv = error.details.get("topologyVersion") + + return _is_stale_error_topology_version(cur_tv, error_tv) + + async def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + if self._is_stale_error(address, err_ctx): + return + + server = self._servers[address] + error = err_ctx.error + service_id = err_ctx.service_id + + # Ignore a handshake error if the server is behind a load balancer but + # the service ID is unknown. This indicates that the error happened + # when dialing the connection or during the MongoDB handshake, so we + # don't know the service ID to use for clearing the pool. + if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: + return + + if isinstance(error, NetworkTimeout) and err_ctx.completed_handshake: + # The socket has been closed. Don't reset the server. + # Server Discovery And Monitoring Spec: "When an application + # operation fails because of any network error besides a socket + # timeout...." + return + elif isinstance(error, WriteError): + # Ignore writeErrors. + return + elif isinstance(error, (NotPrimaryError, OperationFailure)): + # As per the SDAM spec if: + # - the server sees a "not primary" error, and + # - the server is not shutting down, and + # - the server version is >= 4.2, then + # we keep the existing connection pool, but mark the server type + # as Unknown and request an immediate check of the server. + # Otherwise, we clear the connection pool, mark the server as + # Unknown and request an immediate check of the server. + if hasattr(error, "code"): + err_code = error.code + else: + # Default error code if one does not exist. + default = 10107 if isinstance(error, NotPrimaryError) else None + err_code = error.details.get("code", default) # type: ignore[union-attr] + if err_code in helpers_shared._NOT_PRIMARY_CODES: + is_shutting_down = err_code in helpers_shared._SHUTDOWN_CODES + # Mark server Unknown, clear the pool, and request check. + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + if is_shutting_down or (err_ctx.max_wire_version <= 7): + # Clear the pool. + await server.reset(service_id) + server.request_check() + elif not err_ctx.completed_handshake: + # Unknown command error during the connection handshake. + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + await server.reset(service_id) + elif isinstance(error, ConnectionFailure): + if isinstance(error, WaitQueueTimeoutError): + return + # "Client MUST replace the server's description with type Unknown + # ... MUST NOT request an immediate check of the server." + if not self._settings.load_balanced: + await self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + await server.reset(service_id) + # "When a client marks a server Unknown from `Network error when + # reading or writing`_, clients MUST cancel the hello check on + # that server and close the current monitoring connection." + server._monitor.cancel_check() + + async def handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + """Handle an application error. 
+ + May reset the server to Unknown, clear the pool, and request an + immediate check depending on the error and the context. + """ + async with self._lock: + await self._handle_error(address, err_ctx) + + def _request_check_all(self) -> None: + """Wake all monitors. Hold the lock when calling this.""" + for server in self._servers.values(): + server.request_check() + + async def _update_servers(self) -> None: + """Sync our Servers from TopologyDescription.server_descriptions. + + Hold the lock while calling this. + """ + for address, sd in self._description.server_descriptions().items(): + if address not in self._servers: + monitor = self._settings.monitor_class( + server_description=sd, + topology=self, + pool=self._create_pool_for_monitor(address), + topology_settings=self._settings, + ) + + weak = None + if self._publish_server and self._events is not None: + weak = weakref.ref(self._events) + server = Server( + server_description=sd, + pool=self._create_pool_for_server(address), + monitor=monitor, + topology_id=self._topology_id, + listeners=self._listeners, + events=weak, + ) + + self._servers[address] = server + await server.open() + else: + # Cache old is_writable value. + was_writable = self._servers[address].description.is_writable + # Update server description. + self._servers[address].description = sd + # Update is_writable value of the pool, if it changed. + if was_writable != sd.is_writable: + await self._servers[address].pool.update_is_writable(sd.is_writable) + + for address, server in list(self._servers.items()): + if not self._description.has_server(address): + await server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) + self._servers.pop(address) + + def _create_pool_for_server(self, address: _Address) -> Pool: + return self._settings.pool_class( + address, self._settings.pool_options, client_id=self._topology_id + ) + + def _create_pool_for_monitor(self, address: _Address) -> Pool: + options = self._settings.pool_options + + # According to the Server Discovery And Monitoring Spec, monitors use + # connect_timeout for both connect_timeout and socket_timeout. The + # pool only has one socket so maxPoolSize and so on aren't needed. + monitor_pool_options = PoolOptions( + connect_timeout=options.connect_timeout, + socket_timeout=options.connect_timeout, + ssl_context=options._ssl_context, + tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, + event_listeners=options._event_listeners, + appname=options.appname, + driver=options.driver, + pause_enabled=False, + server_api=options.server_api, + ) + + return self._settings.pool_class( + address, monitor_pool_options, is_sdam=True, client_id=self._topology_id + ) + + def _error_message(self, selector: Callable[[Selection], Selection]) -> str: + """Format an error message if server selection fails. + + Hold the lock when calling this. + """ + is_replica_set = self._description.topology_type in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ) + + if is_replica_set: + server_plural = "replica set members" + elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: + server_plural = "mongoses" + else: + server_plural = "servers" + + if self._description.known_servers: + # We've connected, but no servers match the selector. 
+ if selector is writable_server_selector: + if is_replica_set: + return "No primary available for writes" + else: + return "No %s available for writes" % server_plural + else: + return f'No {server_plural} match selector "{selector}"' + else: + addresses = list(self._description.server_descriptions()) + servers = list(self._description.server_descriptions().values()) + if not servers: + if is_replica_set: + # We removed all servers because of the wrong setName? + return 'No {} available for replica set name "{}"'.format( + server_plural, + self._settings.replica_set_name, + ) + else: + return "No %s available" % server_plural + + # 1 or more servers, all Unknown. Are they unknown for one reason? + error = servers[0].error + same = all(server.error == error for server in servers[1:]) + if same: + if error is None: + # We're still discovering. + return "No %s found yet" % server_plural + + if is_replica_set and not set(addresses).intersection(self._seed_addresses): + # We replaced our seeds with new hosts but can't reach any. + return ( + "Could not reach any servers in %s. Replica set is" + " configured with internal hostnames or IPs?" % addresses + ) + + return str(error) + else: + return ",".join(str(server.error) for server in servers if server.error) + + async def cleanup_monitors(self) -> None: + tasks = [] + try: + while self._monitor_tasks: + tasks.append(self._monitor_tasks.pop()) + except IndexError: + pass + await asyncio.gather(*[t.join() for t in tasks], return_exceptions=True) # type: ignore[func-returns-value] + + def __repr__(self) -> str: + msg = "" + if not self._opened: + msg = "CLOSED " + return f"<{self.__class__.__name__} {msg}{self._description!r}>" + + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + """The properties to use for AsyncMongoClient/Topology equality checks.""" + ts = self._settings + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) + + def __eq__(self, other: object) -> bool: + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __hash__(self) -> int: + return hash(self.eq_props()) + + +class _ErrorContext: + """An error with context for SDAM error handling.""" + + def __init__( + self, + error: BaseException, + max_wire_version: int, + sock_generation: int, + completed_handshake: bool, + service_id: Optional[ObjectId], + ): + self.error = error + self.max_wire_version = max_wire_version + self.sock_generation = sock_generation + self.completed_handshake = completed_handshake + self.service_id = service_id + + +def _is_stale_error_topology_version( + current_tv: Optional[Mapping[str, Any]], error_tv: Optional[Mapping[str, Any]] +) -> bool: + """Return True if the error's topologyVersion is <= current.""" + if current_tv is None or error_tv is None: + return False + if current_tv["processId"] != error_tv["processId"]: + return False + return current_tv["counter"] >= error_tv["counter"] + + +def _is_stale_server_description(current_sd: ServerDescription, new_sd: ServerDescription) -> bool: + """Return True if the new topologyVersion is < current.""" + current_tv, new_tv = current_sd.topology_version, new_sd.topology_version + if current_tv is None or new_tv is None: + return False + if current_tv["processId"] != new_tv["processId"]: + return False + return current_tv["counter"] > new_tv["counter"] + + +def _filter_servers( + candidates: list[Server], deprioritized_servers: Optional[list[Server]] = None +) -> 
list[Server]:
+    """Filter out deprioritized servers from a list of server candidates."""
+    if not deprioritized_servers:
+        return candidates
+
+    filtered = [server for server in candidates if server not in deprioritized_servers]
+
+    # If no server survives the filter, fall back to the original list.
+    return filtered or candidates
diff --git a/pymongo/asynchronous/uri_parser.py b/pymongo/asynchronous/uri_parser.py
new file mode 100644
index 0000000000..055b04d75a
--- /dev/null
+++ b/pymongo/asynchronous/uri_parser.py
@@ -0,0 +1,193 @@
+# Copyright 2011-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+
+"""Tools to parse and validate a MongoDB URI."""
+from __future__ import annotations
+
+from typing import Any, Optional
+from urllib.parse import unquote_plus
+
+from pymongo.asynchronous.srv_resolver import _SrvResolver
+from pymongo.common import SRV_SERVICE_NAME, _CaseInsensitiveDictionary
+from pymongo.errors import ConfigurationError, InvalidURI
+from pymongo.uri_parser_shared import (
+    _ALLOWED_TXT_OPTS,
+    DEFAULT_PORT,
+    SCHEME,
+    SCHEME_LEN,
+    SRV_SCHEME_LEN,
+    _check_options,
+    _make_options_case_sensitive,
+    _validate_uri,
+    split_hosts,
+    split_options,
+)
+
+_IS_SYNC = False
+
+
+async def parse_uri(
+    uri: str,
+    default_port: Optional[int] = DEFAULT_PORT,
+    validate: bool = True,
+    warn: bool = False,
+    normalize: bool = True,
+    connect_timeout: Optional[float] = None,
+    srv_service_name: Optional[str] = None,
+    srv_max_hosts: Optional[int] = None,
+) -> dict[str, Any]:
+    """Parse and validate a MongoDB URI.
+
+    Returns a dict of the form::
+
+        {
+            'nodelist': <list of (host, port) tuples>,
+            'username': <username> or None,
+            'password': <password> or None,
+            'database': <database name> or None,
+            'collection': <collection name> or None,
+            'options': <dict of MongoDB URI options>,
+            'fqdn': <fqdn of the MongoDB+SRV URI> or None
+        }
+
+    If the URI scheme is "mongodb+srv://", DNS SRV and TXT lookups will be done
+    to build the nodelist and options.
+
+    :param uri: The MongoDB URI to parse.
+    :param default_port: The port number to use when one wasn't specified
+        for a host in the URI.
+    :param validate: If ``True`` (the default), validate and
+        normalize all options. Default: ``True``.
+    :param warn: When validating, if ``True`` then warn the user and
+        ignore any invalid options or values. If ``False``,
+        validation will error when options are unsupported or values are
+        invalid. Default: ``False``.
+    :param normalize: If ``True``, convert names of URI options
+        to their internally-used names. Default: ``True``.
+    :param connect_timeout: The maximum time in milliseconds to
+        wait for a response from the DNS server.
+    :param srv_service_name: A custom SRV service name.
+
+    .. versionchanged:: 4.14
+        ``options`` is now type ``dict`` as opposed to a ``_CaseInsensitiveDictionary``.
+
+    .. versionchanged:: 4.6
+        The delimiting slash (``/``) between hosts and connection options is now optional.
+        For example, "mongodb://example.com?tls=true" is now a valid URI.
+
+    .. versionchanged:: 4.0
+        To better follow RFC 3986, unquoted percent signs ("%") are no longer
+        supported.
+
+    ..
versionchanged:: 3.9 + Added the ``normalize`` parameter. + + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + + .. versionchanged:: 3.5 + Return the original value of the ``readPreference`` MongoDB URI option + instead of the validated read preference mode. + + .. versionchanged:: 3.1 + ``warn`` added so invalid options can be ignored. + """ + result = _validate_uri(uri, default_port, validate, warn, normalize, srv_max_hosts) + result.update( + await _parse_srv( + uri, + default_port, + validate, + warn, + normalize, + connect_timeout, + srv_service_name, + srv_max_hosts, + ) + ) + result["options"] = _make_options_case_sensitive(result["options"]) + return result + + +async def _parse_srv( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + else: + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, _ = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + _, _, hosts = host_part.rpartition("@") + else: + hosts = host_part + + hosts = unquote_plus(hosts) + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + nodes = split_hosts(hosts, default_port=None) + fqdn, port = nodes[0] + + # Use the connection timeout. connectTimeoutMS passed as a keyword + # argument overrides the same option passed in the connection string. + connect_timeout = connect_timeout or options.get("connectTimeoutMS") + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) + nodes = await dns_resolver.get_hosts() + dns_options = await dns_resolver.get_options() + if dns_options: + parsed_dns_options = split_options(dns_options, validate, warn, normalize) + if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: + raise ConfigurationError( + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) + for opt, val in parsed_dns_options.items(): + if opt not in options: + options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "options": options, + } diff --git a/pymongo/auth.py b/pymongo/auth.py index 3d259335b0..a36f3f4233 100644 --- a/pymongo/auth.py +++ b/pymongo/auth.py @@ -1,10 +1,10 @@ -# Copyright 2013-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,538 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Authentication helpers.""" +"""Re-import of synchronous Auth API for compatibility.""" +from __future__ import annotations -import functools -import hashlib -import hmac -import os -import socket -from base64 import standard_b64decode, standard_b64encode -from collections import namedtuple -from typing import Callable, Mapping -from urllib.parse import quote +from pymongo.auth_shared import * # noqa: F403 +from pymongo.synchronous.auth import * # noqa: F403 +from pymongo.synchronous.auth import __doc__ as original_doc -from bson.binary import Binary -from bson.son import SON -from pymongo.auth_aws import _authenticate_aws -from pymongo.errors import ConfigurationError, OperationFailure -from pymongo.saslprep import saslprep - -HAVE_KERBEROS = True -_USE_PRINCIPAL = False -try: - import winkerberos as kerberos - - if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): - _USE_PRINCIPAL = True -except ImportError: - try: - import kerberos - except ImportError: - HAVE_KERBEROS = False - - -MECHANISMS = frozenset( - [ - "GSSAPI", - "MONGODB-CR", - "MONGODB-X509", - "MONGODB-AWS", - "PLAIN", - "SCRAM-SHA-1", - "SCRAM-SHA-256", - "DEFAULT", - ] -) -"""The authentication mechanisms supported by PyMongo.""" - - -class _Cache(object): - __slots__ = ("data",) - - _hash_val = hash("_Cache") - - def __init__(self): - self.data = None - - def __eq__(self, other): - # Two instances must always compare equal. - if isinstance(other, _Cache): - return True - return NotImplemented - - def __ne__(self, other): - if isinstance(other, _Cache): - return False - return NotImplemented - - def __hash__(self): - return self._hash_val - - -MongoCredential = namedtuple( - "MongoCredential", - ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], -) -"""A hashable namedtuple of values used for authentication.""" - - -GSSAPIProperties = namedtuple( - "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm"] -) -"""Mechanism properties for GSSAPI authentication.""" - - -_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) -"""Mechanism properties for MONGODB-AWS authentication.""" - - -def _build_credentials_tuple(mech, source, user, passwd, extra, database): - """Build and return a mechanism specific credentials tuple.""" - if mech not in ("MONGODB-X509", "MONGODB-AWS") and user is None: - raise ConfigurationError("%s requires a username." % (mech,)) - if mech == "GSSAPI": - if source is not None and source != "$external": - raise ValueError("authentication source must be $external or None for GSSAPI") - properties = extra.get("authmechanismproperties", {}) - service_name = properties.get("SERVICE_NAME", "mongodb") - canonicalize = properties.get("CANONICALIZE_HOST_NAME", False) - service_realm = properties.get("SERVICE_REALM") - props = GSSAPIProperties( - service_name=service_name, - canonicalize_host_name=canonicalize, - service_realm=service_realm, - ) - # Source is always $external. 
- return MongoCredential(mech, "$external", user, passwd, props, None) - elif mech == "MONGODB-X509": - if passwd is not None: - raise ConfigurationError("Passwords are not supported by MONGODB-X509") - if source is not None and source != "$external": - raise ValueError("authentication source must be $external or None for MONGODB-X509") - # Source is always $external, user can be None. - return MongoCredential(mech, "$external", user, None, None, None) - elif mech == "MONGODB-AWS": - if user is not None and passwd is None: - raise ConfigurationError("username without a password is not supported by MONGODB-AWS") - if source is not None and source != "$external": - raise ConfigurationError( - "authentication source must be $external or None for MONGODB-AWS" - ) - - properties = extra.get("authmechanismproperties", {}) - aws_session_token = properties.get("AWS_SESSION_TOKEN") - aws_props = _AWSProperties(aws_session_token=aws_session_token) - # user can be None for temporary link-local EC2 credentials. - return MongoCredential(mech, "$external", user, passwd, aws_props, None) - elif mech == "PLAIN": - source_database = source or database or "$external" - return MongoCredential(mech, source_database, user, passwd, None, None) - else: - source_database = source or database or "admin" - if passwd is None: - raise ConfigurationError("A password is required.") - return MongoCredential(mech, source_database, user, passwd, None, _Cache()) - - -def _xor(fir, sec): - """XOR two byte strings together (python 3.x).""" - return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)]) - - -def _parse_scram_response(response): - """Split a scram response into key, value pairs.""" - return dict(item.split(b"=", 1) for item in response.split(b",")) - - -def _authenticate_scram_start(credentials, mechanism): - username = credentials.username - user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C") - nonce = standard_b64encode(os.urandom(32)) - first_bare = b"n=" + user + b",r=" + nonce - - cmd = SON( - [ - ("saslStart", 1), - ("mechanism", mechanism), - ("payload", Binary(b"n,," + first_bare)), - ("autoAuthorize", 1), - ("options", {"skipEmptyExchange": True}), - ] - ) - return nonce, first_bare, cmd - - -def _authenticate_scram(credentials, sock_info, mechanism): - """Authenticate using SCRAM.""" - username = credentials.username - if mechanism == "SCRAM-SHA-256": - digest = "sha256" - digestmod = hashlib.sha256 - data = saslprep(credentials.password).encode("utf-8") - else: - digest = "sha1" - digestmod = hashlib.sha1 - data = _password_digest(username, credentials.password).encode("utf-8") - source = credentials.source - cache = credentials.cache - - # Make local - _hmac = hmac.HMAC - - ctx = sock_info.auth_ctx - if ctx and ctx.speculate_succeeded(): - nonce, first_bare = ctx.scram_data - res = ctx.speculative_authenticate - else: - nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) - res = sock_info.command(source, cmd) - - server_first = res["payload"] - parsed = _parse_scram_response(server_first) - iterations = int(parsed[b"i"]) - if iterations < 4096: - raise OperationFailure("Server returned an invalid iteration count.") - salt = parsed[b"s"] - rnonce = parsed[b"r"] - if not rnonce.startswith(nonce): - raise OperationFailure("Server returned an invalid nonce.") - - without_proof = b"c=biws,r=" + rnonce - if cache.data: - client_key, server_key, csalt, citerations = cache.data - else: - client_key, server_key, csalt, citerations = None, None, None, None - - # 
Salt and / or iterations could change for a number of different - # reasons. Either changing invalidates the cache. - if not client_key or salt != csalt or iterations != citerations: - salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) - client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() - server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() - cache.data = (client_key, server_key, salt, iterations) - stored_key = digestmod(client_key).digest() - auth_msg = b",".join((first_bare, server_first, without_proof)) - client_sig = _hmac(stored_key, auth_msg, digestmod).digest() - client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) - client_final = b",".join((without_proof, client_proof)) - - server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) - - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", res["conversationId"]), - ("payload", Binary(client_final)), - ] - ) - res = sock_info.command(source, cmd) - - parsed = _parse_scram_response(res["payload"]) - if not hmac.compare_digest(parsed[b"v"], server_sig): - raise OperationFailure("Server returned an invalid signature.") - - # A third empty challenge may be required if the server does not support - # skipEmptyExchange: SERVER-44857. - if not res["done"]: - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", res["conversationId"]), - ("payload", Binary(b"")), - ] - ) - res = sock_info.command(source, cmd) - if not res["done"]: - raise OperationFailure("SASL conversation failed to complete.") - - -def _password_digest(username, password): - """Get a password digest to use for authentication.""" - if not isinstance(password, str): - raise TypeError("password must be an instance of str") - if len(password) == 0: - raise ValueError("password can't be empty") - if not isinstance(username, str): - raise TypeError("username must be an instance of str") - - md5hash = hashlib.md5() - data = "%s:mongo:%s" % (username, password) - md5hash.update(data.encode("utf-8")) - return md5hash.hexdigest() - - -def _auth_key(nonce, username, password): - """Get an auth key to use for authentication.""" - digest = _password_digest(username, password) - md5hash = hashlib.md5() - data = "%s%s%s" % (nonce, username, digest) - md5hash.update(data.encode("utf-8")) - return md5hash.hexdigest() - - -def _canonicalize_hostname(hostname): - """Canonicalize hostname following MIT-krb5 behavior.""" - # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 - af, socktype, proto, canonname, sockaddr = socket.getaddrinfo( - hostname, None, 0, 0, socket.IPPROTO_TCP, socket.AI_CANONNAME - )[0] - - try: - name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) - except socket.gaierror: - return canonname.lower() - - return name[0].lower() - - -def _authenticate_gssapi(credentials, sock_info): - """Authenticate using GSSAPI.""" - if not HAVE_KERBEROS: - raise ConfigurationError( - 'The "kerberos" module must be installed to use GSSAPI authentication.' - ) - - try: - username = credentials.username - password = credentials.password - props = credentials.mechanism_properties - # Starting here and continuing through the while loop below - establish - # the security context. See RFC 4752, Section 3.1, first paragraph. 
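# The GSSAPI service principal assembled in the deleted code just below,
# shown in isolation. The function name and values are illustrative: the
# service name, the (optionally canonicalized) host, and an optional realm
# are joined into "name@host@REALM".
from typing import Optional


def _sketch_gssapi_service(service_name: str, host: str, realm: Optional[str] = None) -> str:
    service = service_name + "@" + host
    if realm is not None:
        service = service + "@" + realm
    return service


assert (
    _sketch_gssapi_service("mongodb", "db1.example.com", "EXAMPLE.COM")
    == "mongodb@db1.example.com@EXAMPLE.COM"
)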
- host = sock_info.address[0] - if props.canonicalize_host_name: - host = _canonicalize_hostname(host) - service = props.service_name + "@" + host - if props.service_realm is not None: - service = service + "@" + props.service_realm - - if password is not None: - if _USE_PRINCIPAL: - # Note that, though we use unquote_plus for unquoting URI - # options, we use quote here. Microsoft's UrlUnescape (used - # by WinKerberos) doesn't support +. - principal = ":".join((quote(username), quote(password))) - result, ctx = kerberos.authGSSClientInit( - service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG - ) - else: - if "@" in username: - user, domain = username.split("@", 1) - else: - user, domain = username, None - result, ctx = kerberos.authGSSClientInit( - service, - gssflags=kerberos.GSS_C_MUTUAL_FLAG, - user=user, - domain=domain, - password=password, - ) - else: - result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) - - if result != kerberos.AUTH_GSS_COMPLETE: - raise OperationFailure("Kerberos context failed to initialize.") - - try: - # pykerberos uses a weird mix of exceptions and return values - # to indicate errors. - # 0 == continue, 1 == complete, -1 == error - # Only authGSSClientStep can return 0. - if kerberos.authGSSClientStep(ctx, "") != 0: - raise OperationFailure("Unknown kerberos failure in step function.") - - # Start a SASL conversation with mongod/s - # Note: pykerberos deals with base64 encoded byte strings. - # Since mongo accepts base64 strings as the payload we don't - # have to use bson.binary.Binary. - payload = kerberos.authGSSClientResponse(ctx) - cmd = SON( - [ - ("saslStart", 1), - ("mechanism", "GSSAPI"), - ("payload", payload), - ("autoAuthorize", 1), - ] - ) - response = sock_info.command("$external", cmd) - - # Limit how many times we loop to catch protocol / library issues - for _ in range(10): - result = kerberos.authGSSClientStep(ctx, str(response["payload"])) - if result == -1: - raise OperationFailure("Unknown kerberos failure in step function.") - - payload = kerberos.authGSSClientResponse(ctx) or "" - - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", response["conversationId"]), - ("payload", payload), - ] - ) - response = sock_info.command("$external", cmd) - - if result == kerberos.AUTH_GSS_COMPLETE: - break - else: - raise OperationFailure("Kerberos authentication failed to complete.") - - # Once the security context is established actually authenticate. - # See RFC 4752, Section 3.1, last two paragraphs. 
- if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: - raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") - - if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: - raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") - - payload = kerberos.authGSSClientResponse(ctx) - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", response["conversationId"]), - ("payload", payload), - ] - ) - sock_info.command("$external", cmd) - - finally: - kerberos.authGSSClientClean(ctx) - - except kerberos.KrbError as exc: - raise OperationFailure(str(exc)) - - -def _authenticate_plain(credentials, sock_info): - """Authenticate using SASL PLAIN (RFC 4616)""" - source = credentials.source - username = credentials.username - password = credentials.password - payload = ("\x00%s\x00%s" % (username, password)).encode("utf-8") - cmd = SON( - [ - ("saslStart", 1), - ("mechanism", "PLAIN"), - ("payload", Binary(payload)), - ("autoAuthorize", 1), - ] - ) - sock_info.command(source, cmd) - - -def _authenticate_x509(credentials, sock_info): - """Authenticate using MONGODB-X509.""" - ctx = sock_info.auth_ctx - if ctx and ctx.speculate_succeeded(): - # MONGODB-X509 is done after the speculative auth step. - return - - cmd = _X509Context(credentials).speculate_command() - sock_info.command("$external", cmd) - - -def _authenticate_mongo_cr(credentials, sock_info): - """Authenticate using MONGODB-CR.""" - source = credentials.source - username = credentials.username - password = credentials.password - # Get a nonce - response = sock_info.command(source, {"getnonce": 1}) - nonce = response["nonce"] - key = _auth_key(nonce, username, password) - - # Actually authenticate - query = SON([("authenticate", 1), ("user", username), ("nonce", nonce), ("key", key)]) - sock_info.command(source, query) - - -def _authenticate_default(credentials, sock_info): - if sock_info.max_wire_version >= 7: - if sock_info.negotiated_mechs: - mechs = sock_info.negotiated_mechs - else: - source = credentials.source - cmd = sock_info.hello_cmd() - cmd["saslSupportedMechs"] = source + "." 
+ credentials.username - mechs = sock_info.command(source, cmd, publish_events=False).get( - "saslSupportedMechs", [] - ) - if "SCRAM-SHA-256" in mechs: - return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-256") - else: - return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-1") - else: - return _authenticate_scram(credentials, sock_info, "SCRAM-SHA-1") - - -_AUTH_MAP: Mapping[str, Callable] = { - "GSSAPI": _authenticate_gssapi, - "MONGODB-CR": _authenticate_mongo_cr, - "MONGODB-X509": _authenticate_x509, - "MONGODB-AWS": _authenticate_aws, - "PLAIN": _authenticate_plain, - "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), - "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), - "DEFAULT": _authenticate_default, -} - - -class _AuthContext(object): - def __init__(self, credentials): - self.credentials = credentials - self.speculative_authenticate = None - - @staticmethod - def from_credentials(creds): - spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) - if spec_cls: - return spec_cls(creds) - return None - - def speculate_command(self): - raise NotImplementedError - - def parse_response(self, hello): - self.speculative_authenticate = hello.speculative_authenticate - - def speculate_succeeded(self): - return bool(self.speculative_authenticate) - - -class _ScramContext(_AuthContext): - def __init__(self, credentials, mechanism): - super(_ScramContext, self).__init__(credentials) - self.scram_data = None - self.mechanism = mechanism - - def speculate_command(self): - nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) - # The 'db' field is included only on the speculative command. - cmd["db"] = self.credentials.source - # Save for later use. - self.scram_data = (nonce, first_bare) - return cmd - - -class _X509Context(_AuthContext): - def speculate_command(self): - cmd = SON([("authenticate", 1), ("mechanism", "MONGODB-X509")]) - if self.credentials.username is not None: - cmd["user"] = self.credentials.username - return cmd - - -_SPECULATIVE_AUTH_MAP: Mapping[str, Callable] = { - "MONGODB-X509": _X509Context, - "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), - "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), - "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), -} - - -def authenticate(credentials, sock_info): - """Authenticate sock_info.""" - mechanism = credentials.mechanism - auth_func = _AUTH_MAP[mechanism] - auth_func(credentials, sock_info) +__doc__ = original_doc diff --git a/pymongo/auth_aws.py b/pymongo/auth_aws.py deleted file mode 100644 index 4b2af35ea4..0000000000 --- a/pymongo/auth_aws.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright 2020-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
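# A quick check of the compatibility-shim pattern that pymongo/auth.py
# adopts above; assumes a PyMongo build with this change applied. The
# legacy import path re-exports the new shared and synchronous
# implementations, so existing imports keep working unchanged.
import pymongo.auth

assert "SCRAM-SHA-256" in pymongo.auth.MECHANISMS  # re-exported from auth_shared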
- -"""MONGODB-AWS Authentication helpers.""" - -try: - import pymongo_auth_aws - from pymongo_auth_aws import AwsCredential, AwsSaslContext, PyMongoAuthAwsError - - _HAVE_MONGODB_AWS = True -except ImportError: - - class AwsSaslContext(object): # type: ignore - def __init__(self, credentials): - pass - - _HAVE_MONGODB_AWS = False - -import bson -from bson.binary import Binary -from bson.son import SON -from pymongo.errors import ConfigurationError, OperationFailure - - -class _AwsSaslContext(AwsSaslContext): # type: ignore - # Dependency injection: - def binary_type(self): - """Return the bson.binary.Binary type.""" - return Binary - - def bson_encode(self, doc): - """Encode a dictionary to BSON.""" - return bson.encode(doc) - - def bson_decode(self, data): - """Decode BSON to a dictionary.""" - return bson.decode(data) - - -def _authenticate_aws(credentials, sock_info): - """Authenticate using MONGODB-AWS.""" - if not _HAVE_MONGODB_AWS: - raise ConfigurationError( - "MONGODB-AWS authentication requires pymongo-auth-aws: " - "install with: python -m pip install 'pymongo[aws]'" - ) - - if sock_info.max_wire_version < 9: - raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") - - try: - ctx = _AwsSaslContext( - AwsCredential( - credentials.username, - credentials.password, - credentials.mechanism_properties.aws_session_token, - ) - ) - client_payload = ctx.step(None) - client_first = SON( - [("saslStart", 1), ("mechanism", "MONGODB-AWS"), ("payload", client_payload)] - ) - server_first = sock_info.command("$external", client_first) - res = server_first - # Limit how many times we loop to catch protocol / library issues - for _ in range(10): - client_payload = ctx.step(res["payload"]) - cmd = SON( - [ - ("saslContinue", 1), - ("conversationId", server_first["conversationId"]), - ("payload", client_payload), - ] - ) - res = sock_info.command("$external", cmd) - if res["done"]: - # SASL complete. - break - except PyMongoAuthAwsError as exc: - # Convert to OperationFailure and include pymongo-auth-aws version. - raise OperationFailure( - "%s (pymongo-auth-aws version %s)" % (exc, pymongo_auth_aws.__version__) - ) diff --git a/pymongo/auth_oidc.py b/pymongo/auth_oidc.py new file mode 100644 index 0000000000..61764b8111 --- /dev/null +++ b/pymongo/auth_oidc.py @@ -0,0 +1,23 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Re-import of synchronous AuthOIDC API for compatibility.""" +from __future__ import annotations + +from pymongo.auth_oidc_shared import * # noqa: F403 +from pymongo.synchronous.auth_oidc import * # noqa: F403 +from pymongo.synchronous.auth_oidc import __doc__ as original_doc + +__doc__ = original_doc +__all__ = ["OIDCCallback", "OIDCCallbackContext", "OIDCCallbackResult", "OIDCIdPInfo"] # noqa: F405 diff --git a/pymongo/auth_oidc_shared.py b/pymongo/auth_oidc_shared.py new file mode 100644 index 0000000000..d33397f52d --- /dev/null +++ b/pymongo/auth_oidc_shared.py @@ -0,0 +1,132 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, types, and classes shared across OIDC auth implementations.""" +from __future__ import annotations + +import abc +import os +from dataclasses import dataclass, field +from typing import Optional +from urllib.parse import quote + +from pymongo._azure_helpers import _get_azure_response +from pymongo._gcp_helpers import _get_gcp_response + + +@dataclass +class OIDCIdPInfo: + issuer: str + clientId: Optional[str] = field(default=None) + requestScopes: Optional[list[str]] = field(default=None) + + +@dataclass +class OIDCCallbackContext: + timeout_seconds: float + username: str + version: int + refresh_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + + +@dataclass +class OIDCCallbackResult: + access_token: str + expires_in_seconds: Optional[float] = field(default=None) + refresh_token: Optional[str] = field(default=None) + + +class OIDCCallback(abc.ABC): + """A base class for defining OIDC callbacks.""" + + @abc.abstractmethod + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + """Convert the given BSON value into our own type.""" + + +@dataclass +class _OIDCProperties: + callback: Optional[OIDCCallback] = field(default=None) + human_callback: Optional[OIDCCallback] = field(default=None) + environment: Optional[str] = field(default=None) + allowed_hosts: list[str] = field(default_factory=list) + token_resource: Optional[str] = field(default=None) + username: str = "" + + +"""Mechanism properties for MONGODB-OIDC authentication.""" + +TOKEN_BUFFER_MINUTES = 5 +HUMAN_CALLBACK_TIMEOUT_SECONDS = 5 * 60 +CALLBACK_VERSION = 1 +MACHINE_CALLBACK_TIMEOUT_SECONDS = 60 +TIME_BETWEEN_CALLS_SECONDS = 0.1 + + +class _OIDCTestCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + token_file = os.environ.get("OIDC_TOKEN_FILE") + if not token_file: + raise RuntimeError( + 'MONGODB-OIDC with an "test" provider requires "OIDC_TOKEN_FILE" to be set' + ) + with open(token_file) as fid: + return OIDCCallbackResult(access_token=fid.read().strip()) + + +class _OIDCAWSCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + token_file = os.environ.get("AWS_WEB_IDENTITY_TOKEN_FILE") + if not token_file: + raise RuntimeError( + 'MONGODB-OIDC with an "aws" provider requires 
"AWS_WEB_IDENTITY_TOKEN_FILE" to be set' + ) + with open(token_file) as fid: + return OIDCCallbackResult(access_token=fid.read().strip()) + + +class _OIDCAzureCallback(OIDCCallback): + def __init__(self, token_resource: str) -> None: + self.token_resource = quote(token_resource) + + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + resp = _get_azure_response(self.token_resource, context.username, context.timeout_seconds) + return OIDCCallbackResult( + access_token=resp["access_token"], expires_in_seconds=resp["expires_in"] + ) + + +class _OIDCGCPCallback(OIDCCallback): + def __init__(self, token_resource: str) -> None: + self.token_resource = quote(token_resource) + + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + resp = _get_gcp_response(self.token_resource, context.timeout_seconds) + return OIDCCallbackResult(access_token=resp["access_token"]) + + +class _OIDCK8SCallback(OIDCCallback): + def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult: + return OIDCCallbackResult(access_token=_get_k8s_token()) + + +def _get_k8s_token() -> str: + fname = "/var/run/secrets/kubernetes.io/serviceaccount/token" + for key in ["AZURE_FEDERATED_TOKEN_FILE", "AWS_WEB_IDENTITY_TOKEN_FILE"]: + if key in os.environ: + fname = os.environ[key] + with open(fname) as fid: + return fid.read() diff --git a/pymongo/auth_shared.py b/pymongo/auth_shared.py new file mode 100644 index 0000000000..5a9a2b6732 --- /dev/null +++ b/pymongo/auth_shared.py @@ -0,0 +1,254 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants and types shared across multiple auth types.""" +from __future__ import annotations + +import os +import typing +from base64 import standard_b64encode +from collections import namedtuple +from typing import Any, Dict, Mapping, Optional + +from bson import Binary +from pymongo.auth_oidc_shared import ( + _OIDCAzureCallback, + _OIDCGCPCallback, + _OIDCK8SCallback, + _OIDCProperties, + _OIDCTestCallback, +) +from pymongo.errors import ConfigurationError + +MECHANISMS = frozenset( + [ + "GSSAPI", + "MONGODB-OIDC", + "MONGODB-X509", + "MONGODB-AWS", + "PLAIN", + "SCRAM-SHA-1", + "SCRAM-SHA-256", + "DEFAULT", + ] +) +"""The authentication mechanisms supported by PyMongo.""" + + +class _Cache: + __slots__ = ("data",) + + _hash_val = hash("_Cache") + + def __init__(self) -> None: + self.data = None + + def __eq__(self, other: object) -> bool: + # Two instances must always compare equal. 
+ if isinstance(other, _Cache): + return True + return NotImplemented + + def __ne__(self, other: object) -> bool: + if isinstance(other, _Cache): + return False + return NotImplemented + + def __hash__(self) -> int: + return self._hash_val + + +MongoCredential = namedtuple( + "MongoCredential", + ["mechanism", "source", "username", "password", "mechanism_properties", "cache"], +) +"""A hashable namedtuple of values used for authentication.""" + + +GSSAPIProperties = namedtuple( + "GSSAPIProperties", ["service_name", "canonicalize_host_name", "service_realm", "service_host"] +) +"""Mechanism properties for GSSAPI authentication.""" + + +_AWSProperties = namedtuple("_AWSProperties", ["aws_session_token"]) +"""Mechanism properties for MONGODB-AWS authentication.""" + + +def _validate_canonicalize_host_name(value: str | bool) -> str | bool: + valid_names = [False, True, "none", "forward", "forwardAndReverse"] + if value in ["true", "false", True, False]: + return value in ["true", True] + + if value not in valid_names: + raise ValueError(f"CANONICALIZE_HOST_NAME '{value}' not in valid options: {valid_names}") + return value + + +def _build_credentials_tuple( + mech: str, + source: Optional[str], + user: Optional[str], + passwd: Optional[str], + extra: Mapping[str, Any], + database: Optional[str], +) -> MongoCredential: + """Build and return a mechanism specific credentials tuple.""" + if mech not in ("MONGODB-X509", "MONGODB-AWS", "MONGODB-OIDC") and user is None: + raise ConfigurationError(f"{mech} requires a username") + if mech == "GSSAPI": + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for GSSAPI") + properties = extra.get("authmechanismproperties", {}) + service_name = properties.get("SERVICE_NAME", "mongodb") + service_host = properties.get("SERVICE_HOST", None) + canonicalize = properties.get("CANONICALIZE_HOST_NAME", "false") + canonicalize = _validate_canonicalize_host_name(canonicalize) + service_realm = properties.get("SERVICE_REALM") + props = GSSAPIProperties( + service_name=service_name, + canonicalize_host_name=canonicalize, + service_realm=service_realm, + service_host=service_host, + ) + # Source is always $external. + return MongoCredential(mech, "$external", user, passwd, props, None) + elif mech == "MONGODB-X509": + if passwd is not None: + raise ConfigurationError("Passwords are not supported by MONGODB-X509") + if source is not None and source != "$external": + raise ValueError("authentication source must be $external or None for MONGODB-X509") + # Source is always $external, user can be None. + return MongoCredential(mech, "$external", user, None, None, None) + elif mech == "MONGODB-AWS": + if user is not None and passwd is None: + raise ConfigurationError("username without a password is not supported by MONGODB-AWS") + if source is not None and source != "$external": + raise ConfigurationError( + "authentication source must be $external or None for MONGODB-AWS" + ) + + properties = extra.get("authmechanismproperties", {}) + aws_session_token = properties.get("AWS_SESSION_TOKEN") + aws_props = _AWSProperties(aws_session_token=aws_session_token) + # user can be None for temporary link-local EC2 credentials. 
+        return MongoCredential(mech, "$external", user, passwd, aws_props, None)
+    elif mech == "MONGODB-OIDC":
+        properties = extra.get("authmechanismproperties", {})
+        callback = properties.get("OIDC_CALLBACK")
+        human_callback = properties.get("OIDC_HUMAN_CALLBACK")
+        environ = properties.get("ENVIRONMENT")
+        token_resource = properties.get("TOKEN_RESOURCE", "")
+        default_allowed = [
+            "*.mongodb.net",
+            "*.mongodb-dev.net",
+            "*.mongodb-qa.net",
+            "*.mongodbgov.net",
+            "localhost",
+            "127.0.0.1",
+            "::1",
+        ]
+        allowed_hosts = properties.get("ALLOWED_HOSTS", default_allowed)
+        if properties.get("ALLOWED_HOSTS", None) is not None and human_callback is None:
+            raise ConfigurationError("ALLOWED_HOSTS is only valid with OIDC_HUMAN_CALLBACK")
+        msg = (
+            "authentication with MONGODB-OIDC requires providing either a callback or an environment"
+        )
+        if passwd is not None:
+            msg = "password is not supported by MONGODB-OIDC"
+            raise ConfigurationError(msg)
+        if callback or human_callback:
+            if environ is not None:
+                raise ConfigurationError(msg)
+            if callback and human_callback:
+                msg = "cannot set both OIDC_CALLBACK and OIDC_HUMAN_CALLBACK"
+                raise ConfigurationError(msg)
+        elif environ is not None:
+            if environ == "test":
+                if user is not None:
+                    msg = "test environment for MONGODB-OIDC does not support username"
+                    raise ConfigurationError(msg)
+                callback = _OIDCTestCallback()
+            elif environ == "azure":
+                passwd = None
+                if not token_resource:
+                    raise ConfigurationError(
+                        "Azure environment for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property"
+                    )
+                callback = _OIDCAzureCallback(token_resource)
+            elif environ == "gcp":
+                passwd = None
+                if not token_resource:
+                    raise ConfigurationError(
+                        "GCP provider for MONGODB-OIDC requires a TOKEN_RESOURCE auth mechanism property"
+                    )
+                callback = _OIDCGCPCallback(token_resource)
+            elif environ == "k8s":
+                passwd = None
+                callback = _OIDCK8SCallback()
+            else:
+                raise ConfigurationError(f"unrecognized ENVIRONMENT for MONGODB-OIDC: {environ}")
+        else:
+            raise ConfigurationError(msg)
+
+        oidc_props = _OIDCProperties(
+            callback=callback,
+            human_callback=human_callback,
+            environment=environ,
+            allowed_hosts=allowed_hosts,
+            token_resource=token_resource,
+            username=user or "",
+        )
+        return MongoCredential(mech, "$external", user, passwd, oidc_props, _Cache())
+
+    elif mech == "PLAIN":
+        source_database = source or database or "$external"
+        return MongoCredential(mech, source_database, user, passwd, None, None)
+    else:
+        source_database = source or database or "admin"
+        if passwd is None:
+            raise ConfigurationError("A password is required")
+        return MongoCredential(mech, source_database, user, passwd, None, _Cache())
+
+
+def _xor(fir: bytes, sec: bytes) -> bytes:
+    """XOR two byte strings together."""
+    return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)])
+
+
+def _parse_scram_response(response: bytes) -> Dict[bytes, bytes]:
+    """Split a scram response into key, value pairs."""
+    return dict(
+        typing.cast(typing.Tuple[bytes, bytes], item.split(b"=", 1))
+        for item in response.split(b",")
+    )
+
+
+def _authenticate_scram_start(
+    credentials: MongoCredential, mechanism: str
+) -> tuple[bytes, bytes, typing.MutableMapping[str, Any]]:
+    username = credentials.username
+    user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C")
+    nonce = standard_b64encode(os.urandom(32))
+    first_bare = b"n=" + user + b",r=" + nonce
+
+    cmd = {
+        "saslStart": 1,
+        "mechanism": mechanism,
+        "payload": Binary(b"n,," + first_bare),
+
"autoAuthorize": 1, + "options": {"skipEmptyExchange": True}, + } + return nonce, first_bare, cmd diff --git a/pymongo/bulk.py b/pymongo/bulk.py deleted file mode 100644 index b21b576aa5..0000000000 --- a/pymongo/bulk.py +++ /dev/null @@ -1,513 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""The bulk write operations interface. - -.. versionadded:: 2.7 -""" -import copy -from itertools import islice -from typing import Any, NoReturn - -from bson.objectid import ObjectId -from bson.raw_bson import RawBSONDocument -from bson.son import SON -from pymongo import _csot, common -from pymongo.client_session import _validate_session_write_concern -from pymongo.collation import validate_collation_or_none -from pymongo.common import ( - validate_is_document_type, - validate_ok_for_replace, - validate_ok_for_update, -) -from pymongo.errors import ( - BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure, -) -from pymongo.helpers import _RETRYABLE_ERROR_CODES, _get_wce_doc -from pymongo.message import ( - _DELETE, - _INSERT, - _UPDATE, - _BulkWriteContext, - _EncryptedBulkWriteContext, - _randint, -) -from pymongo.read_preferences import ReadPreference -from pymongo.write_concern import WriteConcern - -_DELETE_ALL = 0 -_DELETE_ONE = 1 - -# For backwards compatibility. See MongoDB src/mongo/base/error_codes.err -_BAD_VALUE = 2 -_UNKNOWN_ERROR = 8 -_WRITE_CONCERN_ERROR = 64 - -_COMMANDS = ("insert", "update", "delete") - - -class _Run(object): - """Represents a batch of write operations.""" - - def __init__(self, op_type): - """Initialize a new Run object.""" - self.op_type = op_type - self.index_map = [] - self.ops = [] - self.idx_offset = 0 - - def index(self, idx): - """Get the original index of an operation in this run. - - :Parameters: - - `idx`: The Run index that maps to the original index. - """ - return self.index_map[idx] - - def add(self, original_index, operation): - """Add an operation to this Run instance. - - :Parameters: - - `original_index`: The original index of this operation - within a larger bulk operation. - - `operation`: The operation document. 
- """ - self.index_map.append(original_index) - self.ops.append(operation) - - -def _merge_command(run, full_result, offset, result): - """Merge a write command result into the full bulk result.""" - affected = result.get("n", 0) - - if run.op_type == _INSERT: - full_result["nInserted"] += affected - - elif run.op_type == _DELETE: - full_result["nRemoved"] += affected - - elif run.op_type == _UPDATE: - upserted = result.get("upserted") - if upserted: - n_upserted = len(upserted) - for doc in upserted: - doc["index"] = run.index(doc["index"] + offset) - full_result["upserted"].extend(upserted) - full_result["nUpserted"] += n_upserted - full_result["nMatched"] += affected - n_upserted - else: - full_result["nMatched"] += affected - full_result["nModified"] += result["nModified"] - - write_errors = result.get("writeErrors") - if write_errors: - for doc in write_errors: - # Leave the server response intact for APM. - replacement = doc.copy() - idx = doc["index"] + offset - replacement["index"] = run.index(idx) - # Add the failed operation to the error document. - replacement["op"] = run.ops[idx] - full_result["writeErrors"].append(replacement) - - wce = _get_wce_doc(result) - if wce: - full_result["writeConcernErrors"].append(wce) - - -def _raise_bulk_write_error(full_result: Any) -> NoReturn: - """Raise a BulkWriteError from the full bulk api result.""" - if full_result["writeErrors"]: - full_result["writeErrors"].sort(key=lambda error: error["index"]) - raise BulkWriteError(full_result) - - -class _Bulk(object): - """The private guts of the bulk write API.""" - - def __init__(self, collection, ordered, bypass_document_validation, comment=None, let=None): - """Initialize a _Bulk instance.""" - self.collection = collection.with_options( - codec_options=collection.codec_options._replace( - unicode_decode_error_handler="replace", document_class=dict - ) - ) - self.let = let - if self.let is not None: - common.validate_is_document_type("let", self.let) - self.comment = comment - self.ordered = ordered - self.ops = [] - self.executed = False - self.bypass_doc_val = bypass_document_validation - self.uses_collation = False - self.uses_array_filters = False - self.uses_hint_update = False - self.uses_hint_delete = False - self.is_retryable = True - self.retrying = False - self.started_retryable_write = False - # Extra state so that we know where to pick up on a retry attempt. - self.current_run = None - self.next_run = None - - @property - def bulk_ctx_class(self): - encrypter = self.collection.database.client._encrypter - if encrypter and not encrypter._bypass_auto_encryption: - return _EncryptedBulkWriteContext - else: - return _BulkWriteContext - - def add_insert(self, document): - """Add an insert document to the list of ops.""" - validate_is_document_type("document", document) - # Generate ObjectId client side. 
- if not (isinstance(document, RawBSONDocument) or "_id" in document): - document["_id"] = ObjectId() - self.ops.append((_INSERT, document)) - - def add_update( - self, - selector, - update, - multi=False, - upsert=False, - collation=None, - array_filters=None, - hint=None, - ): - """Create an update document and add it to the list of ops.""" - validate_ok_for_update(update) - cmd = SON([("q", selector), ("u", update), ("multi", multi), ("upsert", upsert)]) - collation = validate_collation_or_none(collation) - if collation is not None: - self.uses_collation = True - cmd["collation"] = collation - if array_filters is not None: - self.uses_array_filters = True - cmd["arrayFilters"] = array_filters - if hint is not None: - self.uses_hint_update = True - cmd["hint"] = hint - if multi: - # A bulk_write containing an update_many is not retryable. - self.is_retryable = False - self.ops.append((_UPDATE, cmd)) - - def add_replace(self, selector, replacement, upsert=False, collation=None, hint=None): - """Create a replace document and add it to the list of ops.""" - validate_ok_for_replace(replacement) - cmd = SON([("q", selector), ("u", replacement), ("multi", False), ("upsert", upsert)]) - collation = validate_collation_or_none(collation) - if collation is not None: - self.uses_collation = True - cmd["collation"] = collation - if hint is not None: - self.uses_hint_update = True - cmd["hint"] = hint - self.ops.append((_UPDATE, cmd)) - - def add_delete(self, selector, limit, collation=None, hint=None): - """Create a delete document and add it to the list of ops.""" - cmd = SON([("q", selector), ("limit", limit)]) - collation = validate_collation_or_none(collation) - if collation is not None: - self.uses_collation = True - cmd["collation"] = collation - if hint is not None: - self.uses_hint_delete = True - cmd["hint"] = hint - if limit == _DELETE_ALL: - # A bulk_write containing a delete_many is not retryable. - self.is_retryable = False - self.ops.append((_DELETE, cmd)) - - def gen_ordered(self): - """Generate batches of operations, batched by type of - operation, in the order **provided**. - """ - run = None - for idx, (op_type, operation) in enumerate(self.ops): - if run is None: - run = _Run(op_type) - elif run.op_type != op_type: - yield run - run = _Run(op_type) - run.add(idx, operation) - yield run - - def gen_unordered(self): - """Generate batches of operations, batched by type of - operation, in arbitrary order. - """ - operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)] - for idx, (op_type, operation) in enumerate(self.ops): - operations[op_type].add(idx, operation) - - for run in operations: - if run.ops: - yield run - - def _execute_command( - self, - generator, - write_concern, - session, - sock_info, - op_id, - retryable, - full_result, - final_write_concern=None, - ): - db_name = self.collection.database.name - client = self.collection.database.client - listeners = client._event_listeners - - if not self.current_run: - self.current_run = next(generator) - self.next_run = None - run = self.current_run - - # sock_info.command validates the session, but we use - # sock_info.write_command. 
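# gen_ordered / gen_unordered above, reduced to their essence: ordered
# execution batches consecutive ops of the same type, while unordered
# execution groups all ops by type regardless of position. Data here is
# illustrative.
from itertools import groupby

ops = [("insert", 1), ("insert", 2), ("update", 3), ("insert", 4)]

ordered_runs = [(t, [op for _, op in grp]) for t, grp in groupby(ops, key=lambda p: p[0])]
assert ordered_runs == [("insert", [1, 2]), ("update", [3]), ("insert", [4])]

unordered_runs = {}
for t, op in ops:
    unordered_runs.setdefault(t, []).append(op)
assert unordered_runs == {"insert": [1, 2, 4], "update": [3]}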
- sock_info.validate_session(client, session) - last_run = False - - while run: - if not self.retrying: - self.next_run = next(generator, None) - if self.next_run is None: - last_run = True - - cmd_name = _COMMANDS[run.op_type] - bwc = self.bulk_ctx_class( - db_name, - cmd_name, - sock_info, - op_id, - listeners, - session, - run.op_type, - self.collection.codec_options, - ) - - while run.idx_offset < len(run.ops): - # If this is the last possible operation, use the - # final write concern. - if last_run and (len(run.ops) - run.idx_offset) == 1: - write_concern = final_write_concern or write_concern - - cmd = SON([(cmd_name, self.collection.name), ("ordered", self.ordered)]) - if self.comment: - cmd["comment"] = self.comment - _csot.apply_write_concern(cmd, write_concern) - if self.bypass_doc_val: - cmd["bypassDocumentValidation"] = True - if self.let is not None and run.op_type in (_DELETE, _UPDATE): - cmd["let"] = self.let - if session: - # Start a new retryable write unless one was already - # started for this command. - if retryable and not self.started_retryable_write: - session._start_retryable_write() - self.started_retryable_write = True - session._apply_to(cmd, retryable, ReadPreference.PRIMARY, sock_info) - sock_info.send_cluster_time(cmd, session, client) - sock_info.add_server_api(cmd) - # CSOT: apply timeout before encoding the command. - sock_info.apply_timeout(client, cmd) - ops = islice(run.ops, run.idx_offset, None) - - # Run as many ops as possible in one command. - if write_concern.acknowledged: - result, to_send = bwc.execute(cmd, ops, client) - - # Retryable writeConcernErrors halt the execution of this run. - wce = result.get("writeConcernError", {}) - if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: - # Synthesize the full bulk result without modifying the - # current one because this write operation may be retried. - full = copy.deepcopy(full_result) - _merge_command(run, full, run.idx_offset, result) - _raise_bulk_write_error(full) - - _merge_command(run, full_result, run.idx_offset, result) - - # We're no longer in a retry once a command succeeds. - self.retrying = False - self.started_retryable_write = False - - if self.ordered and "writeErrors" in result: - break - else: - to_send = bwc.execute_unack(cmd, ops, client) - - run.idx_offset += len(to_send) - - # We're supposed to continue if errors are - # at the write concern level (e.g. wtimeout) - if self.ordered and full_result["writeErrors"]: - break - # Reset our state - self.current_run = run = self.next_run - - def execute_command(self, generator, write_concern, session): - """Execute using write commands.""" - # nModified is only reported for write commands, not legacy ops. 
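# The index bookkeeping behind _merge_command (deleted from bulk.py above
# and retained in bulk_shared.py below): a server-reported error index is
# local to the current batch, so it is shifted by the batch offset and then
# mapped back to the operation's original position. Values are illustrative.
index_map = [5, 0, 3]  # original positions of the ops in this run
offset = 1  # ops already consumed by earlier commands in the run
server_index = 1  # index the server reported for this batch
assert index_map[server_index + offset] == 3  # run.index(doc["index"] + offset)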
- full_result = { - "writeErrors": [], - "writeConcernErrors": [], - "nInserted": 0, - "nUpserted": 0, - "nMatched": 0, - "nModified": 0, - "nRemoved": 0, - "upserted": [], - } - op_id = _randint() - - def retryable_bulk(session, sock_info, retryable): - self._execute_command( - generator, write_concern, session, sock_info, op_id, retryable, full_result - ) - - client = self.collection.database.client - with client._tmp_session(session) as s: - client._retry_with_session(self.is_retryable, retryable_bulk, s, self) - - if full_result["writeErrors"] or full_result["writeConcernErrors"]: - _raise_bulk_write_error(full_result) - return full_result - - def execute_op_msg_no_results(self, sock_info, generator): - """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" - db_name = self.collection.database.name - client = self.collection.database.client - listeners = client._event_listeners - op_id = _randint() - - if not self.current_run: - self.current_run = next(generator) - run = self.current_run - - while run: - cmd_name = _COMMANDS[run.op_type] - bwc = self.bulk_ctx_class( - db_name, - cmd_name, - sock_info, - op_id, - listeners, - None, - run.op_type, - self.collection.codec_options, - ) - - while run.idx_offset < len(run.ops): - cmd = SON( - [ - (cmd_name, self.collection.name), - ("ordered", False), - ("writeConcern", {"w": 0}), - ] - ) - sock_info.add_server_api(cmd) - ops = islice(run.ops, run.idx_offset, None) - # Run as many ops as possible. - to_send = bwc.execute_unack(cmd, ops, client) - run.idx_offset += len(to_send) - self.current_run = run = next(generator, None) - - def execute_command_no_results(self, sock_info, generator, write_concern): - """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" - full_result = { - "writeErrors": [], - "writeConcernErrors": [], - "nInserted": 0, - "nUpserted": 0, - "nMatched": 0, - "nModified": 0, - "nRemoved": 0, - "upserted": [], - } - # Ordered bulk writes have to be acknowledged so that we stop - # processing at the first error, even when the application - # specified unacknowledged writeConcern. - initial_write_concern = WriteConcern() - op_id = _randint() - try: - self._execute_command( - generator, - initial_write_concern, - None, - sock_info, - op_id, - False, - full_result, - write_concern, - ) - except OperationFailure: - pass - - def execute_no_results(self, sock_info, generator, write_concern): - """Execute all operations, returning no results (w=0).""" - if self.uses_collation: - raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - if self.uses_array_filters: - raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") - # Guard against unsupported unacknowledged writes. - unack = write_concern and not write_concern.acknowledged - if unack and self.uses_hint_delete and sock_info.max_wire_version < 9: - raise ConfigurationError( - "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." - ) - if unack and self.uses_hint_update and sock_info.max_wire_version < 8: - raise ConfigurationError( - "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." - ) - # Cannot have both unacknowledged writes and bypass document validation. 
- if self.bypass_doc_val: - raise OperationFailure( - "Cannot set bypass_document_validation with unacknowledged write concern" - ) - - if self.ordered: - return self.execute_command_no_results(sock_info, generator, write_concern) - return self.execute_op_msg_no_results(sock_info, generator) - - def execute(self, write_concern, session): - """Execute operations.""" - if not self.ops: - raise InvalidOperation("No operations to execute") - if self.executed: - raise InvalidOperation("Bulk operations can only be executed once.") - self.executed = True - write_concern = write_concern or self.collection.write_concern - session = _validate_session_write_concern(session, write_concern) - - if self.ordered: - generator = self.gen_ordered() - else: - generator = self.gen_unordered() - - client = self.collection.database.client - if not write_concern.acknowledged: - with client._socket_for_writes(session) as sock_info: - self.execute_no_results(sock_info, generator, write_concern) - else: - return self.execute_command(generator, write_concern, session) diff --git a/pymongo/bulk_shared.py b/pymongo/bulk_shared.py new file mode 100644 index 0000000000..9276419d8a --- /dev/null +++ b/pymongo/bulk_shared.py @@ -0,0 +1,131 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, types, and classes shared across Bulk Write API implementations.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, NoReturn + +from pymongo.errors import BulkWriteError, OperationFailure +from pymongo.helpers_shared import _get_wce_doc +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, +) + +if TYPE_CHECKING: + from pymongo.typings import _DocumentOut + + +_DELETE_ALL: int = 0 +_DELETE_ONE: int = 1 + +# For backwards compatibility. See MongoDB src/mongo/base/error_codes.err +_BAD_VALUE: int = 2 +_UNKNOWN_ERROR: int = 8 +_WRITE_CONCERN_ERROR: int = 64 + +_COMMANDS: tuple[str, str, str] = ("insert", "update", "delete") + + +class _Run: + """Represents a batch of write operations.""" + + def __init__(self, op_type: int) -> None: + """Initialize a new Run object.""" + self.op_type: int = op_type + self.index_map: list[int] = [] + self.ops: list[Any] = [] + self.idx_offset: int = 0 + + def index(self, idx: int) -> int: + """Get the original index of an operation in this run. + + :param idx: The Run index that maps to the original index. + """ + return self.index_map[idx] + + def add(self, original_index: int, operation: Any) -> None: + """Add an operation to this Run instance. + + :param original_index: The original index of this operation + within a larger bulk operation. + :param operation: The operation document. 
+ """ + self.index_map.append(original_index) + self.ops.append(operation) + + +def _merge_command( + run: _Run, + full_result: MutableMapping[str, Any], + offset: int, + result: Mapping[str, Any], +) -> None: + """Merge a write command result into the full bulk result.""" + affected = result.get("n", 0) + + if run.op_type == _INSERT: + full_result["nInserted"] += affected + + elif run.op_type == _DELETE: + full_result["nRemoved"] += affected + + elif run.op_type == _UPDATE: + upserted = result.get("upserted") + if upserted: + n_upserted = len(upserted) + for doc in upserted: + doc["index"] = run.index(doc["index"] + offset) + full_result["upserted"].extend(upserted) + full_result["nUpserted"] += n_upserted + full_result["nMatched"] += affected - n_upserted + else: + full_result["nMatched"] += affected + full_result["nModified"] += result["nModified"] + + write_errors = result.get("writeErrors") + if write_errors: + for doc in write_errors: + # Leave the server response intact for APM. + replacement = doc.copy() + idx = doc["index"] + offset + replacement["index"] = run.index(idx) + # Add the failed operation to the error document. + replacement["op"] = run.ops[idx] + full_result["writeErrors"].append(replacement) + + wce = _get_wce_doc(result) + if wce: + full_result["writeConcernErrors"].append(wce) + + +def _raise_bulk_write_error(full_result: _DocumentOut) -> NoReturn: + """Raise a BulkWriteError from the full bulk api result.""" + # retryWrites on MMAPv1 should raise an actionable error. + if full_result["writeErrors"]: + full_result["writeErrors"].sort(key=lambda error: error["index"]) + err = full_result["writeErrors"][0] + code = err["code"] + msg = err["errmsg"] + if code == 20 and msg.startswith("Transaction numbers"): + errmsg = ( + "This MongoDB deployment does not support " + "retryable writes. Please add retryWrites=false " + "to your connection string." + ) + raise OperationFailure(errmsg, code, full_result) + raise BulkWriteError(full_result) diff --git a/pymongo/change_stream.py b/pymongo/change_stream.py index 80820dff91..f9abddec44 100644 --- a/pymongo/change_stream.py +++ b/pymongo/change_stream.py @@ -1,457 +1,22 @@ -# Copyright 2017 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
-"""Watch changes on a collection, a database, or the entire cluster.""" +"""Re-import of synchronous ChangeStream API for compatibility.""" +from __future__ import annotations -import copy -from typing import TYPE_CHECKING, Any, Dict, Generic, Mapping, Optional, Union +from pymongo.synchronous.change_stream import * # noqa: F403 +from pymongo.synchronous.change_stream import __doc__ as original_doc -from bson import _bson_to_dict -from bson.raw_bson import RawBSONDocument -from bson.timestamp import Timestamp -from pymongo import _csot, common -from pymongo.aggregation import ( - _CollectionAggregationCommand, - _DatabaseAggregationCommand, -) -from pymongo.collation import validate_collation_or_none -from pymongo.command_cursor import CommandCursor -from pymongo.errors import ( - ConnectionFailure, - CursorNotFound, - InvalidOperation, - OperationFailure, - PyMongoError, -) -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline - -# The change streams spec considers the following server errors from the -# getMore command non-resumable. All other getMore errors are resumable. -_RESUMABLE_GETMORE_ERRORS = frozenset( - [ - 6, # HostUnreachable - 7, # HostNotFound - 89, # NetworkTimeout - 91, # ShutdownInProgress - 189, # PrimarySteppedDown - 262, # ExceededTimeLimit - 9001, # SocketException - 10107, # NotWritablePrimary - 11600, # InterruptedAtShutdown - 11602, # InterruptedDueToReplStateChange - 13435, # NotPrimaryNoSecondaryOk - 13436, # NotPrimaryOrSecondary - 63, # StaleShardVersion - 150, # StaleEpoch - 13388, # StaleConfig - 234, # RetryChangeStream - 133, # FailedToSatisfyReadPreference - ] -) - - -if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - from pymongo.database import Database - from pymongo.mongo_client import MongoClient - - -class ChangeStream(Generic[_DocumentType]): - """The internal abstract base class for change stream cursors. - - Should not be called directly by application developers. Use - :meth:`pymongo.collection.Collection.watch`, - :meth:`pymongo.database.Database.watch`, or - :meth:`pymongo.mongo_client.MongoClient.watch` instead. - - .. versionadded:: 3.6 - .. seealso:: The MongoDB documentation on `changeStreams `_. - """ - - def __init__( - self, - target: Union[ - "MongoClient[_DocumentType]", "Database[_DocumentType]", "Collection[_DocumentType]" - ], - pipeline: Optional[_Pipeline], - full_document: Optional[str], - resume_after: Optional[Mapping[str, Any]], - max_await_time_ms: Optional[int], - batch_size: Optional[int], - collation: Optional[_CollationIn], - start_at_operation_time: Optional[Timestamp], - session: Optional["ClientSession"], - start_after: Optional[Mapping[str, Any]], - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - ) -> None: - if pipeline is None: - pipeline = [] - pipeline = common.validate_list("pipeline", pipeline) - common.validate_string_or_none("full_document", full_document) - validate_collation_or_none(collation) - common.validate_non_negative_integer_or_none("batchSize", batch_size) - - self._decode_custom = False - self._orig_codec_options = target.codec_options - if target.codec_options.type_registry._decoder_map: - self._decode_custom = True - # Keep the type registry so that we support encoding custom types - # in the pipeline. 
- self._target = target.with_options( # type: ignore - codec_options=target.codec_options.with_options(document_class=RawBSONDocument) - ) - else: - self._target = target - - self._pipeline = copy.deepcopy(pipeline) - self._full_document = full_document - self._full_document_before_change = full_document_before_change - self._uses_start_after = start_after is not None - self._uses_resume_after = resume_after is not None - self._resume_token = copy.deepcopy(start_after or resume_after) - self._max_await_time_ms = max_await_time_ms - self._batch_size = batch_size - self._collation = collation - self._start_at_operation_time = start_at_operation_time - self._session = session - self._comment = comment - self._closed = False - self._timeout = self._target._timeout - # Initialize cursor. - self._cursor = self._create_cursor() - - @property - def _aggregation_command_class(self): - """The aggregation command class to be used.""" - raise NotImplementedError - - @property - def _client(self): - """The client against which the aggregation commands for - this ChangeStream will be run.""" - raise NotImplementedError - - def _change_stream_options(self): - """Return the options dict for the $changeStream pipeline stage.""" - options: Dict[str, Any] = {} - if self._full_document is not None: - options["fullDocument"] = self._full_document - - if self._full_document_before_change is not None: - options["fullDocumentBeforeChange"] = self._full_document_before_change - - resume_token = self.resume_token - if resume_token is not None: - if self._uses_start_after: - options["startAfter"] = resume_token - else: - options["resumeAfter"] = resume_token - - if self._start_at_operation_time is not None: - options["startAtOperationTime"] = self._start_at_operation_time - return options - - def _command_options(self): - """Return the options dict for the aggregation command.""" - options = {} - if self._max_await_time_ms is not None: - options["maxAwaitTimeMS"] = self._max_await_time_ms - if self._batch_size is not None: - options["batchSize"] = self._batch_size - return options - - def _aggregation_pipeline(self): - """Return the full aggregation pipeline for this ChangeStream.""" - options = self._change_stream_options() - full_pipeline: list = [{"$changeStream": options}] - full_pipeline.extend(self._pipeline) - return full_pipeline - - def _process_result(self, result, sock_info): - """Callback that caches the postBatchResumeToken or - startAtOperationTime from a changeStream aggregate command response - containing an empty batch of change documents. - - This is implemented as a callback because we need access to the wire - version in order to determine whether to cache this value. - """ - if not result["cursor"]["firstBatch"]: - if "postBatchResumeToken" in result["cursor"]: - self._resume_token = result["cursor"]["postBatchResumeToken"] - elif ( - self._start_at_operation_time is None - and self._uses_resume_after is False - and self._uses_start_after is False - and sock_info.max_wire_version >= 7 - ): - self._start_at_operation_time = result.get("operationTime") - # PYTHON-2181: informative error on missing operationTime. - if self._start_at_operation_time is None: - raise OperationFailure( - "Expected field 'operationTime' missing from command " - "response : %r" % (result,) - ) - - def _run_aggregation_cmd(self, session, explicit_session): - """Run the full aggregation pipeline for this ChangeStream and return - the corresponding CommandCursor. 
- """ - cmd = self._aggregation_command_class( - self._target, - CommandCursor, - self._aggregation_pipeline(), - self._command_options(), - explicit_session, - result_processor=self._process_result, - comment=self._comment, - ) - return self._client._retryable_read( - cmd.get_cursor, self._target._read_preference_for(session), session - ) - - def _create_cursor(self): - with self._client._tmp_session(self._session, close=False) as s: - return self._run_aggregation_cmd(session=s, explicit_session=self._session is not None) - - def _resume(self): - """Reestablish this change stream after a resumable error.""" - try: - self._cursor.close() - except PyMongoError: - pass - self._cursor = self._create_cursor() - - def close(self) -> None: - """Close this ChangeStream.""" - self._closed = True - self._cursor.close() - - def __iter__(self) -> "ChangeStream[_DocumentType]": - return self - - @property - def resume_token(self) -> Optional[Mapping[str, Any]]: - """The cached resume token that will be used to resume after the most - recently returned change. - - .. versionadded:: 3.9 - """ - return copy.deepcopy(self._resume_token) - - @_csot.apply - def next(self) -> _DocumentType: - """Advance the cursor. - - This method blocks until the next change document is returned or an - unrecoverable error is raised. This method is used when iterating over - all changes in the cursor. For example:: - - try: - resume_token = None - pipeline = [{'$match': {'operationType': 'insert'}}] - with db.collection.watch(pipeline) as stream: - for insert_change in stream: - print(insert_change) - resume_token = stream.resume_token - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - if resume_token is None: - # There is no usable resume token because there was a - # failure during ChangeStream initialization. - logging.error('...') - else: - # Use the interrupted ChangeStream's resume token to create - # a new ChangeStream. The new stream will continue from the - # last seen insert change without missing any events. - with db.collection.watch( - pipeline, resume_after=resume_token) as stream: - for insert_change in stream: - print(insert_change) - - Raises :exc:`StopIteration` if this ChangeStream is closed. - """ - while self.alive: - doc = self.try_next() - if doc is not None: - return doc - - raise StopIteration - - __next__ = next - - @property - def alive(self) -> bool: - """Does this cursor have the potential to return more data? - - .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise - :exc:`StopIteration` and :meth:`try_next` can return ``None``. - - .. versionadded:: 3.8 - """ - return not self._closed - - @_csot.apply - def try_next(self) -> Optional[_DocumentType]: - """Advance the cursor without blocking indefinitely. - - This method returns the next change document without waiting - indefinitely for the next change. For example:: - - with db.collection.watch() as stream: - while stream.alive: - change = stream.try_next() - # Note that the ChangeStream's resume token may be updated - # even when no changes are returned. - print("Current resume token: %r" % (stream.resume_token,)) - if change is not None: - print("Change document: %r" % (change,)) - continue - # We end up here when there are no recent changes. - # Sleep for a while before trying again to avoid flooding - # the server with getMore requests when no changes are - # available. 
- time.sleep(10) - - If no change document is cached locally then this method runs a single - getMore command. If the getMore yields any documents, the next - document is returned, otherwise, if the getMore returns no documents - (because there have been no changes) then ``None`` is returned. - - :Returns: - The next change document or ``None`` when no document is available - after running a single getMore or when the cursor is closed. - - .. versionadded:: 3.8 - """ - if not self._closed and not self._cursor.alive: - self._resume() - - # Attempt to get the next change with at most one getMore and at most - # one resume attempt. - try: - change = self._cursor._try_next(True) - except (ConnectionFailure, CursorNotFound): - self._resume() - change = self._cursor._try_next(False) - except OperationFailure as exc: - if exc._max_wire_version is None: - raise - is_resumable = ( - exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") - ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) - if not is_resumable: - raise - self._resume() - change = self._cursor._try_next(False) - - # Check if the cursor was invalidated. - if not self._cursor.alive: - self._closed = True - - # If no changes are available. - if change is None: - # We have either iterated over all documents in the cursor, - # OR the most-recently returned batch is empty. In either case, - # update the cached resume token with the postBatchResumeToken if - # one was returned. We also clear the startAtOperationTime. - if self._cursor._post_batch_resume_token is not None: - self._resume_token = self._cursor._post_batch_resume_token - self._start_at_operation_time = None - return change - - # Else, changes are available. - try: - resume_token = change["_id"] - except KeyError: - self.close() - raise InvalidOperation( - "Cannot provide resume functionality when the resume token is missing." - ) - - # If this is the last change document from the current batch, cache the - # postBatchResumeToken. - if not self._cursor._has_next() and self._cursor._post_batch_resume_token: - resume_token = self._cursor._post_batch_resume_token - - # Hereafter, don't use startAfter; instead use resumeAfter. - self._uses_start_after = False - self._uses_resume_after = True - - # Cache the resume token and clear startAtOperationTime. - self._resume_token = resume_token - self._start_at_operation_time = None - - if self._decode_custom: - return _bson_to_dict(change.raw, self._orig_codec_options) - return change - - def __enter__(self) -> "ChangeStream": - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - -class CollectionChangeStream(ChangeStream, Generic[_DocumentType]): - """A change stream that watches changes on a single collection. - - Should not be called directly by application developers. Use - helper method :meth:`pymongo.collection.Collection.watch` instead. - - .. versionadded:: 3.7 - """ - - @property - def _aggregation_command_class(self): - return _CollectionAggregationCommand - - @property - def _client(self): - return self._target.database.client - - -class DatabaseChangeStream(ChangeStream, Generic[_DocumentType]): - """A change stream that watches changes on all collections in a database. - - Should not be called directly by application developers. Use - helper method :meth:`pymongo.database.Database.watch` instead. - - .. 
versionadded:: 3.7 - """ - - @property - def _aggregation_command_class(self): - return _DatabaseAggregationCommand - - @property - def _client(self): - return self._target.client - - -class ClusterChangeStream(DatabaseChangeStream, Generic[_DocumentType]): - """A change stream that watches changes on all collections in the cluster. - - Should not be called directly by application developers. Use - helper method :meth:`pymongo.mongo_client.MongoClient.watch` instead. - - .. versionadded:: 3.7 - """ - - def _change_stream_options(self): - options = super(ClusterChangeStream, self)._change_stream_options() - options["allChangesForCluster"] = True - return options +__doc__ = original_doc +__all__ = ["ChangeStream", "ClusterChangeStream", "CollectionChangeStream", "DatabaseChangeStream"] # noqa: F405 diff --git a/pymongo/client_options.py b/pymongo/client_options.py index 882474e258..8b4eea7e65 100644 --- a/pymongo/client_options.py +++ b/pymongo/client_options.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,35 +12,52 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. -"""Tools to parse mongo client options.""" +"""Tools to parse mongo client options. -from typing import Optional +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, cast from bson.codec_options import _parse_codec_options from pymongo import common -from pymongo.auth import _build_credentials_tuple -from pymongo.common import validate_boolean from pymongo.compression_support import CompressionSettings from pymongo.errors import ConfigurationError -from pymongo.monitoring import _EventListeners -from pymongo.pool import PoolOptions +from pymongo.monitoring import _EventListener, _EventListeners +from pymongo.pool_options import PoolOptions from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +from pymongo.read_preferences import ( + _ServerMode, + make_read_preference, + read_pref_mode_from_name, +) from pymongo.server_selectors import any_server_selector from pymongo.ssl_support import get_ssl_context -from pymongo.write_concern import WriteConcern +from pymongo.write_concern import WriteConcern, validate_boolean + +if TYPE_CHECKING: + from bson.codec_options import CodecOptions + from pymongo.auth_shared import MongoCredential + from pymongo.encryption_options import AutoEncryptionOpts + from pymongo.pyopenssl_context import SSLContext + from pymongo.topology_description import _ServerSelector -def _parse_credentials(username, password, database, options): +def _parse_credentials( + username: str, password: str, database: Optional[str], options: Mapping[str, Any] +) -> Optional[MongoCredential]: """Parse authentication credentials.""" mechanism = options.get("authmechanism", "DEFAULT" if username else None) source = options.get("authsource") if username or mechanism: + from pymongo.auth_shared import _build_credentials_tuple + return _build_credentials_tuple(mechanism, source, username, password, options, database) return 
None -def _parse_read_preference(options): +def _parse_read_preference(options: Mapping[str, Any]) -> _ServerMode: """Parse read preference options.""" if "read_preference" in options: return options["read_preference"] @@ -52,7 +69,7 @@ def _parse_read_preference(options): return make_read_preference(mode, tags, max_staleness) -def _parse_write_concern(options): +def _parse_write_concern(options: Mapping[str, Any]) -> WriteConcern: """Parse write concern options.""" concern = options.get("w") wtimeout = options.get("wtimeoutms") @@ -61,13 +78,15 @@ def _parse_write_concern(options): return WriteConcern(concern, wtimeout, j, fsync) -def _parse_read_concern(options): +def _parse_read_concern(options: Mapping[str, Any]) -> ReadConcern: """Parse read concern options.""" concern = options.get("readconcernlevel") return ReadConcern(concern) -def _parse_ssl_options(options): +def _parse_ssl_options( + options: Mapping[str, Any], is_sync: bool +) -> tuple[Optional[SSLContext], bool]: """Parse ssl options.""" use_tls = options.get("tls") if use_tls is not None: @@ -121,12 +140,19 @@ def _parse_ssl_options(options): allow_invalid_certificates, allow_invalid_hostnames, disable_ocsp_endpoint_check, + is_sync, ) return ctx, allow_invalid_hostnames return None, allow_invalid_hostnames -def _parse_pool_options(username, password, database, options): +def _parse_pool_options( + username: str, + password: str, + database: Optional[str], + options: Mapping[str, Any], + is_sync: bool, +) -> PoolOptions: """Parse connection pool options.""" credentials = _parse_credentials(username, password, database, options) max_pool_size = options.get("maxpoolsize", common.MAX_POOL_SIZE) @@ -137,14 +163,14 @@ def _parse_pool_options(username, password, database, options): connect_timeout = options.get("connecttimeoutms", common.CONNECT_TIMEOUT) socket_timeout = options.get("sockettimeoutms") wait_queue_timeout = options.get("waitqueuetimeoutms", common.WAIT_QUEUE_TIMEOUT) - event_listeners = options.get("event_listeners") + event_listeners = cast(Optional[Sequence[_EventListener]], options.get("event_listeners")) appname = options.get("appname") driver = options.get("driver") server_api = options.get("server_api") compression_settings = CompressionSettings( options.get("compressors", []), options.get("zlibcompressionlevel", -1) ) - ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options) + ssl_context, tls_allow_invalid_hostnames = _parse_ssl_options(options, is_sync) load_balanced = options.get("loadbalanced") max_connecting = options.get("maxconnecting", common.MAX_CONNECTING) return PoolOptions( @@ -164,18 +190,26 @@ def _parse_pool_options(username, password, database, options): server_api=server_api, load_balanced=load_balanced, credentials=credentials, + is_sync=is_sync, ) -class ClientOptions(object): - """Read only configuration options for a MongoClient. +class ClientOptions: + """Read only configuration options for an AsyncMongoClient/MongoClient. Should not be instantiated directly by application developers. Access - a client's options via :attr:`pymongo.mongo_client.MongoClient.options` + a client's options via :attr:`pymongo.mongo_client.AsyncMongoClient.options` or :attr:`pymongo.mongo_client.MongoClient.options` instead. 
""" - def __init__(self, username, password, database, options): + def __init__( + self, + username: str, + password: str, + database: Optional[str], + options: Mapping[str, Any], + is_sync: bool = True, + ): self.__options = options self.__codec_options = _parse_codec_options(options) self.__direct_connection = options.get("directconnection") @@ -185,7 +219,7 @@ def __init__(self, username, password, database, options): self.__server_selection_timeout = options.get( "serverselectiontimeoutms", common.SERVER_SELECTION_TIMEOUT ) - self.__pool_options = _parse_pool_options(username, password, database, options) + self.__pool_options = _parse_pool_options(username, password, database, options, is_sync) self.__read_preference = _parse_read_preference(options) self.__replica_set_name = options.get("replicaset") self.__write_concern = _parse_write_concern(options) @@ -198,68 +232,71 @@ def __init__(self, username, password, database, options): self.__auto_encryption_opts = options.get("auto_encryption_opts") self.__load_balanced = options.get("loadbalanced") self.__timeout = options.get("timeoutms") + self.__server_monitoring_mode = options.get( + "servermonitoringmode", common.SERVER_MONITORING_MODE + ) @property - def _options(self): + def _options(self) -> Mapping[str, Any]: """The original options used to create this ClientOptions.""" return self.__options @property - def connect(self): + def connect(self) -> Optional[bool]: """Whether to begin discovering a MongoDB topology automatically.""" return self.__connect @property - def codec_options(self): + def codec_options(self) -> CodecOptions[Any]: """A :class:`~bson.codec_options.CodecOptions` instance.""" return self.__codec_options @property - def direct_connection(self): + def direct_connection(self) -> Optional[bool]: """Whether to connect to the deployment in 'Single' topology.""" return self.__direct_connection @property - def local_threshold_ms(self): + def local_threshold_ms(self) -> int: """The local threshold for this instance.""" return self.__local_threshold_ms @property - def server_selection_timeout(self): + def server_selection_timeout(self) -> int: """The server selection timeout for this instance in seconds.""" return self.__server_selection_timeout @property - def server_selector(self): + def server_selector(self) -> _ServerSelector: return self.__server_selector @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: """The monitoring frequency in seconds.""" return self.__heartbeat_frequency @property - def pool_options(self): + def pool_options(self) -> PoolOptions: """A :class:`~pymongo.pool.PoolOptions` instance.""" return self.__pool_options @property - def read_preference(self): + def read_preference(self) -> _ServerMode: """A read preference instance.""" return self.__read_preference @property - def replica_set_name(self): + def replica_set_name(self) -> Optional[str]: """Replica set name or None.""" return self.__replica_set_name @property - def write_concern(self): + def write_concern(self) -> WriteConcern: """A :class:`~pymongo.write_concern.WriteConcern` instance.""" return self.__write_concern @property - def read_concern(self): + def read_concern(self) -> ReadConcern: """A :class:`~pymongo.read_concern.ReadConcern` instance.""" return self.__read_concern @@ -267,36 +304,45 @@ def read_concern(self): def timeout(self) -> Optional[float]: """The configured timeoutMS converted to seconds, or None. - ..versionadded: 4.2 + .. 
versionadded:: 4.2 """ return self.__timeout @property - def retry_writes(self): + def retry_writes(self) -> bool: """If this instance should retry supported write operations.""" return self.__retry_writes @property - def retry_reads(self): + def retry_reads(self) -> bool: """If this instance should retry supported read operations.""" return self.__retry_reads @property - def auto_encryption_opts(self): + def auto_encryption_opts(self) -> Optional[AutoEncryptionOpts]: """A :class:`~pymongo.encryption.AutoEncryptionOpts` or None.""" return self.__auto_encryption_opts @property - def load_balanced(self): + def load_balanced(self) -> Optional[bool]: """True if the client was configured to connect to a load balancer.""" return self.__load_balanced @property - def event_listeners(self): + def event_listeners(self) -> list[_EventListeners]: """The event listeners registered for this client. See :mod:`~pymongo.monitoring` for details. .. versionadded:: 4.0 """ + assert self.__pool_options._event_listeners is not None return self.__pool_options._event_listeners.event_listeners() + + @property + def server_monitoring_mode(self) -> str: + """The configured serverMonitoringMode option. + + .. versionadded:: 4.5 + """ + return self.__server_monitoring_mode diff --git a/pymongo/client_session.py b/pymongo/client_session.py index 3ff98a579f..db72b0b2e1 100644 --- a/pymongo/client_session.py +++ b/pymongo/client_session.py @@ -1,10 +1,10 @@ -# Copyright 2017 MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,1101 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Logical sessions for ordering sequential operations. +"""Re-import of synchronous ClientSession API for compatibility.""" +from __future__ import annotations -.. versionadded:: 3.6 +from pymongo.synchronous.client_session import * # noqa: F403 +from pymongo.synchronous.client_session import __doc__ as original_doc -Causally Consistent Reads -========================= - -.. code-block:: python - - with client.start_session(causal_consistency=True) as session: - collection = client.db.collection - collection.update_one({'_id': 1}, {'$set': {'x': 10}}, session=session) - secondary_c = collection.with_options( - read_preference=ReadPreference.SECONDARY) - - # A secondary read waits for replication of the write. - secondary_c.find_one({'_id': 1}, session=session) - -If `causal_consistency` is True (the default), read operations that use -the session are causally after previous read and write operations. Using a -causally consistent session, an application can read its own writes and is -guaranteed monotonic reads, even when reading from replica set secondaries. - -.. seealso:: The MongoDB documentation on `causal-consistency `_. - -.. _transactions-ref: - -Transactions -============ - -.. versionadded:: 3.7 - -MongoDB 4.0 adds support for transactions on replica set primaries. A -transaction is associated with a :class:`ClientSession`. To start a transaction -on a session, use :meth:`ClientSession.start_transaction` in a with-statement. 
-Then, execute an operation within the transaction by passing the session to the -operation: - -.. code-block:: python - - orders = client.db.orders - inventory = client.db.inventory - with client.start_session() as session: - with session.start_transaction(): - orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, - {"$inc": {"qty": -100}}, session=session) - -Upon normal completion of the ``with session.start_transaction()`` block, the -transaction automatically calls :meth:`ClientSession.commit_transaction`. -If the block exits with an exception, the transaction automatically calls -:meth:`ClientSession.abort_transaction`. - -In general, multi-document transactions only support read/write (CRUD) -operations on existing collections. However, MongoDB 4.4 adds support for -creating collections and indexes with some limitations, including an -insert operation that would result in the creation of a new collection. -For a complete description of all the supported and unsupported operations -see the `MongoDB server's documentation for transactions -`_. - -A session may only have a single active transaction at a time; multiple -transactions on the same session can be executed in sequence. - -Sharded Transactions -^^^^^^^^^^^^^^^^^^^^ - -.. versionadded:: 3.9 - -PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB ->=4.2. Sharded transactions have the same API as replica set transactions. -When running a transaction against a sharded cluster, the session is -pinned to the mongos server selected for the first operation in the -transaction. All subsequent operations that are part of the same transaction -are routed to the same mongos server. When the transaction is completed, by -running either commitTransaction or abortTransaction, the session is unpinned. - -.. seealso:: The MongoDB documentation on `transactions `_. - -.. _snapshot-reads-ref: - -Snapshot Reads -============== - -.. versionadded:: 3.12 - -MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by -passing the ``snapshot`` option to -:meth:`~pymongo.mongo_client.MongoClient.start_session`. -If ``snapshot`` is True, all read operations that use this session read data -from the same snapshot timestamp. The server chooses the latest -majority-committed snapshot timestamp when executing the first read operation -using the session. Subsequent reads on this session read from the same -snapshot timestamp. Snapshot reads are also supported when reading from -replica set secondaries. - -.. code-block:: python - - # Each read using this session reads data from the same point in time. - with client.start_session(snapshot=True) as session: - order = orders.find_one({"sku": "abc123"}, session=session) - inventory = inventory.find_one({"sku": "abc123"}, session=session) - -Snapshot Reads Limitations -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Snapshot reads sessions are incompatible with ``causal_consistency=True``.
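Requesting both options fails up front when the session is created — a minimal sketch, assuming a default ``MongoClient`` (the option combination is validated client-side in ``SessionOptions``):

.. code-block:: python

    from pymongo import MongoClient
    from pymongo.errors import ConfigurationError

    client = MongoClient()
    try:
        # snapshot=True implies causal_consistency=False; asking for both
        # is rejected when the session options are constructed.
        client.start_session(snapshot=True, causal_consistency=True)
    except ConfigurationError as exc:
        print(exc)  # snapshot reads do not support causal_consistency=True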
-Only the following read operations are supported in a snapshot reads session: - -- :meth:`~pymongo.collection.Collection.find` -- :meth:`~pymongo.collection.Collection.find_one` -- :meth:`~pymongo.collection.Collection.aggregate` -- :meth:`~pymongo.collection.Collection.count_documents` -- :meth:`~pymongo.collection.Collection.distinct` (on unsharded collections) - -Classes -======= -""" - -import collections -import time -import uuid -from collections.abc import Mapping as _Mapping -from typing import ( - TYPE_CHECKING, - Any, - Callable, - ContextManager, - Mapping, - NoReturn, - Optional, - TypeVar, -) - -from bson.binary import Binary -from bson.int64 import Int64 -from bson.son import SON -from bson.timestamp import Timestamp -from pymongo import _csot -from pymongo.cursor import _SocketManager -from pymongo.errors import ( - ConfigurationError, - ConnectionFailure, - InvalidOperation, - OperationFailure, - PyMongoError, - WTimeoutError, -) -from pymongo.helpers import _RETRYABLE_ERROR_CODES -from pymongo.read_concern import ReadConcern -from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.server_type import SERVER_TYPE -from pymongo.write_concern import WriteConcern - - -class SessionOptions(object): - """Options for a new :class:`ClientSession`. - - :Parameters: - - `causal_consistency` (optional): If True, read operations are causally - ordered within the session. Defaults to True when the ``snapshot`` - option is ``False``. - - `default_transaction_options` (optional): The default - TransactionOptions to use for transactions started on this session. - - `snapshot` (optional): If True, then all reads performed using this - session will read from the same snapshot. This option is incompatible - with ``causal_consistency=True``. Defaults to ``False``. - - .. versionchanged:: 3.12 - Added the ``snapshot`` parameter. - """ - - def __init__( - self, - causal_consistency: Optional[bool] = None, - default_transaction_options: Optional["TransactionOptions"] = None, - snapshot: Optional[bool] = False, - ) -> None: - if snapshot: - if causal_consistency: - raise ConfigurationError("snapshot reads do not support causal_consistency=True") - causal_consistency = False - elif causal_consistency is None: - causal_consistency = True - self._causal_consistency = causal_consistency - if default_transaction_options is not None: - if not isinstance(default_transaction_options, TransactionOptions): - raise TypeError( - "default_transaction_options must be an instance of " - "pymongo.client_session.TransactionOptions, not: %r" - % (default_transaction_options,) - ) - self._default_transaction_options = default_transaction_options - self._snapshot = snapshot - - @property - def causal_consistency(self) -> bool: - """Whether causal consistency is configured.""" - return self._causal_consistency - - @property - def default_transaction_options(self) -> Optional["TransactionOptions"]: - """The default TransactionOptions to use for transactions started on - this session. - - .. versionadded:: 3.7 - """ - return self._default_transaction_options - - @property - def snapshot(self) -> Optional[bool]: - """Whether snapshot reads are configured. - - .. versionadded:: 3.12 - """ - return self._snapshot - - -class TransactionOptions(object): - """Options for :meth:`ClientSession.start_transaction`. - - :Parameters: - - `read_concern` (optional): The - :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. 
- If ``None`` (the default) the :attr:`read_concern` of - the :class:`MongoClient` is used. - - `write_concern` (optional): The - :class:`~pymongo.write_concern.WriteConcern` to use for this - transaction. If ``None`` (the default) the :attr:`write_concern` of - the :class:`MongoClient` is used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` - for options. Transactions which read must use - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - - `max_commit_time_ms` (optional): The maximum amount of time to allow a - single commitTransaction command to run. This option is an alias for - maxTimeMS option on the commitTransaction command. If ``None`` (the - default) maxTimeMS is not used. - - .. versionchanged:: 3.9 - Added the ``max_commit_time_ms`` option. - - .. versionadded:: 3.7 - """ - - def __init__( - self, - read_concern: Optional[ReadConcern] = None, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None, - max_commit_time_ms: Optional[int] = None, - ) -> None: - self._read_concern = read_concern - self._write_concern = write_concern - self._read_preference = read_preference - self._max_commit_time_ms = max_commit_time_ms - if read_concern is not None: - if not isinstance(read_concern, ReadConcern): - raise TypeError( - "read_concern must be an instance of " - "pymongo.read_concern.ReadConcern, not: %r" % (read_concern,) - ) - if write_concern is not None: - if not isinstance(write_concern, WriteConcern): - raise TypeError( - "write_concern must be an instance of " - "pymongo.write_concern.WriteConcern, not: %r" % (write_concern,) - ) - if not write_concern.acknowledged: - raise ConfigurationError( - "transactions do not support unacknowledged write concern" - ": %r" % (write_concern,) - ) - if read_preference is not None: - if not isinstance(read_preference, _ServerMode): - raise TypeError( - "%r is not valid for read_preference. See " - "pymongo.read_preferences for valid " - "options." % (read_preference,) - ) - if max_commit_time_ms is not None: - if not isinstance(max_commit_time_ms, int): - raise TypeError("max_commit_time_ms must be an integer or None") - - @property - def read_concern(self) -> Optional[ReadConcern]: - """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" - return self._read_concern - - @property - def write_concern(self) -> Optional[WriteConcern]: - """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" - return self._write_concern - - @property - def read_preference(self) -> Optional[_ServerMode]: - """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" - return self._read_preference - - @property - def max_commit_time_ms(self) -> Optional[int]: - """The maxTimeMS to use when running a commitTransaction command. - - .. versionadded:: 3.9 - """ - return self._max_commit_time_ms - - -def _validate_session_write_concern(session, write_concern): - """Validate that an explicit session is not used with an unack'ed write. - - Returns the session to use for the next operation. - """ - if session: - if write_concern is not None and not write_concern.acknowledged: - # For unacknowledged writes without an explicit session, - # drivers SHOULD NOT use an implicit session. If a driver - # creates an implicit session for unacknowledged writes - # without an explicit session, the driver MUST NOT send the - # session ID.
- if session._implicit: - return None - else: - raise ConfigurationError( - "Explicit sessions are incompatible with " - "unacknowledged write concern: %r" % (write_concern,) - ) - return session - - -class _TransactionContext(object): - """Internal transaction context manager for start_transaction.""" - - def __init__(self, session): - self.__session = session - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if self.__session.in_transaction: - if exc_val is None: - self.__session.commit_transaction() - else: - self.__session.abort_transaction() - - -class _TxnState(object): - NONE = 1 - STARTING = 2 - IN_PROGRESS = 3 - COMMITTED = 4 - COMMITTED_EMPTY = 5 - ABORTED = 6 - - -class _Transaction(object): - """Internal class to hold transaction information in a ClientSession.""" - - def __init__(self, opts, client): - self.opts = opts - self.state = _TxnState.NONE - self.sharded = False - self.pinned_address = None - self.sock_mgr = None - self.recovery_token = None - self.attempt = 0 - self.client = client - - def active(self): - return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) - - def starting(self): - return self.state == _TxnState.STARTING - - @property - def pinned_conn(self): - if self.active() and self.sock_mgr: - return self.sock_mgr.sock - return None - - def pin(self, server, sock_info): - self.sharded = True - self.pinned_address = server.description.address - if server.description.server_type == SERVER_TYPE.LoadBalancer: - sock_info.pin_txn() - self.sock_mgr = _SocketManager(sock_info, False) - - def unpin(self): - self.pinned_address = None - if self.sock_mgr: - self.sock_mgr.close() - self.sock_mgr = None - - def reset(self): - self.unpin() - self.state = _TxnState.NONE - self.sharded = False - self.recovery_token = None - self.attempt = 0 - - def __del__(self): - if self.sock_mgr: - # Reuse the cursor closing machinery to return the socket to the - # pool soon. - self.client._close_cursor_soon(0, None, self.sock_mgr) - self.sock_mgr = None - - -def _reraise_with_unknown_commit(exc: Any) -> NoReturn: - """Re-raise an exception with the UnknownTransactionCommitResult label.""" - exc._add_error_label("UnknownTransactionCommitResult") - raise - - -def _max_time_expired_error(exc): - """Return true if exc is a MaxTimeMSExpired error.""" - return isinstance(exc, OperationFailure) and exc.code == 50 - - -# From the transactions spec, all the retryable writes errors plus -# WriteConcernFailed. -_UNKNOWN_COMMIT_ERROR_CODES = _RETRYABLE_ERROR_CODES | frozenset( - [ - 64, # WriteConcernFailed - 50, # MaxTimeMSExpired - ] -) - -# From the Convenient API for Transactions spec, with_transaction must -# halt retries after 120 seconds. -# This limit is non-configurable and was chosen to be twice the 60 second -# default value of MongoDB's `transactionLifetimeLimitSeconds` parameter. -_WITH_TRANSACTION_RETRY_TIME_LIMIT = 120 - - -def _within_time_limit(start_time): - """Are we within the with_transaction retry limit?""" - return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT - - -_T = TypeVar("_T") - -if TYPE_CHECKING: - from pymongo.mongo_client import MongoClient - - -class ClientSession: - """A session for ordering sequential operations. - - :class:`ClientSession` instances are **not thread-safe or fork-safe**. - They can only be used by one thread or process at a time. A single - :class:`ClientSession` cannot be used to run multiple operations - concurrently. 
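A minimal usage sketch (illustrative; assumes a reachable deployment and uses the documented ``start_session`` entry point rather than direct construction):

.. code-block:: python

    from pymongo import MongoClient

    client = MongoClient()
    coll = client.db.collection
    with client.start_session() as session:
        # One thread, one session: operations run sequentially, and the
        # session is ended automatically when the block exits.
        coll.insert_one({"_id": 1}, session=session)
        assert coll.find_one({"_id": 1}, session=session) is not None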
- - Should not be initialized directly by application developers - to create a - :class:`ClientSession`, call - :meth:`~pymongo.mongo_client.MongoClient.start_session`. - """ - - def __init__( - self, - client: "MongoClient", - server_session: Any, - options: SessionOptions, - implicit: bool, - ) -> None: - # A MongoClient, a _ServerSession, a SessionOptions, and a set. - self._client: MongoClient = client - self._server_session = server_session - self._options = options - self._cluster_time = None - self._operation_time = None - self._snapshot_time = None - # Is this an implicitly created session? - self._implicit = implicit - self._transaction = _Transaction(None, client) - - def end_session(self) -> None: - """Finish this session. If a transaction has started, abort it. - - It is an error to use the session after the session has ended. - """ - self._end_session(lock=True) - - def _end_session(self, lock): - if self._server_session is not None: - try: - if self.in_transaction: - self.abort_transaction() - # It's possible we're still pinned here when the transaction - # is in the committed state when the session is discarded. - self._unpin() - finally: - self._client._return_server_session(self._server_session, lock) - self._server_session = None - - def _check_ended(self): - if self._server_session is None: - raise InvalidOperation("Cannot use ended session") - - def __enter__(self) -> "ClientSession": - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self._end_session(lock=True) - - @property - def client(self) -> "MongoClient": - """The :class:`~pymongo.mongo_client.MongoClient` this session was - created from. - """ - return self._client - - @property - def options(self) -> SessionOptions: - """The :class:`SessionOptions` this session was created with.""" - return self._options - - @property - def session_id(self) -> Mapping[str, Any]: - """A BSON document, the opaque server session identifier.""" - self._check_ended() - return self._server_session.session_id - - @property - def cluster_time(self) -> Optional[Mapping[str, Any]]: - """The cluster time returned by the last operation executed - in this session. - """ - return self._cluster_time - - @property - def operation_time(self) -> Optional[Timestamp]: - """The operation time returned by the last operation executed - in this session. - """ - return self._operation_time - - def _inherit_option(self, name, val): - """Return the inherited TransactionOption value.""" - if val: - return val - txn_opts = self.options.default_transaction_options - val = txn_opts and getattr(txn_opts, name) - if val: - return val - return getattr(self.client, name) - - def with_transaction( - self, - callback: Callable[["ClientSession"], _T], - read_concern: Optional[ReadConcern] = None, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None, - max_commit_time_ms: Optional[int] = None, - ) -> _T: - """Execute a callback in a transaction. - - This method starts a transaction on this session, executes ``callback`` - once, and then commits the transaction. 
For example:: - - def callback(session): - orders = session.client.db.orders - inventory = session.client.db.inventory - orders.insert_one({"sku": "abc123", "qty": 100}, session=session) - inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, - {"$inc": {"qty": -100}}, session=session) - - with client.start_session() as session: - session.with_transaction(callback) - - To pass arbitrary arguments to the ``callback``, wrap your callable - with a ``lambda`` like this:: - - def callback(session, custom_arg, custom_kwarg=None): - # Transaction operations... - - with client.start_session() as session: - session.with_transaction( - lambda s: callback(s, "custom_arg", custom_kwarg=1)) - - In the event of an exception, ``with_transaction`` may retry the commit - or the entire transaction; therefore ``callback`` may be invoked - multiple times by a single call to ``with_transaction``. Developers - should be mindful of this possibility when writing a ``callback`` that - modifies application state or has any other side-effects. - Note that even when the ``callback`` is invoked multiple times, - ``with_transaction`` ensures that the transaction will be committed - at-most-once on the server. - - The ``callback`` should not attempt to start new transactions, but - should simply run operations meant to be contained within a - transaction. The ``callback`` should also not commit the transaction; - this is handled automatically by ``with_transaction``. If the - ``callback`` does commit or abort the transaction without error, - however, ``with_transaction`` will return without taking further - action. - - :class:`ClientSession` instances are **not thread-safe or fork-safe**. - Consequently, the ``callback`` must not attempt to execute multiple - operations concurrently. - - When ``callback`` raises an exception, ``with_transaction`` - automatically aborts the current transaction. When ``callback`` or - :meth:`~ClientSession.commit_transaction` raises an exception that - includes the ``"TransientTransactionError"`` error label, - ``with_transaction`` starts a new transaction and re-executes - the ``callback``. - - When :meth:`~ClientSession.commit_transaction` raises an exception with - the ``"UnknownTransactionCommitResult"`` error label, - ``with_transaction`` retries the commit until the result of the - transaction is known. - - This method will cease retrying after 120 seconds has elapsed. This - timeout is not configurable and any exception raised by the - ``callback`` or by :meth:`ClientSession.commit_transaction` after the - timeout is reached will be re-raised. Applications that desire a - different timeout duration should not use this method. - - :Parameters: - - `callback`: The callable ``callback`` to run inside a transaction. - The callable must accept a single argument, this session. Note, - under certain error conditions the callback may be run multiple - times. - - `read_concern` (optional): The - :class:`~pymongo.read_concern.ReadConcern` to use for this - transaction. - - `write_concern` (optional): The - :class:`~pymongo.write_concern.WriteConcern` to use for this - transaction. - - `read_preference` (optional): The read preference to use for this - transaction. If ``None`` (the default) the :attr:`read_preference` - of this :class:`MongoClient` is used. See - :mod:`~pymongo.read_preferences` for options. - - :Returns: - The return value of the ``callback``. - - ..
versionadded:: 3.9 - """ - start_time = time.monotonic() - while True: - self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms) - try: - ret = callback(self) - except Exception as exc: - if self.in_transaction: - self.abort_transaction() - if ( - isinstance(exc, PyMongoError) - and exc.has_error_label("TransientTransactionError") - and _within_time_limit(start_time) - ): - # Retry the entire transaction. - continue - raise - - if not self.in_transaction: - # Assume callback intentionally ended the transaction. - return ret - - while True: - try: - self.commit_transaction() - except PyMongoError as exc: - if ( - exc.has_error_label("UnknownTransactionCommitResult") - and _within_time_limit(start_time) - and not _max_time_expired_error(exc) - ): - # Retry the commit. - continue - - if exc.has_error_label("TransientTransactionError") and _within_time_limit( - start_time - ): - # Retry the entire transaction. - break - raise - - # Commit succeeded. - return ret - - def start_transaction( - self, - read_concern: Optional[ReadConcern] = None, - write_concern: Optional[WriteConcern] = None, - read_preference: Optional[_ServerMode] = None, - max_commit_time_ms: Optional[int] = None, - ) -> ContextManager: - """Start a multi-statement transaction. - - Takes the same arguments as :class:`TransactionOptions`. - - .. versionchanged:: 3.9 - Added the ``max_commit_time_ms`` option. - - .. versionadded:: 3.7 - """ - self._check_ended() - - if self.options.snapshot: - raise InvalidOperation("Transactions are not supported in snapshot sessions") - - if self.in_transaction: - raise InvalidOperation("Transaction already in progress") - - read_concern = self._inherit_option("read_concern", read_concern) - write_concern = self._inherit_option("write_concern", write_concern) - read_preference = self._inherit_option("read_preference", read_preference) - if max_commit_time_ms is None: - opts = self.options.default_transaction_options - if opts: - max_commit_time_ms = opts.max_commit_time_ms - - self._transaction.opts = TransactionOptions( - read_concern, write_concern, read_preference, max_commit_time_ms - ) - self._transaction.reset() - self._transaction.state = _TxnState.STARTING - self._start_retryable_write() - return _TransactionContext(self) - - def commit_transaction(self) -> None: - """Commit a multi-statement transaction. - - .. versionadded:: 3.7 - """ - self._check_ended() - state = self._transaction.state - if state is _TxnState.NONE: - raise InvalidOperation("No transaction started") - elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY): - # Server transaction was never started, no need to send a command. - self._transaction.state = _TxnState.COMMITTED_EMPTY - return - elif state is _TxnState.ABORTED: - raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction") - elif state is _TxnState.COMMITTED: - # We're explicitly retrying the commit, move the state back to - # "in progress" so that in_transaction returns true. - self._transaction.state = _TxnState.IN_PROGRESS - - try: - self._finish_transaction_with_retry("commitTransaction") - except ConnectionFailure as exc: - # We do not know if the commit was successfully applied on the - # server or if it satisfied the provided write concern, set the - # unknown commit error label. 
- exc._remove_error_label("TransientTransactionError") - _reraise_with_unknown_commit(exc) - except WTimeoutError as exc: - # We do not know if the commit has satisfied the provided write - # concern; add the unknown commit error label. - _reraise_with_unknown_commit(exc) - except OperationFailure as exc: - if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES: - # The server reports errorLabels in this case. - raise - # We do not know if the commit was successfully applied on the - # server or if it satisfied the provided write concern; set the - # unknown commit error label. - _reraise_with_unknown_commit(exc) - finally: - self._transaction.state = _TxnState.COMMITTED - - def abort_transaction(self) -> None: - """Abort a multi-statement transaction. - - .. versionadded:: 3.7 - """ - self._check_ended() - - state = self._transaction.state - if state is _TxnState.NONE: - raise InvalidOperation("No transaction started") - elif state is _TxnState.STARTING: - # Server transaction was never started, no need to send a command. - self._transaction.state = _TxnState.ABORTED - return - elif state is _TxnState.ABORTED: - raise InvalidOperation("Cannot call abortTransaction twice") - elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY): - raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction") - - try: - self._finish_transaction_with_retry("abortTransaction") - except (OperationFailure, ConnectionFailure): - # The transactions spec says to ignore abortTransaction errors. - pass - finally: - self._transaction.state = _TxnState.ABORTED - self._unpin() - - def _finish_transaction_with_retry(self, command_name): - """Run commit or abort with one retry after any retryable error. - - :Parameters: - - `command_name`: Either "commitTransaction" or "abortTransaction". - """ - - def func(session, sock_info, retryable): - return self._finish_transaction(sock_info, command_name) - - return self._client._retry_internal(True, func, self, None) - - def _finish_transaction(self, sock_info, command_name): - self._transaction.attempt += 1 - opts = self._transaction.opts - wc = opts.write_concern - cmd = SON([(command_name, 1)]) - if command_name == "commitTransaction": - if opts.max_commit_time_ms and _csot.get_timeout() is None: - cmd["maxTimeMS"] = opts.max_commit_time_ms - - # Transaction spec says that after the initial commit attempt, - # subsequent commitTransaction commands should be upgraded to use - # w:"majority" and set a default value of 10 seconds for wtimeout. - if self._transaction.attempt > 1: - wc_doc = wc.document - wc_doc["w"] = "majority" - wc_doc.setdefault("wtimeout", 10000) - wc = WriteConcern(**wc_doc) - - if self._transaction.recovery_token: - cmd["recoveryToken"] = self._transaction.recovery_token - - return self._client.admin._command( - sock_info, cmd, session=self, write_concern=wc, parse_write_concern_error=True - ) - - def _advance_cluster_time(self, cluster_time): - """Internal cluster time helper.""" - if self._cluster_time is None: - self._cluster_time = cluster_time - elif cluster_time is not None: - if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]: - self._cluster_time = cluster_time - - def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: - """Update the cluster time for this session. - - :Parameters: - - `cluster_time`: The - :data:`~pymongo.client_session.ClientSession.cluster_time` from - another `ClientSession` instance.
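For context, the commit/abort rules above are what let ``start_transaction`` be used as a context manager on its own; a minimal sketch, assuming an existing ``client`` and a ``coll`` collection handle::

    with client.start_session() as session:
        with session.start_transaction():
            coll.insert_one({"sku": "abc123"}, session=session)
            # Exiting the block commits; an exception aborts instead.

Unlike ``with_transaction``, this form performs no automatic retries.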
- """ - if not isinstance(cluster_time, _Mapping): - raise TypeError("cluster_time must be a subclass of collections.Mapping") - if not isinstance(cluster_time.get("clusterTime"), Timestamp): - raise ValueError("Invalid cluster_time") - self._advance_cluster_time(cluster_time) - - def _advance_operation_time(self, operation_time): - """Internal operation time helper.""" - if self._operation_time is None: - self._operation_time = operation_time - elif operation_time is not None: - if operation_time > self._operation_time: - self._operation_time = operation_time - - def advance_operation_time(self, operation_time: Timestamp) -> None: - """Update the operation time for this session. - - :Parameters: - - `operation_time`: The - :data:`~pymongo.client_session.ClientSession.operation_time` from - another `ClientSession` instance. - """ - if not isinstance(operation_time, Timestamp): - raise TypeError("operation_time must be an instance of bson.timestamp.Timestamp") - self._advance_operation_time(operation_time) - - def _process_response(self, reply): - """Process a response to a command that was run with this session.""" - self._advance_cluster_time(reply.get("$clusterTime")) - self._advance_operation_time(reply.get("operationTime")) - if self._options.snapshot and self._snapshot_time is None: - if "cursor" in reply: - ct = reply["cursor"].get("atClusterTime") - else: - ct = reply.get("atClusterTime") - self._snapshot_time = ct - if self.in_transaction and self._transaction.sharded: - recovery_token = reply.get("recoveryToken") - if recovery_token: - self._transaction.recovery_token = recovery_token - - @property - def has_ended(self) -> bool: - """True if this session is finished.""" - return self._server_session is None - - @property - def in_transaction(self) -> bool: - """True if this session has an active multi-statement transaction. - - .. 
versionadded:: 3.10 - """ - return self._transaction.active() - - @property - def _starting_transaction(self): - """True if this session is starting a multi-statement transaction.""" - return self._transaction.starting() - - @property - def _pinned_address(self): - """The mongos address this transaction was created on.""" - if self._transaction.active(): - return self._transaction.pinned_address - return None - - @property - def _pinned_connection(self): - """The connection this transaction was started on.""" - return self._transaction.pinned_conn - - def _pin(self, server, sock_info): - """Pin this session to the given Server or to the given connection.""" - self._transaction.pin(server, sock_info) - - def _unpin(self): - """Unpin this session from any pinned Server.""" - self._transaction.unpin() - - def _txn_read_preference(self): - """Return read preference of this transaction or None.""" - if self.in_transaction: - return self._transaction.opts.read_preference - return None - - def _materialize(self): - if isinstance(self._server_session, _EmptyServerSession): - old = self._server_session - self._server_session = self._client._topology.get_server_session() - if old.started_retryable_write: - self._server_session.inc_transaction_id() - - def _apply_to(self, command, is_retryable, read_preference, sock_info): - self._check_ended() - self._materialize() - if self.options.snapshot: - self._update_read_concern(command, sock_info) - - self._server_session.last_use = time.monotonic() - command["lsid"] = self._server_session.session_id - - if is_retryable: - command["txnNumber"] = self._server_session.transaction_id - return - - if self.in_transaction: - if read_preference != ReadPreference.PRIMARY: - raise InvalidOperation( - "read preference in a transaction must be primary, not: " - "%r" % (read_preference,) - ) - - if self._transaction.state == _TxnState.STARTING: - # First command begins a new transaction. - self._transaction.state = _TxnState.IN_PROGRESS - command["startTransaction"] = True - - if self._transaction.opts.read_concern: - rc = self._transaction.opts.read_concern.document - if rc: - command["readConcern"] = rc - self._update_read_concern(command, sock_info) - - command["txnNumber"] = self._server_session.transaction_id - command["autocommit"] = False - - def _start_retryable_write(self): - self._check_ended() - self._server_session.inc_transaction_id() - - def _update_read_concern(self, cmd, sock_info): - if self.options.causal_consistency and self.operation_time is not None: - cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time - if self.options.snapshot: - if sock_info.max_wire_version < 13: - raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later") - rc = cmd.setdefault("readConcern", {}) - rc["level"] = "snapshot" - if self._snapshot_time is not None: - rc["atClusterTime"] = self._snapshot_time - - def __copy__(self) -> NoReturn: - raise TypeError("A ClientSession cannot be copied, create a new session instead") - - -class _EmptyServerSession: - __slots__ = "dirty", "started_retryable_write" - - def __init__(self): - self.dirty = False - self.started_retryable_write = False - - def mark_dirty(self): - self.dirty = True - - def inc_transaction_id(self): - self.started_retryable_write = True - - -class _ServerSession(object): - def __init__(self, generation): - # Ensure id is type 4, regardless of CodecOptions.uuid_representation. 
- self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} - self.last_use = time.monotonic() - self._transaction_id = 0 - self.dirty = False - self.generation = generation - - def mark_dirty(self): - """Mark this session as dirty. - - A server session is marked dirty when a command fails with a network - error. Dirty sessions are later discarded from the server session pool. - """ - self.dirty = True - - def timed_out(self, session_timeout_minutes): - idle_seconds = time.monotonic() - self.last_use - - # Timed out if we have less than a minute to live. - return idle_seconds > (session_timeout_minutes - 1) * 60 - - @property - def transaction_id(self): - """Positive 64-bit integer.""" - return Int64(self._transaction_id) - - def inc_transaction_id(self): - self._transaction_id += 1 - - -class _ServerSessionPool(collections.deque): - """Pool of _ServerSession objects. - - This class is not thread-safe, access it while holding the Topology lock. - """ - - def __init__(self, *args, **kwargs): - super(_ServerSessionPool, self).__init__(*args, **kwargs) - self.generation = 0 - - def reset(self): - self.generation += 1 - self.clear() - - def pop_all(self): - ids = [] - while self: - ids.append(self.pop().session_id) - return ids - - def get_server_session(self, session_timeout_minutes): - # Although the Driver Sessions Spec says we only clear stale sessions - # in return_server_session, PyMongo can't take a lock when returning - # sessions from a __del__ method (like in Cursor.__die), so it can't - # clear stale sessions there. In case many sessions were returned via - # __del__, check for stale sessions here too. - self._clear_stale(session_timeout_minutes) - - # The most recently used sessions are on the left. - while self: - s = self.popleft() - if not s.timed_out(session_timeout_minutes): - return s - - return _ServerSession(self.generation) - - def return_server_session(self, server_session, session_timeout_minutes): - if session_timeout_minutes is not None: - self._clear_stale(session_timeout_minutes) - if server_session.timed_out(session_timeout_minutes): - return - self.return_server_session_no_lock(server_session) - - def return_server_session_no_lock(self, server_session): - # Discard sessions from an old pool to avoid duplicate sessions in the - # child process after a fork. - if server_session.generation == self.generation and not server_session.dirty: - self.appendleft(server_session) - - def _clear_stale(self, session_timeout_minutes): - # Clear stale sessions. The least recently used are on the right. - while self: - if self[-1].timed_out(session_timeout_minutes): - self.pop() - else: - # The remaining sessions also haven't timed out. - break +__doc__ = original_doc +__all__ = ["ClientSession", "SessionOptions", "TransactionOptions"] # noqa: F405 diff --git a/pymongo/collation.py b/pymongo/collation.py index 5bc73c07c8..8a1eca7aff 100644 --- a/pymongo/collation.py +++ b/pymongo/collation.py @@ -1,10 +1,10 @@ -# Copyright 2016 MongoDB, Inc. +# Copyright 2016-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -14,14 +14,19 @@ """Tools for working with `collations`_. -.. 
_collations: http://userguide.icu-project.org/collation/concepts +.. _collations: https://www.mongodb.com/docs/manual/reference/collation/ + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. """ -from typing import Any, Dict, Mapping, Optional, Union +from __future__ import annotations + +from typing import Any, Mapping, Optional, Union from pymongo import common +from pymongo.write_concern import validate_boolean -class CollationStrength(object): +class CollationStrength: """ An enum that defines values for `strength` on a :class:`~pymongo.collation.Collation`. @@ -43,7 +48,7 @@ class CollationStrength(object): """Differentiate unicode code point (characters are exactly identical).""" -class CollationAlternate(object): +class CollationAlternate: """ An enum that defines values for `alternate` on a :class:`~pymongo.collation.Collation`. @@ -62,7 +67,7 @@ class CollationAlternate(object): """ -class CollationMaxVariable(object): +class CollationMaxVariable: """ An enum that defines values for `max_variable` on a :class:`~pymongo.collation.Collation`. @@ -75,7 +80,7 @@ class CollationMaxVariable(object): """Spaces alone are ignored.""" -class CollationCaseFirst(object): +class CollationCaseFirst: """ An enum that defines values for `case_first` on a :class:`~pymongo.collation.Collation`. @@ -91,25 +96,24 @@ class CollationCaseFirst(object): """Default for locale or collation strength.""" -class Collation(object): +class Collation: """Collation - :Parameters: - - `locale`: (string) The locale of the collation. This should be a string + :param locale: (string) The locale of the collation. This should be a string that identifies an `ICU locale ID` exactly. For example, ``en_US`` is valid, but ``en_us`` and ``en-US`` are not. Consult the MongoDB documentation for a list of supported locales. - - `caseLevel`: (optional) If ``True``, turn on case sensitivity if + :param caseLevel: (optional) If ``True``, turn on case sensitivity if `strength` is 1 or 2 (case sensitivity is implied if `strength` is greater than 2). Defaults to ``False``. - - `caseFirst`: (optional) Specify that either uppercase or lowercase + :param caseFirst: (optional) Specify that either uppercase or lowercase characters take precedence. Must be one of the following values: * :data:`~CollationCaseFirst.UPPER` * :data:`~CollationCaseFirst.LOWER` * :data:`~CollationCaseFirst.OFF` (the default) - - `strength`: (optional) Specify the comparison strength. This is also + :param strength: Specify the comparison strength. This is also known as the ICU comparison level. This must be one of the following values: @@ -123,27 +127,27 @@ class Collation(object): `strength` of :data:`~CollationStrength.SECONDARY` differentiates characters based both on the unadorned base character and its accents. - - `numericOrdering`: (optional) If ``True``, order numbers numerically + :param numericOrdering: If ``True``, order numbers numerically instead of in collation order (defaults to ``False``). - - `alternate`: (optional) Specify whether spaces and punctuation are + :param alternate: Specify whether spaces and punctuation are considered base characters. This must be one of the following values: * :data:`~CollationAlternate.NON_IGNORABLE` (the default) * :data:`~CollationAlternate.SHIFTED` - - `maxVariable`: (optional) When `alternate` is + :param maxVariable: When `alternate` is :data:`~CollationAlternate.SHIFTED`, this option specifies what characters may be ignored. 
This must be one of the following values: * :data:`~CollationMaxVariable.PUNCT` (the default) * :data:`~CollationMaxVariable.SPACE` - - `normalization`: (optional) If ``True``, normalizes text into Unicode + :param normalization: If ``True``, normalizes text into Unicode NFD. Defaults to ``False``. - - `backwards`: (optional) If ``True``, accents on characters are + :param backwards: If ``True``, accents on characters are considered from the back of the word to the front, as it is done in some French dictionary ordering traditions. Defaults to ``False``. - - `kwargs`: (optional) Keyword arguments supplying any additional options + :param kwargs: Keyword arguments supplying any additional options to be sent with this Collation object. .. versionadded: 3.4 @@ -163,18 +167,18 @@ def __init__( maxVariable: Optional[str] = None, normalization: Optional[bool] = None, backwards: Optional[bool] = None, - **kwargs: Any + **kwargs: Any, ) -> None: locale = common.validate_string("locale", locale) - self.__document: Dict[str, Any] = {"locale": locale} + self.__document: dict[str, Any] = {"locale": locale} if caseLevel is not None: - self.__document["caseLevel"] = common.validate_boolean("caseLevel", caseLevel) + self.__document["caseLevel"] = validate_boolean("caseLevel", caseLevel) if caseFirst is not None: self.__document["caseFirst"] = common.validate_string("caseFirst", caseFirst) if strength is not None: self.__document["strength"] = common.validate_integer("strength", strength) if numericOrdering is not None: - self.__document["numericOrdering"] = common.validate_boolean( + self.__document["numericOrdering"] = validate_boolean( "numericOrdering", numericOrdering ) if alternate is not None: @@ -182,15 +186,13 @@ def __init__( if maxVariable is not None: self.__document["maxVariable"] = common.validate_string("maxVariable", maxVariable) if normalization is not None: - self.__document["normalization"] = common.validate_boolean( - "normalization", normalization - ) + self.__document["normalization"] = validate_boolean("normalization", normalization) if backwards is not None: - self.__document["backwards"] = common.validate_boolean("backwards", backwards) + self.__document["backwards"] = validate_boolean("backwards", backwards) self.__document.update(kwargs) @property - def document(self) -> Dict[str, Any]: + def document(self) -> dict[str, Any]: """The document representation of this collation. .. 
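Since the option reference above never shows a call site, here is a minimal usage sketch (``coll`` is an assumed collection handle)::

    from pymongo.collation import Collation, CollationStrength

    # Case-insensitive (but accent-sensitive) matching for US English.
    collation = Collation(locale="en_US", strength=CollationStrength.SECONDARY)
    coll.create_index([("name", 1)], collation=collation)
    docs = list(coll.find({"name": "peter"}).collation(collation))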
note:: @@ -199,9 +201,9 @@ def document(self) -> Dict[str, Any]: """ return self.__document.copy() - def __repr__(self): + def __repr__(self) -> str: document = self.document - return "Collation(%s)" % (", ".join("%s=%r" % (key, document[key]) for key in document),) + return "Collation({})".format(", ".join(f"{key}={document[key]!r}" for key in document)) def __eq__(self, other: Any) -> bool: if isinstance(other, Collation): @@ -214,11 +216,11 @@ def __ne__(self, other: Any) -> bool: def validate_collation_or_none( value: Optional[Union[Mapping[str, Any], Collation]] -) -> Optional[Dict[str, Any]]: +) -> Optional[dict[str, Any]]: if value is None: return None if isinstance(value, Collation): return value.document if isinstance(value, dict): return value - raise TypeError("collation must be a dict, an instance of collation.Collation, or None.") + raise TypeError("collation must be a dict, an instance of collation.Collation, or None") diff --git a/pymongo/collection.py b/pymongo/collection.py index 9a9ba56618..16063425a7 100644 --- a/pymongo/collection.py +++ b/pymongo/collection.py @@ -1,10 +1,10 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,3193 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Collection level utilities for Mongo.""" +"""Re-import of synchronous Collection API for compatibility.""" +from __future__ import annotations -from collections import abc -from typing import ( - TYPE_CHECKING, - Any, - Generic, - Iterable, - List, - Mapping, - MutableMapping, - NoReturn, - Optional, - Sequence, - Tuple, - Union, -) +from pymongo.synchronous.collection import * # noqa: F403 +from pymongo.synchronous.collection import __doc__ as original_doc -from bson.codec_options import CodecOptions -from bson.objectid import ObjectId -from bson.raw_bson import RawBSONDocument -from bson.son import SON -from bson.timestamp import Timestamp -from pymongo import ASCENDING, _csot, common, helpers, message -from pymongo.aggregation import ( - _CollectionAggregationCommand, - _CollectionRawAggregationCommand, -) -from pymongo.bulk import _Bulk -from pymongo.change_stream import CollectionChangeStream -from pymongo.collation import validate_collation_or_none -from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor -from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name -from pymongo.cursor import Cursor, RawBatchCursor -from pymongo.errors import ( - ConfigurationError, - InvalidName, - InvalidOperation, - OperationFailure, -) -from pymongo.helpers import _check_write_command_response -from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS -from pymongo.operations import ( - DeleteMany, - DeleteOne, - IndexModel, - InsertOne, - ReplaceOne, - UpdateMany, - UpdateOne, -) -from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.results import ( - BulkWriteResult, - DeleteResult, - InsertManyResult, - InsertOneResult, - UpdateResult, -) -from pymongo.typings import _CollationIn, _DocumentIn, _DocumentType, _Pipeline -from 
pymongo.write_concern import WriteConcern - -_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} - - -_WriteOp = Union[InsertOne, DeleteOne, DeleteMany, ReplaceOne, UpdateOne, UpdateMany] -# Hint supports index name, "myIndex", or list of index pairs: [('x', 1), ('y', -1)] -_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] -_IndexKeyHint = Union[str, _IndexList] - - -class ReturnDocument(object): - """An enum used with - :meth:`~pymongo.collection.Collection.find_one_and_replace` and - :meth:`~pymongo.collection.Collection.find_one_and_update`. - """ - - BEFORE = False - """Return the original document before it was updated/replaced, or - ``None`` if no document matches the query. - """ - AFTER = True - """Return the updated/replaced or inserted document.""" - - -if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.database import Database - from pymongo.read_concern import ReadConcern - - -class Collection(common.BaseObject, Generic[_DocumentType]): - """A Mongo collection.""" - - def __init__( - self, - database: "Database[_DocumentType]", - name: str, - create: Optional[bool] = False, - codec_options: Optional[CodecOptions] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, - session: Optional["ClientSession"] = None, - **kwargs: Any, - ) -> None: - """Get / create a Mongo collection. - - Raises :class:`TypeError` if `name` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidName` if `name` is not a valid - collection name. Any additional keyword arguments will be used - as options passed to the create command. See - :meth:`~pymongo.database.Database.create_collection` for valid - options. - - If `create` is ``True``, `collation` is specified, or any additional - keyword arguments are present, a ``create`` command will be - sent, using ``session`` if specified. Otherwise, a ``create`` command - will not be sent and the collection will be created implicitly on first - use. The optional ``session`` argument is *only* used for the ``create`` - command, it is not associated with the collection afterward. - - :Parameters: - - `database`: the database to get a collection from - - `name`: the name of the collection to get - - `create` (optional): if ``True``, force collection - creation even without options being set - - `codec_options` (optional): An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) database.codec_options is used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) database.read_preference is used. - - `write_concern` (optional): An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) database.write_concern is used. - - `read_concern` (optional): An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) database.read_concern is used. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. If a collation is provided, - it will be passed to the create collection command. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession` that is used with - the create collection command - - `**kwargs` (optional): additional keyword arguments will - be passed as options for the create collection command - - .. versionchanged:: 4.2 - Added the ``clusteredIndex`` and ``encryptedFields`` parameters. - - .. 
versionchanged:: 4.0 - Removed the reindex, map_reduce, inline_map_reduce, - parallel_scan, initialize_unordered_bulk_op, - initialize_ordered_bulk_op, group, count, insert, save, - update, remove, find_and_modify, and ensure_index methods. See the - :ref:`pymongo4-migration-guide`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Support the `collation` option. - - .. versionchanged:: 3.2 - Added the read_concern option. - - .. versionchanged:: 3.0 - Added the codec_options, read_preference, and write_concern options. - Removed the uuid_subtype attribute. - :class:`~pymongo.collection.Collection` no longer returns an - instance of :class:`~pymongo.collection.Collection` for attribute - names with leading underscores. You must use dict-style lookups - instead:: - - collection['__my_collection__'] - - Not: - - collection.__my_collection__ - - .. seealso:: The MongoDB documentation on `collections `_. - """ - super(Collection, self).__init__( - codec_options or database.codec_options, - read_preference or database.read_preference, - write_concern or database.write_concern, - read_concern or database.read_concern, - ) - if not isinstance(name, str): - raise TypeError("name must be an instance of str") - - if not name or ".." in name: - raise InvalidName("collection names cannot be empty") - if "$" in name and not (name.startswith("oplog.$main") or name.startswith("$cmd")): - raise InvalidName("collection names must not contain '$': %r" % name) - if name[0] == "." or name[-1] == ".": - raise InvalidName("collection names must not start or end with '.': %r" % name) - if "\x00" in name: - raise InvalidName("collection names must not contain the null character") - collation = validate_collation_or_none(kwargs.pop("collation", None)) - - self.__database: Database[_DocumentType] = database - self.__name = name - self.__full_name = "%s.%s" % (self.__database.name, self.__name) - self.__write_response_codec_options = self.codec_options._replace( - unicode_decode_error_handler="replace", document_class=dict - ) - self._timeout = database.client.options.timeout - encrypted_fields = kwargs.pop("encryptedFields", None) - if create or kwargs or collation: - if encrypted_fields: - common.validate_is_mapping("encrypted_fields", encrypted_fields) - opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} - self.__create(_esc_coll_name(encrypted_fields, name), opts, None, session) - self.__create(_ecc_coll_name(encrypted_fields, name), opts, None, session) - self.__create(_ecoc_coll_name(encrypted_fields, name), opts, None, session) - self.__create(name, kwargs, collation, session, encrypted_fields=encrypted_fields) - self.create_index([("__safeContent__", ASCENDING)], session) - else: - self.__create(name, kwargs, collation, session) - - def _socket_for_reads(self, session): - return self.__database.client._socket_for_reads(self._read_preference_for(session), session) - - def _socket_for_writes(self, session): - return self.__database.client._socket_for_writes(session) - - def _command( - self, - sock_info, - command, - read_preference=None, - codec_options=None, - check=True, - allowable_errors=None, - read_concern=None, - write_concern=None, - collation=None, - session=None, - retryable_write=False, - user_fields=None, - ): - """Internal command helper. - - :Parameters: - - `sock_info` - A SocketInfo instance. - - `command` - The command itself, as a :class:`~bson.son.SON` instance. 
- - `codec_options` (optional) - An instance of - :class:`~bson.codec_options.CodecOptions`. - - `check`: raise OperationFailure if there are errors - - `allowable_errors`: errors to ignore if `check` is True - - `read_concern` (optional) - An instance of - :class:`~pymongo.read_concern.ReadConcern`. - - `write_concern`: An instance of - :class:`~pymongo.write_concern.WriteConcern`. - - `collation` (optional) - An instance of - :class:`~pymongo.collation.Collation`. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `retryable_write` (optional): True if this command is a retryable - write. - - `user_fields` (optional): Response fields that should be decoded - using the TypeDecoders from codec_options, passed to - bson._decode_all_selective. - - :Returns: - The result document. - """ - with self.__database.client._tmp_session(session) as s: - return sock_info.command( - self.__database.name, - command, - read_preference or self._read_preference_for(session), - codec_options or self.codec_options, - check, - allowable_errors, - read_concern=read_concern, - write_concern=write_concern, - parse_write_concern_error=True, - collation=collation, - session=s, - client=self.__database.client, - retryable_write=retryable_write, - user_fields=user_fields, - ) - - def __create(self, name, options, collation, session, encrypted_fields=None): - """Sends a create command with the given options.""" - cmd = SON([("create", name)]) - if encrypted_fields: - cmd["encryptedFields"] = encrypted_fields - - if options: - if "size" in options: - options["size"] = float(options["size"]) - cmd.update(options) - with self._socket_for_writes(session) as sock_info: - self._command( - sock_info, - cmd, - read_preference=ReadPreference.PRIMARY, - write_concern=self._write_concern_for(session), - collation=collation, - session=session, - ) - - def __getattr__(self, name: str) -> "Collection[_DocumentType]": - """Get a sub-collection of this collection by name. - - Raises InvalidName if an invalid collection name is used. - - :Parameters: - - `name`: the name of the collection to get - """ - if name.startswith("_"): - full_name = "%s.%s" % (self.__name, name) - raise AttributeError( - "Collection has no attribute %r. To access the %s" - " collection, use database['%s']." % (name, full_name, full_name) - ) - return self.__getitem__(name) - - def __getitem__(self, name: str) -> "Collection[_DocumentType]": - return Collection( - self.__database, - "%s.%s" % (self.__name, name), - False, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern, - ) - - def __repr__(self): - return "Collection(%r, %r)" % (self.__database, self.__name) - - def __eq__(self, other: Any) -> bool: - if isinstance(other, Collection): - return self.__database == other.database and self.__name == other.name - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash((self.__database, self.__name)) - - def __bool__(self) -> NoReturn: - raise NotImplementedError( - "Collection objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: collection is not None" - ) - - @property - def full_name(self) -> str: - """The full name of this :class:`Collection`. - - The full name is of the form `database_name.collection_name`. 
- """ - return self.__full_name - - @property - def name(self) -> str: - """The name of this :class:`Collection`.""" - return self.__name - - @property - def database(self) -> "Database[_DocumentType]": - """The :class:`~pymongo.database.Database` that this - :class:`Collection` is a part of. - """ - return self.__database - - def with_options( - self, - codec_options: Optional[CodecOptions] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, - ) -> "Collection[_DocumentType]": - """Get a clone of this collection changing the specified settings. - - >>> coll1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) - >>> coll1.read_preference - Primary() - >>> coll2.read_preference - Secondary(tag_sets=None) - - :Parameters: - - `codec_options` (optional): An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Collection` - is used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Collection` is used. See :mod:`~pymongo.read_preferences` - for options. - - `write_concern` (optional): An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Collection` - is used. - - `read_concern` (optional): An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Collection` - is used. - """ - return Collection( - self.__database, - self.__name, - False, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern, - ) - - @_csot.apply - def bulk_write( - self, - requests: Sequence[_WriteOp], - ordered: bool = True, - bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - let: Optional[Mapping] = None, - ) -> BulkWriteResult: - """Send a batch of write operations to the server. - - Requests are passed as a list of write operation instances ( - :class:`~pymongo.operations.InsertOne`, - :class:`~pymongo.operations.UpdateOne`, - :class:`~pymongo.operations.UpdateMany`, - :class:`~pymongo.operations.ReplaceOne`, - :class:`~pymongo.operations.DeleteOne`, or - :class:`~pymongo.operations.DeleteMany`). - - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} - {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} - >>> # DeleteMany, UpdateOne, and UpdateMany are also available. - ... - >>> from pymongo import InsertOne, DeleteOne, ReplaceOne - >>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}), - ... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)] - >>> result = db.test.bulk_write(requests) - >>> result.inserted_count - 1 - >>> result.deleted_count - 1 - >>> result.modified_count - 0 - >>> result.upserted_ids - {2: ObjectId('54f62ee28891e756a6e1abd5')} - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} - {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} - {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} - - :Parameters: - - `requests`: A list of write operations (see examples above). 
- - `ordered` (optional): If ``True`` (the default) requests will be - performed on the server serially, in the order provided. If an error - occurs all remaining operations are aborted. If ``False`` requests - will be performed on the server in arbitrary order, possibly in - parallel, and all operations will be attempted. - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - :Returns: - An instance of :class:`~pymongo.results.BulkWriteResult`. - - .. seealso:: :ref:`writes-and-ids` - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - Added ``let`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. versionadded:: 3.0 - """ - common.validate_list("requests", requests) - - blk = _Bulk(self, ordered, bypass_document_validation, comment=comment, let=let) - for request in requests: - try: - request._add_to_bulk(blk) - except AttributeError: - raise TypeError("%r is not a valid request" % (request,)) - - write_concern = self._write_concern_for(session) - bulk_api_result = blk.execute(write_concern, session) - if bulk_api_result is not None: - return BulkWriteResult(bulk_api_result, True) - return BulkWriteResult({}, False) - - def _insert_one( - self, doc, ordered, write_concern, op_id, bypass_doc_val, session, comment=None - ): - """Internal helper for inserting a single document.""" - write_concern = write_concern or self.write_concern - acknowledged = write_concern.acknowledged - command = SON([("insert", self.name), ("ordered", ordered), ("documents", [doc])]) - if comment is not None: - command["comment"] = comment - - def _insert_command(session, sock_info, retryable_write): - if bypass_doc_val: - command["bypassDocumentValidation"] = True - - result = sock_info.command( - self.__database.name, - command, - write_concern=write_concern, - codec_options=self.__write_response_codec_options, - session=session, - client=self.__database.client, - retryable_write=retryable_write, - ) - - _check_write_command_response(result) - - self.__database.client._retryable_write(acknowledged, _insert_command, session) - - if not isinstance(doc, RawBSONDocument): - return doc.get("_id") - - def insert_one( - self, - document: _DocumentIn, - bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - ) -> InsertOneResult: - """Insert a single document. - - >>> db.test.count_documents({'x': 1}) - 0 - >>> result = db.test.insert_one({'x': 1}) - >>> result.inserted_id - ObjectId('54f112defba522406c9cc208') - >>> db.test.find_one({'x': 1}) - {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} - - :Parameters: - - `document`: The document to insert. Must be a mutable mapping - type. If the document does not have an _id field one will be - added automatically. - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. 
Default is - ``False``. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - - An instance of :class:`~pymongo.results.InsertOneResult`. - - .. seealso:: :ref:`writes-and-ids` - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - .. versionadded:: 3.0 - """ - common.validate_is_document_type("document", document) - if not (isinstance(document, RawBSONDocument) or "_id" in document): - document["_id"] = ObjectId() - - write_concern = self._write_concern_for(session) - return InsertOneResult( - self._insert_one( - document, - ordered=True, - write_concern=write_concern, - op_id=None, - bypass_doc_val=bypass_document_validation, - session=session, - comment=comment, - ), - write_concern.acknowledged, - ) - - @_csot.apply - def insert_many( - self, - documents: Iterable[_DocumentIn], - ordered: bool = True, - bypass_document_validation: bool = False, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - ) -> InsertManyResult: - """Insert an iterable of documents. - - >>> db.test.count_documents({}) - 0 - >>> result = db.test.insert_many([{'x': i} for i in range(2)]) - >>> result.inserted_ids - [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')] - >>> db.test.count_documents({}) - 2 - - :Parameters: - - `documents`: An iterable of documents to insert. - - `ordered` (optional): If ``True`` (the default) documents will be - inserted on the server serially, in the order provided. If an error - occurs all remaining inserts are aborted. If ``False``, documents - will be inserted on the server in arbitrary order, possibly in - parallel, and all document inserts will be attempted. - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - An instance of :class:`~pymongo.results.InsertManyResult`. - - .. seealso:: :ref:`writes-and-ids` - - .. note:: `bypass_document_validation` requires server version - **>= 3.2** - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.2 - Added bypass_document_validation support - - ..
versionadded:: 3.0 - """ - if ( - not isinstance(documents, abc.Iterable) - or isinstance(documents, abc.Mapping) - or not documents - ): - raise TypeError("documents must be a non-empty list") - inserted_ids: List[ObjectId] = [] - - def gen(): - """A generator that validates documents and handles _ids.""" - for document in documents: - common.validate_is_document_type("document", document) - if not isinstance(document, RawBSONDocument): - if "_id" not in document: - document["_id"] = ObjectId() - inserted_ids.append(document["_id"]) - yield (message._INSERT, document) - - write_concern = self._write_concern_for(session) - blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) - blk.ops = [doc for doc in gen()] - blk.execute(write_concern, session=session) - return InsertManyResult(inserted_ids, write_concern.acknowledged) - - def _update( - self, - sock_info, - criteria, - document, - upsert=False, - multi=False, - write_concern=None, - op_id=None, - ordered=True, - bypass_doc_val=False, - collation=None, - array_filters=None, - hint=None, - session=None, - retryable_write=False, - let=None, - comment=None, - ): - """Internal update / replace helper.""" - common.validate_boolean("upsert", upsert) - collation = validate_collation_or_none(collation) - write_concern = write_concern or self.write_concern - acknowledged = write_concern.acknowledged - update_doc = SON([("q", criteria), ("u", document), ("multi", multi), ("upsert", upsert)]) - if collation is not None: - if not acknowledged: - raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - else: - update_doc["collation"] = collation - if array_filters is not None: - if not acknowledged: - raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") - else: - update_doc["arrayFilters"] = array_filters - if hint is not None: - if not acknowledged and sock_info.max_wire_version < 8: - raise ConfigurationError( - "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." - ) - if not isinstance(hint, str): - hint = helpers._index_document(hint) - update_doc["hint"] = hint - command = SON([("update", self.name), ("ordered", ordered), ("updates", [update_doc])]) - if let is not None: - common.validate_is_mapping("let", let) - command["let"] = let - - if comment is not None: - command["comment"] = comment - # Update command. - if bypass_doc_val: - command["bypassDocumentValidation"] = True - - # The command result has to be published for APM unmodified - # so we make a shallow copy here before adding updatedExisting. - result = sock_info.command( - self.__database.name, - command, - write_concern=write_concern, - codec_options=self.__write_response_codec_options, - session=session, - client=self.__database.client, - retryable_write=retryable_write, - ).copy() - _check_write_command_response(result) - # Add the updatedExisting field for compatibility. - if result.get("n") and "upserted" not in result: - result["updatedExisting"] = True - else: - result["updatedExisting"] = False - # MongoDB >= 2.6.0 returns the upsert _id in an array - # element. Break it out for backward compatibility. 
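# Illustration with hypothetical values: an acknowledged upsert reply such as
#   {"n": 1, "nModified": 0, "upserted": [{"index": 0, "_id": ObjectId("...")}]}
# is flattened below so that callers read result["upserted"] == ObjectId("...").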
- if "upserted" in result: - result["upserted"] = result["upserted"][0]["_id"] - - if not acknowledged: - return None - return result - - def _update_retryable( - self, - criteria, - document, - upsert=False, - multi=False, - write_concern=None, - op_id=None, - ordered=True, - bypass_doc_val=False, - collation=None, - array_filters=None, - hint=None, - session=None, - let=None, - comment=None, - ): - """Internal update / replace helper.""" - - def _update(session, sock_info, retryable_write): - return self._update( - sock_info, - criteria, - document, - upsert=upsert, - multi=multi, - write_concern=write_concern, - op_id=op_id, - ordered=ordered, - bypass_doc_val=bypass_doc_val, - collation=collation, - array_filters=array_filters, - hint=hint, - session=session, - retryable_write=retryable_write, - let=let, - comment=comment, - ) - - return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, _update, session - ) - - def replace_one( - self, - filter: Mapping[str, Any], - replacement: Mapping[str, Any], - upsert: bool = False, - bypass_document_validation: bool = False, - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> UpdateResult: - """Replace a single document matching the filter. - - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} - >>> result = db.test.replace_one({'x': 1}, {'y': 1}) - >>> result.matched_count - 1 - >>> result.modified_count - 1 - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} - - The *upsert* option can be used to insert a new document if a matching - document does not exist. - - >>> result = db.test.replace_one({'x': 1}, {'x': 1}, True) - >>> result.matched_count - 0 - >>> result.modified_count - 0 - >>> result.upserted_id - ObjectId('54f11e5c8891e756a6e1abd4') - >>> db.test.find_one({'x': 1}) - {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} - - :Parameters: - - `filter`: A query that matches the document to replace. - - `replacement`: The new document. - - `upsert` (optional): If ``True``, perform an insert if no documents - match the filter. - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this - command. - :Returns: - - An instance of :class:`~pymongo.results.UpdateResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. 
versionchanged:: 3.4 - Added the `collation` option. - .. versionchanged:: 3.2 - Added bypass_document_validation support. - - .. versionadded:: 3.0 - """ - common.validate_is_mapping("filter", filter) - common.validate_ok_for_replace(replacement) - if let is not None: - common.validate_is_mapping("let", let) - write_concern = self._write_concern_for(session) - return UpdateResult( - self._update_retryable( - filter, - replacement, - upsert, - write_concern=write_concern, - bypass_doc_val=bypass_document_validation, - collation=collation, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def update_one( - self, - filter: Mapping[str, Any], - update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, - bypass_document_validation: bool = False, - collation: Optional[_CollationIn] = None, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> UpdateResult: - """Update a single document matching the filter. - - >>> for doc in db.test.find(): - ... print(doc) - ... - {'x': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - >>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) - >>> result.matched_count - 1 - >>> result.modified_count - 1 - >>> for doc in db.test.find(): - ... print(doc) - ... - {'x': 4, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - - If ``upsert=True`` and no documents match the filter, create a - new document based on the filter criteria and update modifications. - - >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) - >>> result.matched_count - 0 - >>> result.modified_count - 0 - >>> result.upserted_id - ObjectId('626a678eeaa80587d4bb3fb7') - >>> db.test.find_one(result.upserted_id) - {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} - - :Parameters: - - `filter`: A query that matches the document to update. - - `update`: The modifications to apply. - - `upsert` (optional): If ``True``, perform an insert if no documents - match the filter. - - `bypass_document_validation`: (optional) If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. - - `hint` (optional): An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - - An instance of :class:`~pymongo.results.UpdateResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the ``update``. - .. 
versionchanged:: 3.6 - Added the ``array_filters`` and ``session`` parameters. - .. versionchanged:: 3.4 - Added the ``collation`` option. - .. versionchanged:: 3.2 - Added ``bypass_document_validation`` support. - - .. versionadded:: 3.0 - """ - common.validate_is_mapping("filter", filter) - common.validate_ok_for_update(update) - common.validate_list_or_none("array_filters", array_filters) - - write_concern = self._write_concern_for(session) - return UpdateResult( - self._update_retryable( - filter, - update, - upsert, - write_concern=write_concern, - bypass_doc_val=bypass_document_validation, - collation=collation, - array_filters=array_filters, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def update_many( - self, - filter: Mapping[str, Any], - update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - bypass_document_validation: Optional[bool] = None, - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> UpdateResult: - """Update one or more documents that match the filter. - - >>> for doc in db.test.find(): - ... print(doc) - ... - {'x': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - >>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) - >>> result.matched_count - 3 - >>> result.modified_count - 3 - >>> for doc in db.test.find(): - ... print(doc) - ... - {'x': 4, '_id': 0} - {'x': 4, '_id': 1} - {'x': 4, '_id': 2} - - :Parameters: - - `filter`: A query that matches the documents to update. - - `update`: The modifications to apply. - - `upsert` (optional): If ``True``, perform an insert if no documents - match the filter. - - `bypass_document_validation` (optional): If ``True``, allows the - write to opt-out of document level validation. Default is - ``False``. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `array_filters` (optional): A list of filters specifying which - array elements an update should apply. - - `hint` (optional): An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.2 and above. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - - An instance of :class:`~pymongo.results.UpdateResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the `update`. - .. versionchanged:: 3.6 - Added ``array_filters`` and ``session`` parameters. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionchanged:: 3.2 - Added bypass_document_validation support. - - .. 
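As a concrete illustration of the ``array_filters`` parameter documented above, a sketch against an assumed ``coll`` collection handle::

    # Cap every grade above 100, matching array elements via a filter.
    coll.update_many(
        {},
        {"$set": {"grades.$[element]": 100}},
        array_filters=[{"element": {"$gt": 100}}],
    )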
versionadded:: 3.0 - """ - common.validate_is_mapping("filter", filter) - common.validate_ok_for_update(update) - common.validate_list_or_none("array_filters", array_filters) - - write_concern = self._write_concern_for(session) - return UpdateResult( - self._update_retryable( - filter, - update, - upsert, - multi=True, - write_concern=write_concern, - bypass_doc_val=bypass_document_validation, - collation=collation, - array_filters=array_filters, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def drop( - self, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - encrypted_fields: Optional[Mapping[str, Any]] = None, - ) -> None: - """Alias for :meth:`~pymongo.database.Database.drop_collection`. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. - - The following two calls are equivalent: - - >>> db.foo.drop() - >>> db.drop_collection("foo") - - .. versionchanged:: 4.2 - Added ``encrypted_fields`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.7 - :meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - dbo = self.__database.client.get_database( - self.__database.name, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern, - ) - dbo.drop_collection( - self.__name, session=session, comment=comment, encrypted_fields=encrypted_fields - ) - - def _delete( - self, - sock_info, - criteria, - multi, - write_concern=None, - op_id=None, - ordered=True, - collation=None, - hint=None, - session=None, - retryable_write=False, - let=None, - comment=None, - ): - """Internal delete helper.""" - common.validate_is_mapping("filter", criteria) - write_concern = write_concern or self.write_concern - acknowledged = write_concern.acknowledged - delete_doc = SON([("q", criteria), ("limit", int(not multi))]) - collation = validate_collation_or_none(collation) - if collation is not None: - if not acknowledged: - raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - else: - delete_doc["collation"] = collation - if hint is not None: - if not acknowledged and sock_info.max_wire_version < 9: - raise ConfigurationError( - "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." - ) - if not isinstance(hint, str): - hint = helpers._index_document(hint) - delete_doc["hint"] = hint - command = SON([("delete", self.name), ("ordered", ordered), ("deletes", [delete_doc])]) - - if let is not None: - common.validate_is_document_type("let", let) - command["let"] = let - - if comment is not None: - command["comment"] = comment - - # Delete command. 
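# (The reply is passed to _check_write_command_response below, which raises
# OperationFailure for write errors and write concern errors in the result.)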
- result = sock_info.command( - self.__database.name, - command, - write_concern=write_concern, - codec_options=self.__write_response_codec_options, - session=session, - client=self.__database.client, - retryable_write=retryable_write, - ) - _check_write_command_response(result) - return result - - def _delete_retryable( - self, - criteria, - multi, - write_concern=None, - op_id=None, - ordered=True, - collation=None, - hint=None, - session=None, - let=None, - comment=None, - ): - """Internal delete helper.""" - - def _delete(session, sock_info, retryable_write): - return self._delete( - sock_info, - criteria, - multi, - write_concern=write_concern, - op_id=op_id, - ordered=ordered, - collation=collation, - hint=hint, - session=session, - retryable_write=retryable_write, - let=let, - comment=comment, - ) - - return self.__database.client._retryable_write( - (write_concern or self.write_concern).acknowledged and not multi, _delete, session - ) - - def delete_one( - self, - filter: Mapping[str, Any], - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> DeleteResult: - """Delete a single document matching the filter. - - >>> db.test.count_documents({'x': 1}) - 3 - >>> result = db.test.delete_one({'x': 1}) - >>> result.deleted_count - 1 - >>> db.test.count_documents({'x': 1}) - 2 - - :Parameters: - - `filter`: A query that matches the document to delete. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - - An instance of :class:`~pymongo.results.DeleteResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionadded:: 3.0 - """ - write_concern = self._write_concern_for(session) - return DeleteResult( - self._delete_retryable( - filter, - False, - write_concern=write_concern, - collation=collation, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def delete_many( - self, - filter: Mapping[str, Any], - collation: Optional[_CollationIn] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - ) -> DeleteResult: - """Delete one or more documents matching the filter. - - >>> db.test.count_documents({'x': 1}) - 3 - >>> result = db.test.delete_many({'x': 1}) - >>> result.deleted_count - 3 - >>> db.test.count_documents({'x': 1}) - 0 - - :Parameters: - - `filter`: A query that matches the documents to delete. 
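A usage sketch for :meth:`delete_one` as documented above (the parameter list for :meth:`delete_many` continues below); it assumes a hypothetical ``coll`` on a MongoDB 4.4+ server, since ``hint`` on deletes requires that version:

.. code-block:: python

    from pymongo import ASCENDING, MongoClient
    from pymongo.collation import Collation

    coll = MongoClient().test.docs  # hypothetical collection
    result = coll.delete_one(
        {"name": "smith"},
        collation=Collation(locale="en", strength=2),  # case-insensitive match
        hint=[("name", ASCENDING)],  # requires MongoDB 4.4+
    )
    print(result.deleted_count, result.acknowledged)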
- - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - - An instance of :class:`~pymongo.results.DeleteResult`. - - .. versionchanged:: 4.1 - Added ``let`` parameter. - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.4 - Added the `collation` option. - .. versionadded:: 3.0 - """ - write_concern = self._write_concern_for(session) - return DeleteResult( - self._delete_retryable( - filter, - True, - write_concern=write_concern, - collation=collation, - hint=hint, - session=session, - let=let, - comment=comment, - ), - write_concern.acknowledged, - ) - - def find_one( - self, filter: Optional[Any] = None, *args: Any, **kwargs: Any - ) -> Optional[_DocumentType]: - """Get a single document from the database. - - All arguments to :meth:`find` are also valid arguments for - :meth:`find_one`, although any `limit` argument will be - ignored. Returns a single document, or ``None`` if no matching - document is found. - - The :meth:`find_one` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - :Parameters: - - - `filter` (optional): a dictionary specifying - the query to be performed OR any other type to be used as - the value for a query for ``"_id"``. - - - `*args` (optional): any additional positional arguments - are the same as the arguments to :meth:`find`. - - - `**kwargs` (optional): any additional keyword arguments - are the same as the arguments to :meth:`find`. - - >>> collection.find_one(max_time_ms=100) - - """ - if filter is not None and not isinstance(filter, abc.Mapping): - filter = {"_id": filter} - cursor = self.find(filter, *args, **kwargs) - for result in cursor.limit(-1): - return result - return None - - def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: - """Query the database. - - The `filter` argument is a query document that all results - must match. For example: - - >>> db.test.find({"hello": "world"}) - - only matches documents that have a key "hello" with value - "world". Matches can have other keys *in addition* to - "hello". The `projection` argument is used to specify a subset - of fields that should be included in the result documents. By - limiting results to a certain subset of fields you can cut - down on network traffic and decoding time. - - Raises :class:`TypeError` if any of the arguments are of - improper type. Returns an instance of - :class:`~pymongo.cursor.Cursor` corresponding to this query. - - The :meth:`find` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - :Parameters: - - `filter` (optional): A query document that selects which documents - to include in the result set. Can be an empty document to include - all documents. 
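A brief sketch of the ``"_id"`` shortcut that :meth:`find_one` implements above, reusing the hypothetical ``coll`` (the :meth:`find` parameter list continues below):

.. code-block:: python

    from bson.objectid import ObjectId

    oid = ObjectId("54f4e12bfba5220aa4d6dee8")  # hypothetical _id value
    # A non-mapping filter is wrapped as {"_id": <value>}, so these match.
    assert coll.find_one(oid) == coll.find_one({"_id": oid})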
- - `projection` (optional): a list of field names that should be - returned in the result set or a dict specifying the fields - to include or exclude. If `projection` is a list "_id" will - always be returned. Use a dict to exclude fields from - the result (e.g. projection={'_id': False}). - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `skip` (optional): the number of documents to omit (from - the start of the result set) when returning the results - - `limit` (optional): the maximum number of results to - return. A limit of 0 (the default) is equivalent to setting no - limit. - - `no_cursor_timeout` (optional): if False (the default), any - returned cursor is closed by the server after 10 minutes of - inactivity. If set to True, the returned cursor will never - time out on the server. Care should be taken to ensure that - cursors with no_cursor_timeout turned on are properly closed. - - `cursor_type` (optional): the type of cursor to return. The valid - options are defined by :class:`~pymongo.cursor.CursorType`: - - - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of - this find call will return a standard cursor over the result set. - - :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this - find call will be a tailable cursor - tailable cursors are only - for use with capped collections. They are not closed when the - last data is retrieved but are kept open and the cursor location - marks the final document position. If more data is received - iteration of the cursor will continue from the last document - received. For details, see the `tailable cursor documentation - `_. - - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result - of this find call will be a tailable cursor with the await flag - set. The server will wait for a few seconds after returning the - full result set so that it can capture and return additional data - added during the query. - - :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this - find call will be an exhaust cursor. MongoDB will stream batched - results to the client without waiting for the client to request - each batch, reducing latency. See notes on compatibility below. - - - `sort` (optional): a list of (key, direction) pairs - specifying the sort order for this query. See - :meth:`~pymongo.cursor.Cursor.sort` for details. - - `allow_partial_results` (optional): if True, mongos will return - partial results if some shards are down instead of returning an - error. - - `oplog_replay` (optional): **DEPRECATED** - if True, set the - oplogReplay query flag. Default: False. - - `batch_size` (optional): Limits the number of documents returned in - a single batch. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `return_key` (optional): If True, return only the index keys in - each document. - - `show_record_id` (optional): If True, adds a field ``$recordId`` in - each document with the storage engine's internal record identifier. - - `snapshot` (optional): **DEPRECATED** - If True, prevents the - cursor from returning a document more than once because of an - intervening write operation. - - `hint` (optional): An index, in the same format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the - proper index to use for the query. 
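Several of the options above combine naturally; a hedged sketch with the hypothetical ``coll`` (the parameter list continues below):

.. code-block:: python

    from pymongo import DESCENDING

    cursor = coll.find(
        {"status": "active"},
        projection={"_id": False, "name": True, "score": True},
        sort=[("score", DESCENDING)],
        skip=10,   # omit the first ten matches
        limit=5,   # return at most five documents
    )
    for doc in cursor:
        print(doc)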
- - `max_time_ms` (optional): Specifies a time limit for a query - operation. If the specified time is exceeded, the operation will be - aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass - this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor. - - `max_scan` (optional): **DEPRECATED** - The maximum number of - documents to scan. Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.max_scan` on the cursor. - - `min` (optional): A list of field, limit pairs specifying the - inclusive lower bound for all keys of a specific index in order. - Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.min` on the cursor. ``hint`` must - also be passed to ensure the query utilizes the correct index. - - `max` (optional): A list of field, limit pairs specifying the - exclusive upper bound for all keys of a specific index in order. - Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.max` on the cursor. ``hint`` must - also be passed to ensure the query utilizes the correct index. - - `comment` (optional): A string to attach to the query to help - interpret and trace the operation in the server logs and in profile - data. Pass this as an alternative to calling - :meth:`~pymongo.cursor.Cursor.comment` on the cursor. - - `allow_disk_use` (optional): if True, MongoDB may use temporary - disk files to store data exceeding the system memory limit while - processing a blocking sort operation. The option has no effect if - MongoDB can satisfy the specified sort using an index, or if the - blocking sort requires less memory than the 100 MiB limit. This - option is only supported on MongoDB 4.4 and above. - - .. note:: There are a number of caveats to using - :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type: - - - The `limit` option can not be used with an exhaust cursor. - - - Exhaust cursors are not supported by mongos and can not be - used with a sharded cluster. - - - A :class:`~pymongo.cursor.Cursor` instance created with the - :attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an - exclusive :class:`~socket.socket` connection to MongoDB. If the - :class:`~pymongo.cursor.Cursor` is discarded without being - completely iterated the underlying :class:`~socket.socket` - connection will be closed and discarded without being returned to - the connection pool. - - .. versionchanged:: 4.0 - Removed the ``modifiers`` option. - Empty projections (eg {} or []) are passed to the server as-is, - rather than the previous behavior which substituted in a - projection of ``{"_id": 1}``. This means that an empty projection - will now return the entire document, not just the ``"_id"`` field. - - .. versionchanged:: 3.11 - Added the ``allow_disk_use`` option. - Deprecated the ``oplog_replay`` option. Support for this option is - deprecated in MongoDB 4.4. The query engine now automatically - optimizes queries against the oplog without requiring this - option to be set. - - .. versionchanged:: 3.7 - Deprecated the ``snapshot`` option, which is deprecated in MongoDB - 3.6 and removed in MongoDB 4.0. - Deprecated the ``max_scan`` option. Support for this option is - deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit - server-side execution time. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.5 - Added the options ``return_key``, ``show_record_id``, ``snapshot``, - ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and - ``comment``. 
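To illustrate the ``min``/``max``/``hint`` interplay described above, a sketch assuming the hypothetical ``coll`` has an ascending index on ``score`` (the changelog notes continue below):

.. code-block:: python

    from pymongo import ASCENDING

    coll.create_index([("score", ASCENDING)])
    # min and max bound the index scan; hint must name the same index.
    cursor = coll.find(
        min=[("score", 10)],
        max=[("score", 100)],
        hint=[("score", ASCENDING)],
        max_time_ms=500,  # abort server-side after 500 milliseconds
    )
    print(list(cursor))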
- Deprecated the ``modifiers`` option. - - .. versionchanged:: 3.4 - Added support for the ``collation`` option. - - .. versionchanged:: 3.0 - Changed the parameter names ``spec``, ``fields``, ``timeout``, and - ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``, - and ``allow_partial_results`` respectively. - Added the ``cursor_type``, ``oplog_replay``, and ``modifiers`` - options. - Removed the ``network_timeout``, ``read_preference``, ``tag_sets``, - ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``, - ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and - ``slave_okay`` parameters. - Removed ``compile_re`` option: PyMongo now always - represents BSON regular expressions as :class:`~bson.regex.Regex` - objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to - convert from a BSON regular expression to a Python regular - expression object. - Soft deprecated the ``manipulate`` option. - - .. seealso:: The MongoDB documentation on `find `_. - """ - return Cursor(self, *args, **kwargs) - - def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: - """Query the database and retrieve batches of raw BSON. - - Similar to the :meth:`find` method but returns a - :class:`~pymongo.cursor.RawBatchCursor`. - - This example demonstrates how to work with raw batches, but in practice - raw batches should be passed to an external library that can decode - BSON into another data type, rather than used with PyMongo's - :mod:`bson` module. - - >>> import bson - >>> cursor = db.test.find_raw_batches() - >>> for batch in cursor: - ... print(bson.decode_all(batch)) - - .. note:: find_raw_batches does not support auto encryption. - - .. versionchanged:: 3.12 - Instead of ignoring the user-specified read concern, this method - now sends it to the server when connected to MongoDB 3.6+. - - Added session support. - - .. versionadded:: 3.6 - """ - # OP_MSG is required to support encryption. - if self.__database.client._encrypter: - raise InvalidOperation("find_raw_batches does not support auto encryption") - return RawBatchCursor(self, *args, **kwargs) - - def _count_cmd(self, session, sock_info, read_preference, cmd, collation): - """Internal count command helper.""" - # XXX: "ns missing" checks can be removed when we drop support for - # MongoDB 3.0, see SERVER-17051. - res = self._command( - sock_info, - cmd, - read_preference=read_preference, - allowable_errors=["ns missing"], - codec_options=self.__write_response_codec_options, - read_concern=self.read_concern, - collation=collation, - session=session, - ) - if res.get("errmsg", "") == "ns missing": - return 0 - return int(res["n"]) - - def _aggregate_one_result(self, sock_info, read_preference, cmd, collation, session): - """Internal helper to run an aggregate that returns a single result.""" - result = self._command( - sock_info, - cmd, - read_preference, - allowable_errors=[26], # Ignore NamespaceNotFound. - codec_options=self.__write_response_codec_options, - read_concern=self.read_concern, - collation=collation, - session=session, - ) - # cursor will not be present for NamespaceNotFound errors. - if "cursor" not in result: - return None - batch = result["cursor"]["firstBatch"] - return batch[0] if batch else None - - def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: - """Get an estimate of the number of documents in this collection using - collection metadata. - - The :meth:`estimated_document_count` method is **not** supported in a - transaction.
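A comparative sketch of the two counting helpers (hypothetical ``coll``; the docstring resumes below): the estimate reads collection metadata, while :meth:`count_documents` runs an aggregation and accepts a filter.

.. code-block:: python

    # Fast metadata-based estimate; cannot be used in a transaction.
    approx = coll.estimated_document_count(maxTimeMS=1000)
    # Exact, filterable count; supports sessions and transactions.
    exact = coll.count_documents({"status": "active"})
    print(approx, exact)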
- - All optional parameters should be passed as keyword arguments - to this method. Valid options include: - - - `maxTimeMS` (int): The maximum amount of time to allow this - operation to run, in milliseconds. - - :Parameters: - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): See list of options above. - - .. versionchanged:: 4.2 - This method now always uses the `count`_ command. Due to an oversight in versions - 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the - :ref:`versioned-api-ref`. Users of the Stable API with estimated_document_count are - recommended to upgrade their server version to 5.0.9+ or set - :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. - - .. versionadded:: 3.7 - .. _count: https://mongodb.com/docs/manual/reference/command/count/ - """ - if "session" in kwargs: - raise ConfigurationError("estimated_document_count does not support sessions") - if comment is not None: - kwargs["comment"] = comment - - def _cmd(session, server, sock_info, read_preference): - cmd = SON([("count", self.__name)]) - cmd.update(kwargs) - return self._count_cmd(session, sock_info, read_preference, cmd, collation=None) - - return self._retryable_non_cursor_read(_cmd, None) - - def count_documents( - self, - filter: Mapping[str, Any], - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> int: - """Count the number of documents in this collection. - - .. note:: For a fast count of the total documents in a collection see - :meth:`estimated_document_count`. - - The :meth:`count_documents` method is supported in a transaction. - - All optional parameters should be passed as keyword arguments - to this method. Valid options include: - - - `skip` (int): The number of matching documents to skip before - returning results. - - `limit` (int): The maximum number of documents to count. Must be - a positive integer. If not provided, no limit is imposed. - - `maxTimeMS` (int): The maximum amount of time to allow this - operation to run, in milliseconds. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `hint` (string or list of tuples): The index to use. Specify either - the index name as a string or the index specification as a list of - tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). - - The :meth:`count_documents` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - .. note:: When migrating from :meth:`count` to :meth:`count_documents` - the following query operators must be replaced: - - +-------------+-------------------------------------+ - | Operator | Replacement | - +=============+=====================================+ - | $where | `$expr`_ | - +-------------+-------------------------------------+ - | $near | `$geoWithin`_ with `$center`_ | - +-------------+-------------------------------------+ - | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | - +-------------+-------------------------------------+ - - :Parameters: - - `filter` (required): A query document that selects which documents - to count in the collection. Can be an empty document to count all - documents. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): See list of options above. - - - .. versionadded:: 3.7 - - .. 
_$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ - .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ - .. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ - .. _$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ - """ - pipeline = [{"$match": filter}] - if "skip" in kwargs: - pipeline.append({"$skip": kwargs.pop("skip")}) - if "limit" in kwargs: - pipeline.append({"$limit": kwargs.pop("limit")}) - if comment is not None: - kwargs["comment"] = comment - pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) - cmd = SON([("aggregate", self.__name), ("pipeline", pipeline), ("cursor", {})]) - if "hint" in kwargs and not isinstance(kwargs["hint"], str): - kwargs["hint"] = helpers._index_document(kwargs["hint"]) - collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd.update(kwargs) - - def _cmd(session, server, sock_info, read_preference): - result = self._aggregate_one_result(sock_info, read_preference, cmd, collation, session) - if not result: - return 0 - return result["n"] - - return self._retryable_non_cursor_read(_cmd, session) - - def _retryable_non_cursor_read(self, func, session): - """Non-cursor read helper to handle implicit session creation.""" - client = self.__database.client - with client._tmp_session(session) as s: - return client._retryable_read(func, self._read_preference_for(s), s) - - def create_indexes( - self, - indexes: Sequence[IndexModel], - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> List[str]: - """Create one or more indexes on this collection. - - >>> from pymongo import IndexModel, ASCENDING, DESCENDING - >>> index1 = IndexModel([("hello", DESCENDING), - ... ("world", ASCENDING)], name="hello_world") - >>> index2 = IndexModel([("goodbye", DESCENDING)]) - >>> db.test.create_indexes([index1, index2]) - ["hello_world", "goodbye_-1"] - - :Parameters: - - `indexes`: A list of :class:`~pymongo.operations.IndexModel` - instances. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): optional arguments to the createIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - - - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - .. versionadded:: 3.0 - - .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ - """ - common.validate_list("indexes", indexes) - if comment is not None: - kwargs["comment"] = comment - return self.__create_indexes(indexes, session, **kwargs) - - @_csot.apply - def __create_indexes(self, indexes, session, **kwargs): - """Internal createIndexes helper. - - :Parameters: - - `indexes`: A list of :class:`~pymongo.operations.IndexModel` - instances. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): optional arguments to the createIndexes - command (like maxTimeMS) can be passed as keyword arguments. 
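The aggregation that :meth:`count_documents` assembles above can be reproduced directly with :meth:`aggregate`; a sketch under the same hypothetical ``coll``:

.. code-block:: python

    pipeline = [
        {"$match": {"status": "active"}},
        {"$skip": 10},    # added only when skip is requested
        {"$limit": 100},  # added only when limit is requested
        {"$group": {"_id": 1, "n": {"$sum": 1}}},
    ]
    result = list(coll.aggregate(pipeline))
    count = result[0]["n"] if result else 0  # no batch means zero matches
    print(count)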
- """ - names = [] - with self._socket_for_writes(session) as sock_info: - supports_quorum = sock_info.max_wire_version >= 9 - - def gen_indexes(): - for index in indexes: - if not isinstance(index, IndexModel): - raise TypeError( - "%r is not an instance of pymongo.operations.IndexModel" % (index,) - ) - document = index.document - names.append(document["name"]) - yield document - - cmd = SON([("createIndexes", self.name), ("indexes", list(gen_indexes()))]) - cmd.update(kwargs) - if "commitQuorum" in kwargs and not supports_quorum: - raise ConfigurationError( - "Must be connected to MongoDB 4.4+ to use the " - "commitQuorum option for createIndexes" - ) - - self._command( - sock_info, - cmd, - read_preference=ReadPreference.PRIMARY, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - write_concern=self._write_concern_for(session), - session=session, - ) - return names - - def create_index( - self, - keys: _IndexKeyHint, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> str: - """Creates an index on this collection. - - Takes either a single key or a list of (key, direction) pairs. - The key(s) must be an instance of :class:`basestring` - (:class:`str` in python 3), and the direction(s) must be one of - (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, - :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, - :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). - - To create a single key ascending index on the key ``'mike'`` we just - use a string argument:: - - >>> my_collection.create_index("mike") - - For a compound index on ``'mike'`` descending and ``'eliot'`` - ascending we need to use a list of tuples:: - - >>> my_collection.create_index([("mike", pymongo.DESCENDING), - ... ("eliot", pymongo.ASCENDING)]) - - All optional index creation parameters should be passed as - keyword arguments to this method. For example:: - - >>> my_collection.create_index([("mike", pymongo.DESCENDING)], - ... background=True) - - Valid options include, but are not limited to: - - - `name`: custom name to use for this index - if none is - given, a name will be generated. - - `unique`: if ``True``, creates a uniqueness constraint on the - index. - - `background`: if ``True``, this index should be created in the - background. - - `sparse`: if ``True``, omit from the index any documents that lack - the indexed field. - - `bucketSize`: for use with geoHaystack indexes. - Number of documents to group together within a certain proximity - to a given longitude and latitude. - - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` - index. - - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` - index. - - `expireAfterSeconds`: Used to create an expiring (TTL) - collection. MongoDB will automatically delete documents from - this collection after seconds. The indexed field must - be a UTC datetime or the data will not expire. - - `partialFilterExpression`: A document that specifies a filter for - a partial index. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `wildcardProjection`: Allows users to include or exclude specific - field paths from a `wildcard index`_ using the {"$**" : 1} key - pattern. Requires MongoDB >= 4.2. - - `hidden`: if ``True``, this index will be hidden from the query - planner and will not be evaluated as part of query plan - selection. Requires MongoDB >= 4.4. - - See the MongoDB documentation for a full list of supported options by - server version. - - .. 
warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The - option is silently ignored by the server and unique index builds - using the option will fail if a duplicate value is detected. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - :Parameters: - - `keys`: a single key or a list of (key, direction) - pairs specifying the index to create - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): any additional index creation - options (see the above list) should be passed as keyword - arguments. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - .. versionchanged:: 3.11 - Added the ``hidden`` option. - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for passing maxTimeMS - in kwargs. - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. Support the `collation` option. - .. versionchanged:: 3.2 - Added partialFilterExpression to support partial indexes. - .. versionchanged:: 3.0 - Renamed `key_or_list` to `keys`. Removed the `cache_for` option. - :meth:`create_index` no longer caches index names. Removed support - for the drop_dups and bucket_size aliases. - - .. seealso:: The MongoDB documentation on `indexes `_. - - .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ - """ - cmd_options = {} - if "maxTimeMS" in kwargs: - cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") - if comment is not None: - cmd_options["comment"] = comment - index = IndexModel(keys, **kwargs) - return self.__create_indexes([index], session, **cmd_options)[0] - - def drop_indexes( - self, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> None: - """Drops all indexes on this collection. - - Can be used on non-existent collections or collections with no indexes. - Raises OperationFailure on an error. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): optional arguments to the dropIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - """ - if comment is not None: - kwargs["comment"] = comment - self.drop_index("*", session=session, **kwargs) - - @_csot.apply - def drop_index( - self, - index_or_name: _IndexKeyHint, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> None: - """Drops the specified index on this collection. - - Can be used on non-existent collections or collections with no - indexes. Raises OperationFailure on an error (e.g. trying to - drop an index that does not exist). `index_or_name` - can be either an index name (as returned by `create_index`), - or an index specifier (as passed to `create_index`). An index - specifier should be a list of (key, direction) pairs.
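Tying together several of the :meth:`create_index` options documented above with the name-based drop that :meth:`drop_index` requires (hypothetical ``coll``; the docstring continues below):

.. code-block:: python

    from pymongo import ASCENDING

    # TTL index: documents expire 3600 seconds after their "created_at".
    coll.create_index("created_at", expireAfterSeconds=3600)
    # Partial unique index: uniqueness enforced only where "email" exists.
    coll.create_index(
        [("email", ASCENDING)],
        unique=True,
        partialFilterExpression={"email": {"$exists": True}},
        name="email_unique_partial",
    )
    # A custom-named index must be dropped by that name.
    coll.drop_index("email_unique_partial")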
Raises - TypeError if index is not an instance of (str, list). - - .. warning:: - - if a custom name was used on index creation (by - passing the `name` parameter to :meth:`create_index`) the index - **must** be dropped by name. - - :Parameters: - - `index_or_name`: index (or name of index) to drop - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): optional arguments to the dropIndexes - command (like maxTimeMS) can be passed as keyword arguments. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - .. versionchanged:: 3.6 - Added ``session`` parameter. Added support for arbitrary keyword - arguments. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - """ - name = index_or_name - if isinstance(index_or_name, list): - name = helpers._gen_index_name(index_or_name) - - if not isinstance(name, str): - raise TypeError("index_or_name must be an instance of str or list") - - cmd = SON([("dropIndexes", self.__name), ("index", name)]) - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - with self._socket_for_writes(session) as sock_info: - self._command( - sock_info, - cmd, - read_preference=ReadPreference.PRIMARY, - allowable_errors=["ns not found", 26], - write_concern=self._write_concern_for(session), - session=session, - ) - - def list_indexes( - self, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - ) -> CommandCursor[MutableMapping[str, Any]]: - """Get a cursor over the index documents for this collection. - - >>> for index in db.test.list_indexes(): - ... print(index) - ... - SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - An instance of :class:`~pymongo.command_cursor.CommandCursor`. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionadded:: 3.0 - """ - codec_options: CodecOptions = CodecOptions(SON) - coll = self.with_options( - codec_options=codec_options, read_preference=ReadPreference.PRIMARY - ) - read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - explicit_session = session is not None - - def _cmd(session, server, sock_info, read_preference): - cmd = SON([("listIndexes", self.__name), ("cursor", {})]) - if comment is not None: - cmd["comment"] = comment - - try: - cursor = self._command( - sock_info, cmd, read_preference, codec_options, session=session - )["cursor"] - except OperationFailure as exc: - # Ignore NamespaceNotFound errors to match the behavior - # of reading from *.system.indexes.
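# Editor's note: a usage sketch for the index-introspection helpers defined
# here; "coll" is hypothetical. list_indexes yields one document per index,
# while index_information (below) flattens them into a name-keyed dict.
for index in coll.list_indexes():
    print(index["name"], list(index["key"].items()))
info = coll.index_information()
print(info["_id_"])  # every collection carries the default _id index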
- if exc.code != 26: - raise - cursor = {"id": 0, "firstBatch": []} - cmd_cursor = CommandCursor( - coll, - cursor, - sock_info.address, - session=session, - explicit_session=explicit_session, - comment=cmd.get("comment"), - ) - cmd_cursor._maybe_pin_connection(sock_info) - return cmd_cursor - - with self.__database.client._tmp_session(session, False) as s: - return self.__database.client._retryable_read(_cmd, read_pref, s) - - def index_information( - self, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - ) -> MutableMapping[str, Any]: - """Get information on this collection's indexes. - - Returns a dictionary where the keys are index names (as - returned by create_index()) and the values are dictionaries - containing information about each index. The dictionary is - guaranteed to contain at least a single key, ``"key"`` which - is a list of (key, direction) pairs specifying the index (as - passed to create_index()). It will also contain any other - metadata about the indexes, except for the ``"ns"`` and - ``"name"`` keys, which are cleaned. Example output might look - like this: - - >>> db.test.create_index("x", unique=True) - 'x_1' - >>> db.test.index_information() - {'_id_': {'key': [('_id', 1)]}, - 'x_1': {'unique': True, 'key': [('x', 1)]}} - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - cursor = self.list_indexes(session=session, comment=comment) - info = {} - for index in cursor: - index["key"] = list(index["key"].items()) - index = dict(index) - info[index.pop("name")] = index - return info - - def options( - self, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - ) -> MutableMapping[str, Any]: - """Get the options set on this collection. - - Returns a dictionary of options and their values - see - :meth:`~pymongo.database.Database.create_collection` for more - information on the possible options. Returns an empty - dictionary if the collection has not been created yet. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - .. versionchanged:: 3.6 - Added ``session`` parameter. 
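As a short sketch of reading back creation options (hypothetical ``db``):

.. code-block:: python

    db.create_collection(
        "events",
        capped=True,
        size=1024 * 1024,  # capped collections require a size in bytes
    )
    print(db.events.options())
    # e.g. {'capped': True, 'size': 1048576}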
- """ - dbo = self.__database.client.get_database( - self.__database.name, - self.codec_options, - self.read_preference, - self.write_concern, - self.read_concern, - ) - cursor = dbo.list_collections( - session=session, filter={"name": self.__name}, comment=comment - ) - - result = None - for doc in cursor: - result = doc - break - - if not result: - return {} - - options = result.get("options", {}) - assert options is not None - if "create" in options: - del options["create"] - - return options - - @_csot.apply - def _aggregate( - self, - aggregation_command, - pipeline, - cursor_class, - session, - explicit_session, - let=None, - comment=None, - **kwargs, - ): - if comment is not None: - kwargs["comment"] = comment - cmd = aggregation_command( - self, - cursor_class, - pipeline, - kwargs, - explicit_session, - let, - user_fields={"cursor": {"firstBatch": 1}}, - ) - - return self.__database.client._retryable_read( - cmd.get_cursor, - cmd.get_read_preference(session), - session, - retryable=not cmd._performs_write, - ) - - def aggregate( - self, - pipeline: _Pipeline, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> CommandCursor[_DocumentType]: - """Perform an aggregation using the aggregation framework on this - collection. - - The :meth:`aggregate` method obeys the :attr:`read_preference` of this - :class:`Collection`, except when ``$out`` or ``$merge`` are used on - MongoDB <5.0, in which case - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. - - .. note:: This method does not support the 'explain' option. Please - use :meth:`~pymongo.database.Database.command` instead. An - example is included in the :ref:`aggregate-examples` documentation. - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - :Parameters: - - `pipeline`: a list of aggregation pipeline stages - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): extra `aggregate command`_ parameters. - - All optional `aggregate command`_ parameters should be passed as - keyword arguments to this method. Valid options include, but are not - limited to: - - - `allowDiskUse` (bool): Enables writing to temporary files. When set - to True, aggregation stages can write data to the _tmp subdirectory - of the --dbpath directory. The default is False. - - `maxTimeMS` (int): The maximum amount of time to allow the operation - to run in milliseconds. - - `batchSize` (int): The maximum number of documents to return per - batch. Ignored if the connected mongod or mongos does not support - returning aggregate results using a cursor. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `let` (dict): A dict of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. ``"$$var"``). This option is - only supported on MongoDB >= 5.0. - - `comment` (optional): A user-provided comment to attach to this - command. - - - :Returns: - A :class:`~pymongo.command_cursor.CommandCursor` over the result - set. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - Added ``let`` parameter. - Support $merge and $out executing on secondaries according to the - collection's :attr:`read_preference`. - .. 
versionchanged:: 4.0 - Removed the ``useCursor`` option. - .. versionchanged:: 3.9 - Apply this collection's read concern to pipelines containing the - `$out` stage when connected to MongoDB >= 4.2. - Added support for the ``$merge`` pipeline stage. - Aggregations that write always use read preference - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - .. versionchanged:: 3.6 - Added the `session` parameter. Added the `maxAwaitTimeMS` option. - Deprecated the `useCursor` option. - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. Support the `collation` option. - .. versionchanged:: 3.0 - The :meth:`aggregate` method always returns a CommandCursor. The - pipeline argument must be a list. - - .. seealso:: :doc:`/examples/aggregation` - - .. _aggregate command: - https://mongodb.com/docs/manual/reference/command/aggregate - """ - - with self.__database.client._tmp_session(session, close=False) as s: - return self._aggregate( - _CollectionAggregationCommand, - pipeline, - CommandCursor, - session=s, - explicit_session=session is not None, - let=let, - comment=comment, - **kwargs, - ) - - def aggregate_raw_batches( - self, - pipeline: _Pipeline, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> RawBatchCursor[_DocumentType]: - """Perform an aggregation and retrieve batches of raw BSON. - - Similar to the :meth:`aggregate` method but returns a - :class:`~pymongo.cursor.RawBatchCursor`. - - This example demonstrates how to work with raw batches, but in practice - raw batches should be passed to an external library that can decode - BSON into another data type, rather than used with PyMongo's - :mod:`bson` module. - - >>> import bson - >>> cursor = db.test.aggregate_raw_batches([ - ... {'$project': {'x': {'$multiply': [2, '$x']}}}]) - >>> for batch in cursor: - ... print(bson.decode_all(batch)) - - .. note:: aggregate_raw_batches does not support auto encryption. - - .. versionchanged:: 3.12 - Added session support. - - .. versionadded:: 3.6 - """ - # OP_MSG is required to support encryption. - if self.__database.client._encrypter: - raise InvalidOperation("aggregate_raw_batches does not support auto encryption") - if comment is not None: - kwargs["comment"] = comment - with self.__database.client._tmp_session(session, close=False) as s: - return self._aggregate( - _CollectionRawAggregationCommand, - pipeline, - RawBatchCommandCursor, - session=s, - explicit_session=session is not None, - **kwargs, - ) - - def watch( - self, - pipeline: Optional[_Pipeline] = None, - full_document: Optional[str] = None, - resume_after: Optional[Mapping[str, Any]] = None, - max_await_time_ms: Optional[int] = None, - batch_size: Optional[int] = None, - collation: Optional[_CollationIn] = None, - start_at_operation_time: Optional[Timestamp] = None, - session: Optional["ClientSession"] = None, - start_after: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - ) -> CollectionChangeStream[_DocumentType]: - """Watch changes on this collection. - - Performs an aggregation with an implicit initial ``$changeStream`` - stage and returns a - :class:`~pymongo.change_stream.CollectionChangeStream` cursor which - iterates over changes on this collection. - - .. 
code-block:: python - - with db.collection.watch() as stream: - for change in stream: - print(change) - - The :class:`~pymongo.change_stream.CollectionChangeStream` iterable - blocks until the next change document is returned or an error is - raised. If the - :meth:`~pymongo.change_stream.CollectionChangeStream.next` method - encounters a network error when retrieving a batch from the server, - it will automatically attempt to recreate the cursor such that no - change events are missed. Any error encountered during the resume - attempt indicates there may be an outage and will be raised. - - .. code-block:: python - - try: - with db.collection.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: - for insert_change in stream: - print(insert_change) - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - logging.error('...') - - For a precise description of the resume process see the - `change streams specification`_. - - .. note:: Using this helper method is preferred to directly calling - :meth:`~pymongo.collection.Collection.aggregate` with a - ``$changeStream`` stage, for the purpose of supporting - resumability. - - .. warning:: This Collection's :attr:`read_concern` must be - ``ReadConcern("majority")`` in order to use the ``$changeStream`` - stage. - - :Parameters: - - `pipeline` (optional): A list of aggregation pipeline stages to - append to an initial ``$changeStream`` stage. Not all - pipeline stages are valid after a ``$changeStream`` stage, see the - MongoDB documentation on change streams for the supported stages. - - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', - 'whenAvailable', 'required'. When set to 'updateLookup', the - change notification for partial updates will include both a delta - describing the changes to the document, as well as a copy of the - entire document that was changed from some time after the change - occurred. - - `full_document_before_change`: Allowed values: 'whenAvailable' - and 'required'. Change events may now result in a - 'fullDocumentBeforeChange' response field. - - `resume_after` (optional): A resume token. If provided, the - change stream will start returning changes that occur directly - after the operation specified in the resume token. A resume token - is the _id value of a change document. - - `max_await_time_ms` (optional): The maximum time in milliseconds - for the server to wait for changes before responding to a getMore - operation. - - `batch_size` (optional): The maximum number of documents to return - per batch. - - `collation` (optional): The :class:`~pymongo.collation.Collation` - to use for the aggregation. - - `start_at_operation_time` (optional): If provided, the resulting - change stream will only return changes that occurred at or after - the specified :class:`~bson.timestamp.Timestamp`. Requires - MongoDB >= 4.0. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `start_after` (optional): The same as `resume_after` except that - `start_after` can resume notifications after an invalidate event. - This option and `resume_after` are mutually exclusive. - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. - - .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. - - .. 
versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.9 - Added the ``start_after`` parameter. - - .. versionchanged:: 3.7 - Added the ``start_at_operation_time`` parameter. - - .. versionadded:: 3.6 - - .. seealso:: The MongoDB documentation on `changeStreams `_. - - .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst - """ - return CollectionChangeStream( - self, - pipeline, - full_document, - resume_after, - max_await_time_ms, - batch_size, - collation, - start_at_operation_time, - session, - start_after, - comment, - full_document_before_change, - ) - - @_csot.apply - def rename( - self, - new_name: str, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> MutableMapping[str, Any]: - """Rename this collection. - - If operating in auth mode, client must be authorized as an - admin to perform this operation. Raises :class:`TypeError` if - `new_name` is not an instance of :class:`basestring` - (:class:`str` in Python 3). Raises :class:`~pymongo.errors.InvalidName` - if `new_name` is not a valid collection name. - - :Parameters: - - `new_name`: new name for this collection - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): additional arguments to the rename command - may be passed as keyword arguments to this helper method - (i.e. ``dropTarget=True``) - - .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of - this collection is automatically applied to this operation. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Apply this collection's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - """ - if not isinstance(new_name, str): - raise TypeError("new_name must be an instance of str") - - if not new_name or ".." in new_name: - raise InvalidName("collection names cannot be empty") - if new_name[0] == "." or new_name[-1] == ".": - raise InvalidName("collection names must not start or end with '.'") - if "$" in new_name and not new_name.startswith("oplog.$main"): - raise InvalidName("collection names must not contain '$'") - - new_name = "%s.%s" % (self.__database.name, new_name) - cmd = SON([("renameCollection", self.__full_name), ("to", new_name)]) - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - write_concern = self._write_concern_for_cmd(cmd, session) - - with self._socket_for_writes(session) as sock_info: - with self.__database.client._tmp_session(session) as s: - return sock_info.command( - "admin", - cmd, - write_concern=write_concern, - parse_write_concern_error=True, - session=s, - client=self.__database.client, - ) - - def distinct( - self, - key: str, - filter: Optional[Mapping[str, Any]] = None, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> List: - """Get a list of distinct values for `key` among all documents - in this collection. - - Raises :class:`TypeError` if `key` is not an instance of - :class:`basestring` (:class:`str` in Python 3). - - All optional distinct parameters should be passed as keyword arguments - to this method. Valid options include: - - - `maxTimeMS` (int): The maximum amount of time to allow the distinct - command to run, in milliseconds.
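A hedged usage sketch for :meth:`distinct` (the option list continues below), reusing the hypothetical ``coll``:

.. code-block:: python

    from pymongo.collation import Collation

    values = coll.distinct(
        "borough",
        filter={"status": "active"},  # restrict the documents considered
        maxTimeMS=1000,
        collation=Collation(locale="en", strength=2),  # case-insensitive
    )
    print(values)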
- - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - The :meth:`distinct` method obeys the :attr:`read_preference` of - this :class:`Collection`. - - :Parameters: - - `key`: name of the field for which we want to get the distinct - values - - `filter` (optional): A query document that specifies the documents - from which to retrieve the distinct values. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): See list of options above. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Support the `collation` option. - - """ - if not isinstance(key, str): - raise TypeError("key must be an instance of str") - cmd = SON([("distinct", self.__name), ("key", key)]) - if filter is not None: - if "query" in kwargs: - raise ConfigurationError("can't pass both filter and query") - kwargs["query"] = filter - collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - - def _cmd(session, server, sock_info, read_preference): - return self._command( - sock_info, - cmd, - read_preference=read_preference, - read_concern=self.read_concern, - collation=collation, - session=session, - user_fields={"values": 1}, - )["values"] - - return self._retryable_non_cursor_read(_cmd, session) - - def _write_concern_for_cmd(self, cmd, session): - raw_wc = cmd.get("writeConcern") - if raw_wc is not None: - return WriteConcern(**raw_wc) - else: - return self._write_concern_for(session) - - def __find_and_modify( - self, - filter, - projection, - sort, - upsert=None, - return_document=ReturnDocument.BEFORE, - array_filters=None, - hint=None, - session=None, - let=None, - **kwargs, - ): - """Internal findAndModify helper.""" - - common.validate_is_mapping("filter", filter) - if not isinstance(return_document, bool): - raise ValueError( - "return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER" - ) - collation = validate_collation_or_none(kwargs.pop("collation", None)) - cmd = SON([("findAndModify", self.__name), ("query", filter), ("new", return_document)]) - if let is not None: - common.validate_is_mapping("let", let) - cmd["let"] = let - cmd.update(kwargs) - if projection is not None: - cmd["fields"] = helpers._fields_list_to_dict(projection, "projection") - if sort is not None: - cmd["sort"] = helpers._index_document(sort) - if upsert is not None: - common.validate_boolean("upsert", upsert) - cmd["upsert"] = upsert - if hint is not None: - if not isinstance(hint, str): - hint = helpers._index_document(hint) - - write_concern = self._write_concern_for_cmd(cmd, session) - - def _find_and_modify(session, sock_info, retryable_write): - acknowledged = write_concern.acknowledged - if array_filters is not None: - if not acknowledged: - raise ConfigurationError( - "arrayFilters is unsupported for unacknowledged writes." - ) - cmd["arrayFilters"] = list(array_filters) - if hint is not None: - if sock_info.max_wire_version < 8: - raise ConfigurationError( - "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." - ) - elif not acknowledged and sock_info.max_wire_version < 9: - raise ConfigurationError( - "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." 
- ) - cmd["hint"] = hint - out = self._command( - sock_info, - cmd, - read_preference=ReadPreference.PRIMARY, - write_concern=write_concern, - collation=collation, - session=session, - retryable_write=retryable_write, - user_fields=_FIND_AND_MODIFY_DOC_FIELDS, - ) - _check_write_command_response(out) - - return out.get("value") - - return self.__database.client._retryable_write( - write_concern.acknowledged, _find_and_modify, session - ) - - def find_one_and_delete( - self, - filter: Mapping[str, Any], - projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, - sort: Optional[_IndexList] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> _DocumentType: - """Finds a single document and deletes it, returning the document. - - >>> db.test.count_documents({'x': 1}) - 2 - >>> db.test.find_one_and_delete({'x': 1}) - {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} - >>> db.test.count_documents({'x': 1}) - 1 - - If multiple documents match *filter*, a *sort* can be applied. - - >>> for doc in db.test.find({'x': 1}): - ... print(doc) - ... - {'x': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - >>> db.test.find_one_and_delete( - ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) - {'x': 1, '_id': 2} - - The *projection* option can be used to limit the fields returned. - - >>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) - {'x': 1} - - :Parameters: - - `filter`: A query that matches the document to delete. - - `projection` (optional): a list of field names that should be - returned in the result document or a mapping specifying the fields - to include or exclude. If `projection` is a list "_id" will - always be returned. Use a mapping to exclude fields from - the result (e.g. projection={'_id': False}). - - `sort` (optional): a list of (key, direction) pairs - specifying the sort order for the query. If multiple documents - match the query, they are sorted and the first is deleted. - - `hint` (optional): An index to use to support the query predicate - specified either by its string name, or in the same format as - passed to :meth:`~pymongo.collection.Collection.create_index` - (e.g. ``[('field', ASCENDING)]``). This option is only supported - on MongoDB 4.4 and above. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): additional command arguments can be passed - as keyword arguments (for example maxTimeMS can be used with - recent server versions). - - .. versionchanged:: 4.1 - Added ``let`` parameter. - .. versionchanged:: 3.11 - Added ``hint`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.2 - Respects write concern. - - .. warning:: Starting in PyMongo 3.2, this command uses the - :class:`~pymongo.write_concern.WriteConcern` of this - :class:`~pymongo.collection.Collection` when connected to MongoDB >= - 3.2. Note that using an elevated write concern with this command may - be slower compared to using the default write concern. - - .. versionchanged:: 3.4 - Added the `collation` option. - .. 
versionadded:: 3.0 - """ - kwargs["remove"] = True - if comment is not None: - kwargs["comment"] = comment - return self.__find_and_modify( - filter, projection, sort, let=let, hint=hint, session=session, **kwargs - ) - - def find_one_and_replace( - self, - filter: Mapping[str, Any], - replacement: Mapping[str, Any], - projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, - sort: Optional[_IndexList] = None, - upsert: bool = False, - return_document: bool = ReturnDocument.BEFORE, - hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> _DocumentType: - """Finds a single document and replaces it, returning either the - original or the replaced document. - - The :meth:`find_one_and_replace` method differs from - :meth:`find_one_and_update` by replacing the document matched by - *filter*, rather than modifying the existing document. - - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'x': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) - {'x': 1, '_id': 0} - >>> for doc in db.test.find({}): - ... print(doc) - ... - {'y': 1, '_id': 0} - {'x': 1, '_id': 1} - {'x': 1, '_id': 2} - - :Parameters: - - `filter`: A query that matches the document to replace. - - `replacement`: The replacement document. - - `projection` (optional): A list of field names that should be - returned in the result document or a mapping specifying the fields - to include or exclude. If `projection` is a list "_id" will - always be returned. Use a mapping to exclude fields from - the result (e.g. projection={'_id': False}). - - `sort` (optional): a list of (key, direction) pairs - specifying the sort order for the query. If multiple documents - match the query, they are sorted and the first is replaced. - - `upsert` (optional): When ``True``, inserts a new document if no - document matches the query. Defaults to ``False``. - - `return_document`: If - :attr:`ReturnDocument.BEFORE` (the default), - returns the original document before it was replaced, or ``None`` - if no document matches. If - :attr:`ReturnDocument.AFTER`, returns the replaced - or inserted document. - - `hint` (optional): An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): additional command arguments can be passed - as keyword arguments (for example maxTimeMS can be used with - recent server versions). - - .. versionchanged:: 4.1 - Added ``let`` parameter. - .. versionchanged:: 3.11 - Added the ``hint`` option. - .. versionchanged:: 3.6 - Added ``session`` parameter. - .. versionchanged:: 3.4 - Added the ``collation`` option. - .. versionchanged:: 3.2 - Respects write concern. - - .. 
warning:: Starting in PyMongo 3.2, this command uses the - :class:`~pymongo.write_concern.WriteConcern` of this - :class:`~pymongo.collection.Collection` when connected to MongoDB >= - 3.2. Note that using an elevated write concern with this command may - be slower compared to using the default write concern. - - .. versionadded:: 3.0 - """ - common.validate_ok_for_replace(replacement) - kwargs["update"] = replacement - if comment is not None: - kwargs["comment"] = comment - return self.__find_and_modify( - filter, - projection, - sort, - upsert, - return_document, - let=let, - hint=hint, - session=session, - **kwargs, - ) - - def find_one_and_update( - self, - filter: Mapping[str, Any], - update: Union[Mapping[str, Any], _Pipeline], - projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, - sort: Optional[_IndexList] = None, - upsert: bool = False, - return_document: bool = ReturnDocument.BEFORE, - array_filters: Optional[Sequence[Mapping[str, Any]]] = None, - hint: Optional[_IndexKeyHint] = None, - session: Optional["ClientSession"] = None, - let: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> _DocumentType: - """Finds a single document and updates it, returning either the - original or the updated document. - - >>> db.test.find_one_and_update( - ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) - {'_id': 665, 'done': False, 'count': 25} - - Returns ``None`` if no document matches the filter. - - >>> db.test.find_one_and_update( - ... {'_exists': False}, {'$inc': {'count': 1}}) - - When the filter matches, by default :meth:`find_one_and_update` - returns the original version of the document before the update was - applied. To return the updated (or inserted in the case of - *upsert*) version of the document instead, use the *return_document* - option. - - >>> from pymongo import ReturnDocument - >>> db.example.find_one_and_update( - ... {'_id': 'userid'}, - ... {'$inc': {'seq': 1}}, - ... return_document=ReturnDocument.AFTER) - {'_id': 'userid', 'seq': 1} - - You can limit the fields returned with the *projection* option. - - >>> db.example.find_one_and_update( - ... {'_id': 'userid'}, - ... {'$inc': {'seq': 1}}, - ... projection={'seq': True, '_id': False}, - ... return_document=ReturnDocument.AFTER) - {'seq': 2} - - The *upsert* option can be used to create the document if it doesn't - already exist. - - >>> db.example.delete_many({}).deleted_count - 1 - >>> db.example.find_one_and_update( - ... {'_id': 'userid'}, - ... {'$inc': {'seq': 1}}, - ... projection={'seq': True, '_id': False}, - ... upsert=True, - ... return_document=ReturnDocument.AFTER) - {'seq': 1} - - If multiple documents match *filter*, a *sort* can be applied. - - >>> for doc in db.test.find({'done': True}): - ... print(doc) - ... - {'_id': 665, 'done': True, 'result': {'count': 26}} - {'_id': 701, 'done': True, 'result': {'count': 17}} - >>> db.test.find_one_and_update( - ... {'done': True}, - ... {'$set': {'final': True}}, - ... sort=[('_id', pymongo.DESCENDING)]) - {'_id': 701, 'done': True, 'result': {'count': 17}} - - :Parameters: - - `filter`: A query that matches the document to update. - - `update`: The update operations to apply. - - `projection` (optional): A list of field names that should be - returned in the result document or a mapping specifying the fields - to include or exclude. If `projection` is a list "_id" will - always be returned. Use a dict to exclude fields from - the result (e.g. projection={'_id': False}).
- - `sort` (optional): a list of (key, direction) pairs - specifying the sort order for the query. If multiple documents - match the query, they are sorted and the first is updated. - - `upsert` (optional): When ``True``, inserts a new document if no - document matches the query. Defaults to ``False``. - - `return_document`: If - :attr:`ReturnDocument.BEFORE` (the default), - returns the original document before it was updated. If - :attr:`ReturnDocument.AFTER`, returns the updated - or inserted document. - - `array_filters` (optional): A list of filters specifying which - array elements an update should apply to. - - `hint` (optional): An index to use to support the query - predicate specified either by its string name, or in the same - format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. - ``[('field', ASCENDING)]``). This option is only supported on - MongoDB 4.4 and above. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `let` (optional): Map of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. "$$var"). - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): additional command arguments can be passed - as keyword arguments (for example maxTimeMS can be used with - recent server versions). - - .. versionchanged:: 3.11 - Added the ``hint`` option. - .. versionchanged:: 3.9 - Added the ability to accept a pipeline as the ``update``. - .. versionchanged:: 3.6 - Added the ``array_filters`` and ``session`` options. - .. versionchanged:: 3.4 - Added the ``collation`` option. - .. versionchanged:: 3.2 - Respects write concern. - - .. warning:: Starting in PyMongo 3.2, this command uses the - :class:`~pymongo.write_concern.WriteConcern` of this - :class:`~pymongo.collection.Collection` when connected to MongoDB >= - 3.2. Note that using an elevated write concern with this command may - be slower compared to using the default write concern. - - .. versionadded:: 3.0 - """ - common.validate_ok_for_update(update) - common.validate_list_or_none("array_filters", array_filters) - kwargs["update"] = update - if comment is not None: - kwargs["comment"] = comment - return self.__find_and_modify( - filter, - projection, - sort, - upsert, - return_document, - array_filters, - hint=hint, - let=let, - session=session, - **kwargs, - ) - - # See PYTHON-3084. - __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'Collection' object is not iterable") - - next = __next__ - - def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: - """This is only here so that some API misusages are easier to debug.""" - if "." not in self.__name: - raise TypeError( - "'Collection' object is not callable. If you " - "meant to call the '%s' method on a 'Database' " - "object it is failing because no such method " - "exists." % self.__name - ) - raise TypeError( - "'Collection' object is not callable. If you meant to " - "call the '%s' method on a 'Collection' object it is " - "failing because no such method exists."
% self.__name.split(".")[-1] - ) +__doc__ = original_doc +__all__ = [ # noqa: F405 + "Collection", + "ReturnDocument", +] diff --git a/pymongo/command_cursor.py b/pymongo/command_cursor.py index 6f3f244419..941e3a0eda 100644 --- a/pymongo/command_cursor.py +++ b/pymongo/command_cursor.py @@ -1,4 +1,4 @@ -# Copyright 2014-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,342 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""CommandCursor class to iterate over command results.""" +"""Re-import of synchronous CommandCursor API for compatibility.""" +from __future__ import annotations -from collections import deque -from typing import TYPE_CHECKING, Any, Generic, Iterator, Mapping, NoReturn, Optional +from pymongo.synchronous.command_cursor import * # noqa: F403 +from pymongo.synchronous.command_cursor import __doc__ as original_doc -from bson import _convert_raw_document_lists_to_streams -from pymongo.cursor import _CURSOR_CLOSED_ERRORS, _SocketManager -from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure -from pymongo.message import _CursorAddress, _GetMore, _RawBatchGetMore -from pymongo.response import PinnedResponse -from pymongo.typings import _Address, _DocumentType - -if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - - -class CommandCursor(Generic[_DocumentType]): - """A cursor / iterator over command cursors.""" - - _getmore_class = _GetMore - - def __init__( - self, - collection: "Collection[_DocumentType]", - cursor_info: Mapping[str, Any], - address: Optional[_Address], - batch_size: int = 0, - max_await_time_ms: Optional[int] = None, - session: Optional["ClientSession"] = None, - explicit_session: bool = False, - comment: Any = None, - ) -> None: - """Create a new command cursor.""" - self.__sock_mgr: Any = None - self.__collection: Collection[_DocumentType] = collection - self.__id = cursor_info["id"] - self.__data = deque(cursor_info["firstBatch"]) - self.__postbatchresumetoken = cursor_info.get("postBatchResumeToken") - self.__address = address - self.__batch_size = batch_size - self.__max_await_time_ms = max_await_time_ms - self.__session = session - self.__explicit_session = explicit_session - self.__killed = self.__id == 0 - self.__comment = comment - if self.__killed: - self.__end_session(True) - - if "ns" in cursor_info: - self.__ns = cursor_info["ns"] - else: - self.__ns = collection.full_name - - self.batch_size(batch_size) - - if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: - raise TypeError("max_await_time_ms must be an integer or None") - - def __del__(self) -> None: - self.__die() - - def __die(self, synchronous=False): - """Closes this cursor.""" - already_killed = self.__killed - self.__killed = True - if self.__id and not already_killed: - cursor_id = self.__id - address = _CursorAddress(self.__address, self.__ns) - else: - # Skip killCursors. 
- cursor_id = 0 - address = None - self.__collection.database.client._cleanup_cursor( - synchronous, - cursor_id, - address, - self.__sock_mgr, - self.__session, - self.__explicit_session, - ) - if not self.__explicit_session: - self.__session = None - self.__sock_mgr = None - - def __end_session(self, synchronous): - if self.__session and not self.__explicit_session: - self.__session._end_session(lock=synchronous) - self.__session = None - - def close(self) -> None: - """Explicitly close / kill this cursor.""" - self.__die(True) - - def batch_size(self, batch_size: int) -> "CommandCursor[_DocumentType]": - """Limits the number of documents returned in one batch. Each batch - requires a round trip to the server. It can be adjusted to optimize - performance and limit data transfer. - - .. note:: batch_size can not override MongoDB's internal limits on the - amount of data it will return to the client in a single batch (i.e - if you set batch size to 1,000,000,000, MongoDB will currently only - return 4-16MB of results per batch). - - Raises :exc:`TypeError` if `batch_size` is not an integer. - Raises :exc:`ValueError` if `batch_size` is less than ``0``. - - :Parameters: - - `batch_size`: The size of each batch of results requested. - """ - if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - - self.__batch_size = batch_size == 1 and 2 or batch_size - return self - - def _has_next(self): - """Returns `True` if the cursor has documents remaining from the - previous batch.""" - return len(self.__data) > 0 - - @property - def _post_batch_resume_token(self): - """Retrieve the postBatchResumeToken from the response to a - changeStream aggregate or getMore.""" - return self.__postbatchresumetoken - - def _maybe_pin_connection(self, sock_info): - client = self.__collection.database.client - if not client._should_pin_cursor(self.__session): - return - if not self.__sock_mgr: - sock_info.pin_cursor() - sock_mgr = _SocketManager(sock_info, False) - # Ensure the connection gets returned when the entire result is - # returned in the first batch. - if self.__id == 0: - sock_mgr.close() - else: - self.__sock_mgr = sock_mgr - - def __send_message(self, operation): - """Send a getmore message and handle the response.""" - client = self.__collection.database.client - try: - response = client._run_operation( - operation, self._unpack_response, address=self.__address - ) - except OperationFailure as exc: - if exc.code in _CURSOR_CLOSED_ERRORS: - # Don't send killCursors because the cursor is already closed. - self.__killed = True - # Return the session and pinned connection, if necessary. - self.close() - raise - except ConnectionFailure: - # Don't send killCursors because the cursor is already closed. - self.__killed = True - # Return the session and pinned connection, if necessary. 
- self.close() - raise - except Exception: - self.close() - raise - - if isinstance(response, PinnedResponse): - if not self.__sock_mgr: - self.__sock_mgr = _SocketManager(response.socket_info, response.more_to_come) - if response.from_command: - cursor = response.docs[0]["cursor"] - documents = cursor["nextBatch"] - self.__postbatchresumetoken = cursor.get("postBatchResumeToken") - self.__id = cursor["id"] - else: - documents = response.docs - self.__id = response.data.cursor_id - - if self.__id == 0: - self.close() - self.__data = deque(documents) - - def _unpack_response( - self, response, cursor_id, codec_options, user_fields=None, legacy_response=False - ): - return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) - - def _refresh(self): - """Refreshes the cursor with more data from the server. - - Returns the length of self.__data after refresh. Will exit early if - self.__data is already non-empty. Raises OperationFailure when the - cursor cannot be refreshed due to an error on the query. - """ - if len(self.__data) or self.__killed: - return len(self.__data) - - if self.__id: # Get More - dbname, collname = self.__ns.split(".", 1) - read_pref = self.__collection._read_preference_for(self.session) - self.__send_message( - self._getmore_class( - dbname, - collname, - self.__batch_size, - self.__id, - self.__collection.codec_options, - read_pref, - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, - False, - self.__comment, - ) - ) - else: # Cursor id is zero nothing else to return - self.__die(True) - - return len(self.__data) - - @property - def alive(self) -> bool: - """Does this cursor have the potential to return more data? - - Even if :attr:`alive` is ``True``, :meth:`next` can raise - :exc:`StopIteration`. Best to use a for loop:: - - for doc in collection.aggregate(pipeline): - print(doc) - - .. note:: :attr:`alive` can be True while iterating a cursor from - a failed server. In this case :attr:`alive` will return False after - :meth:`next` fails to retrieve the next batch of results from the - server. - """ - return bool(len(self.__data) or (not self.__killed)) - - @property - def cursor_id(self) -> int: - """Returns the id of the cursor.""" - return self.__id - - @property - def address(self) -> Optional[_Address]: - """The (host, port) of the server used, or None. - - .. versionadded:: 3.0 - """ - return self.__address - - @property - def session(self) -> Optional["ClientSession"]: - """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. - - .. versionadded:: 3.6 - """ - if self.__explicit_session: - return self.__session - return None - - def __iter__(self) -> Iterator[_DocumentType]: - return self - - def next(self) -> _DocumentType: - """Advance the cursor.""" - # Block until a document is returnable. 
- while self.alive: - doc = self._try_next(True) - if doc is not None: - return doc - - raise StopIteration - - __next__ = next - - def _try_next(self, get_more_allowed): - """Advance the cursor blocking for at most one getMore command.""" - if not len(self.__data) and not self.__killed and get_more_allowed: - self._refresh() - if len(self.__data): - return self.__data.popleft() - else: - return None - - def __enter__(self) -> "CommandCursor[_DocumentType]": - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - -class RawBatchCommandCursor(CommandCursor, Generic[_DocumentType]): - _getmore_class = _RawBatchGetMore - - def __init__( - self, - collection: "Collection[_DocumentType]", - cursor_info: Mapping[str, Any], - address: Optional[_Address], - batch_size: int = 0, - max_await_time_ms: Optional[int] = None, - session: Optional["ClientSession"] = None, - explicit_session: bool = False, - comment: Any = None, - ) -> None: - """Create a new cursor / iterator over raw batches of BSON data. - - Should not be called directly by application developers - - see :meth:`~pymongo.collection.Collection.aggregate_raw_batches` - instead. - - .. seealso:: The MongoDB documentation on `cursors `_. - """ - assert not cursor_info.get("firstBatch") - super(RawBatchCommandCursor, self).__init__( - collection, - cursor_info, - address, - batch_size, - max_await_time_ms, - session, - explicit_session, - comment, - ) - - def _unpack_response( - self, response, cursor_id, codec_options, user_fields=None, legacy_response=False - ): - raw_response = response.raw_response(cursor_id, user_fields=user_fields) - if not legacy_response: - # OP_MSG returns firstBatch/nextBatch documents as a BSON array - # Re-assemble the array of documents into a document stream - _convert_raw_document_lists_to_streams(raw_response[0]) - return raw_response - - def __getitem__(self, index: int) -> NoReturn: - raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") +__doc__ = original_doc +__all__ = ["CommandCursor", "RawBatchCommandCursor"] # noqa: F405 diff --git a/pymongo/common.py b/pymongo/common.py index 6ffc97f2a8..e23adac426 100644 --- a/pymongo/common.py +++ b/pymongo/common.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. 
You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -14,31 +14,33 @@ """Functions and classes common to multiple pymongo modules.""" +from __future__ import annotations import datetime import warnings from collections import OrderedDict, abc +from difflib import get_close_matches +from importlib.metadata import requires, version from typing import ( + TYPE_CHECKING, Any, Callable, - Dict, - List, + Iterator, Mapping, MutableMapping, NoReturn, Optional, Sequence, - Tuple, Type, Union, + overload, ) from urllib.parse import unquote_plus from bson import SON from bson.binary import UuidRepresentation -from bson.codec_options import CodecOptions, TypeRegistry +from bson.codec_options import CodecOptions, DatetimeConversion, TypeRegistry from bson.raw_bson import RawBSONDocument -from pymongo.auth import MECHANISMS from pymongo.compression_support import ( validate_compressors, validate_zlib_compression_level, @@ -49,21 +51,26 @@ from pymongo.read_concern import ReadConcern from pymongo.read_preferences import _MONGOS_MODES, _ServerMode from pymongo.server_api import ServerApi -from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean -ORDERED_TYPES: Sequence[Type] = (SON, OrderedDict) +if TYPE_CHECKING: + from pymongo.typings import _AgnosticClientSession + + +ORDERED_TYPES: Sequence[Type[Any]] = (SON, OrderedDict) # Defaults until we connect to a server and get updated limits. MAX_BSON_SIZE = 16 * (1024**2) -MAX_MESSAGE_SIZE: int = 2 * MAX_BSON_SIZE +MAX_MESSAGE_SIZE = 48 * 1000 * 1000 MIN_WIRE_VERSION = 0 MAX_WIRE_VERSION = 0 -MAX_WRITE_BATCH_SIZE = 1000 +MAX_WRITE_BATCH_SIZE = 100000 # What this version of PyMongo supports. -MIN_SUPPORTED_SERVER_VERSION = "3.6" -MIN_SUPPORTED_WIRE_VERSION = 6 -MAX_SUPPORTED_WIRE_VERSION = 17 +MIN_SUPPORTED_SERVER_VERSION = "4.2" +MIN_SUPPORTED_WIRE_VERSION = 8 +# MongoDB 8.0 +MAX_SUPPORTED_WIRE_VERSION = 25 # Frequency to call hello on servers, in seconds. HEARTBEAT_FREQUENCY = 10 @@ -130,8 +137,14 @@ # Default value for srvServiceName SRV_SERVICE_NAME = "mongodb" +# Default value for serverMonitoringMode +SERVER_MONITORING_MODE = "auto" # poll/stream/auto + +# Auth mechanism properties that must raise an error instead of warning if they invalidate. +_MECH_PROP_MUST_RAISE = ["CANONICALIZE_HOST_NAME"] + -def partition_node(node: str) -> Tuple[str, int]: +def partition_node(node: str) -> tuple[str, int]: """Split a host:port string into (host, int(port)) pair.""" host = node port = 27017 @@ -143,20 +156,23 @@ def partition_node(node: str) -> Tuple[str, int]: return host, port -def clean_node(node: str) -> Tuple[str, int]: +def clean_node(node: str) -> tuple[str, int]: """Split and normalize a node name from a hello response.""" host, port = partition_node(node) # Normalize hostname to lowercase, since DNS is case-insensitive: - # http://tools.ietf.org/html/rfc4343 + # https://tools.ietf.org/html/rfc4343 # This prevents useless rediscovery if "foo.com" is in the seed list but # "FOO.com" is in the hello response. 
return host.lower(), port -def raise_config_error(key: str, dummy: Any) -> NoReturn: +def raise_config_error(key: str, suggestions: Optional[list[str]] = None) -> NoReturn: """Raise ConfigurationError with the given key name.""" - raise ConfigurationError("Unknown option %s" % (key,)) + msg = f"Unknown option: {key}." + if suggestions: + msg += f" Did you mean one of ({', '.join(suggestions)}) or maybe a camelCase version of one? Refer to docstring." + raise ConfigurationError(msg) # Mapping of URI uuid representation options to valid subtypes. @@ -169,18 +185,11 @@ def raise_config_error(key: str, dummy: Any) -> NoReturn: } -def validate_boolean(option: str, value: Any) -> bool: - """Validates that 'value' is True or False.""" - if isinstance(value, bool): - return value - raise TypeError("%s must be True or False" % (option,)) - - def validate_boolean_or_string(option: str, value: Any) -> bool: """Validates that value is True, False, 'true', or 'false'.""" if isinstance(value, str): if value not in ("true", "false"): - raise ValueError("The value of %s must be 'true' or 'false'" % (option,)) + raise ValueError(f"The value of {option} must be 'true' or 'false'") return value == "true" return validate_boolean(option, value) @@ -193,15 +202,15 @@ def validate_integer(option: str, value: Any) -> int: try: return int(value) except ValueError: - raise ValueError("The value of %s must be an integer" % (option,)) - raise TypeError("Wrong type for %s, value must be an integer" % (option,)) + raise ValueError(f"The value of {option} must be an integer") from None + raise TypeError(f"Wrong type for {option}, value must be an integer, not {type(value)}") def validate_positive_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer, which does not include 0.""" val = validate_integer(option, value) if val <= 0: - raise ValueError("The value of %s must be a positive integer" % (option,)) + raise ValueError(f"The value of {option} must be a positive integer") return val @@ -209,7 +218,7 @@ def validate_non_negative_integer(option: str, value: Any) -> int: """Validate that 'value' is a positive integer or 0.""" val = validate_integer(option, value) if val < 0: - raise ValueError("The value of %s must be a non negative integer" % (option,)) + raise ValueError(f"The value of {option} must be a non negative integer") return val @@ -220,7 +229,7 @@ def validate_readable(option: str, value: Any) -> Optional[str]: # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting value = validate_string(option, value) - open(value, "r").close() + open(value).close() return value @@ -242,7 +251,7 @@ def validate_string(option: str, value: Any) -> str: """Validates that 'value' is an instance of `str`.""" if isinstance(value, str): return value - raise TypeError("Wrong type for %s, value must be an instance of str" % (option,)) + raise TypeError(f"Wrong type for {option}, value must be an instance of str, not {type(value)}") def validate_string_or_none(option: str, value: Any) -> Optional[str]: @@ -261,7 +270,9 @@ def validate_int_or_basestring(option: str, value: Any) -> Union[int, str]: return int(value) except ValueError: return value - raise TypeError("Wrong type for %s, value must be an integer or a string" % (option,)) + raise TypeError( + f"Wrong type for {option}, value must be an integer or a string, not {type(value)}" + ) def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[int, str]: 
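The rewritten raise_config_error above pairs with a new _get_validator helper (added further down in this file) that builds the suggestion list with difflib.get_close_matches. A rough standalone sketch of that lookup flow follows; the three-entry table and the lookup helper are hypothetical stand-ins for the real VALIDATORS map and validate(), and a plain ValueError stands in for ConfigurationError:

from difflib import get_close_matches
from typing import Any, Callable

# Hypothetical stand-in for the real VALIDATORS map; keys are the
# lower-cased option names.
VALIDATORS: dict[str, Callable[[str, Any], Any]] = {
    "maxpoolsize": lambda opt, v: int(v),
    "minpoolsize": lambda opt, v: int(v),
    "timeoutms": lambda opt, v: float(v),
}

def lookup(option: str) -> Callable[[str, Any], Any]:
    normed = option.lower()
    try:
        return VALIDATORS[normed]
    except KeyError:
        # cutoff=0.2 mirrors _get_validator: cast a wide net so that even
        # rough misspellings still produce a suggestion.
        suggestions = get_close_matches(normed, VALIDATORS, cutoff=0.2)
        msg = f"Unknown option: {option}."
        if suggestions:
            msg += f" Did you mean one of ({', '.join(suggestions)})?"
        raise ValueError(msg) from None

assert lookup("maxPoolSize")("maxPoolSize", "10") == 10
# lookup("maxPoolSizee") raises ValueError, suggesting "maxpoolsize" first.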
@@ -275,7 +286,7 @@ def validate_non_negative_int_or_basestring(option: Any, value: Any) -> Union[in return value return validate_non_negative_integer(option, val) raise TypeError( - "Wrong type for %s, value must be an non negative integer or a string" % (option,) + f"Wrong type for {option}, value must be a non negative integer or a string, not {type(value)}" ) @@ -283,18 +294,18 @@ def validate_positive_float(option: str, value: Any) -> float: """Validates that 'value' is a float, or can be converted to one, and is positive. """ - errmsg = "%s must be an integer or float" % (option,) + errmsg = f"{option} must be an integer or float" try: value = float(value) except ValueError: - raise ValueError(errmsg) + raise ValueError(errmsg) from None except TypeError: - raise TypeError(errmsg) + raise TypeError(errmsg) from None # float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at # one billion - this is a reasonable approximation for infinity if not 0 < value < 1e9: - raise ValueError("%s must be greater than 0 and less than one billion" % (option,)) + raise ValueError(f"{option} must be greater than 0 and less than one billion") return value @@ -323,7 +334,7 @@ def validate_timeout_or_zero(option: str, value: Any) -> float: config error. """ if value is None: - raise ConfigurationError("%s cannot be None" % (option,)) + raise ConfigurationError(f"{option} cannot be None") if value == 0 or value == "0": return 0 return validate_positive_float(option, value) / 1000.0 @@ -359,7 +370,7 @@ def validate_max_staleness(option: str, value: Any) -> int: def validate_read_preference(dummy: Any, value: Any) -> _ServerMode: """Validate a read preference.""" if not isinstance(value, _ServerMode): - raise TypeError("%r is not a read preference." % (value,)) + raise TypeError(f"{value!r} is not a read preference") return value @@ -371,14 +382,16 @@ def validate_read_preference_mode(dummy: Any, value: Any) -> _ServerMode: mode. """ if value not in _MONGOS_MODES: - raise ValueError("%s is not a valid read preference" % (value,)) + raise ValueError(f"{value} is not a valid read preference") return value def validate_auth_mechanism(option: str, value: Any) -> str: """Validate the authMechanism URI option.""" + from pymongo.auth_shared import MECHANISMS + if value not in MECHANISMS: - raise ValueError("%s must be in %s" % (option, tuple(MECHANISMS))) + raise ValueError(f"{option} must be in {tuple(MECHANISMS)}") return value @@ -388,18 +401,18 @@ def validate_uuid_representation(dummy: Any, value: Any) -> int: return _UUID_REPRESENTATIONS[value] except KeyError: raise ValueError( - "%s is an invalid UUID representation. " + f"{value} is an invalid UUID representation. 
" "Must be one of " - "%s" % (value, tuple(_UUID_REPRESENTATIONS)) - ) + f"{tuple(_UUID_REPRESENTATIONS)}" + ) from None -def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]]: +def validate_read_preference_tags(name: str, value: Any) -> list[dict[str, str]]: """Parse readPreferenceTags if passed as a client kwarg.""" if not isinstance(value, list): value = [value] - tag_sets: List = [] + tag_sets: list[dict[str, Any]] = [] for tag_set in value: if tag_set == "": tag_sets.append({}) @@ -411,51 +424,81 @@ def validate_read_preference_tags(name: str, value: Any) -> List[Dict[str, str]] tags[unquote_plus(key)] = unquote_plus(val) tag_sets.append(tags) except Exception: - raise ValueError("%r not a valid value for %s" % (tag_set, name)) + raise ValueError(f"{tag_set!r} not a valid value for {name}") from None return tag_sets _MECHANISM_PROPS = frozenset( - ["SERVICE_NAME", "CANONICALIZE_HOST_NAME", "SERVICE_REALM", "AWS_SESSION_TOKEN"] + [ + "SERVICE_NAME", + "SERVICE_HOST", + "CANONICALIZE_HOST_NAME", + "SERVICE_REALM", + "AWS_SESSION_TOKEN", + "ENVIRONMENT", + "TOKEN_RESOURCE", + ] ) -def validate_auth_mechanism_properties(option: str, value: Any) -> Dict[str, Union[bool, str]]: +def validate_auth_mechanism_properties(option: str, value: Any) -> dict[str, Union[bool, str]]: """Validate authMechanismProperties.""" + props: dict[str, Any] = {} + if not isinstance(value, str): + if not isinstance(value, dict): + raise ValueError( + f"Auth mechanism properties must be given as a string or a dictionary, not {type(value)}" + ) + for key, value in value.items(): # noqa: B020 + if isinstance(value, str): + props[key] = value + elif isinstance(value, bool): + props[key] = str(value).lower() + elif key in ["ALLOWED_HOSTS"] and isinstance(value, list): + props[key] = value + elif key in ["OIDC_CALLBACK", "OIDC_HUMAN_CALLBACK"]: + from pymongo.auth_oidc_shared import OIDCCallback + + if not isinstance(value, OIDCCallback): + raise ValueError(f"callback must be an OIDCCallback object, not {type(value)}") + props[key] = value + else: + raise ValueError(f"Invalid type for auth mechanism property {key}, {type(value)}") + return props + value = validate_string(option, value) - props: Dict[str, Any] = {} + value = unquote_plus(value) for opt in value.split(","): - try: - key, val = opt.split(":") - except ValueError: + key, _, val = opt.partition(":") + if not val: + raise ValueError("Malformed auth mechanism properties") + if key not in _MECHANISM_PROPS: # Try not to leak the token. - if "AWS_SESSION_TOKEN" in opt: - opt = ( - "AWS_SESSION_TOKEN:, did you forget " - "to percent-escape the token with quote_plus?" + if "AWS_SESSION_TOKEN" in key: + raise ValueError( + "auth mechanism properties must be " + "key:value pairs like AWS_SESSION_TOKEN:" ) + raise ValueError( - "auth mechanism properties must be " - "key:value pairs like SERVICE_NAME:" - "mongodb, not %s." % (opt,) - ) - if key not in _MECHANISM_PROPS: - raise ValueError( - "%s is not a supported auth " + f"{key} is not a supported auth " "mechanism property. Must be one of " - "%s." 
% (key, tuple(_MECHANISM_PROPS)) + f"{tuple(_MECHANISM_PROPS)}" ) + if key == "CANONICALIZE_HOST_NAME": - props[key] = validate_boolean_or_string(key, val) + from pymongo.auth_shared import _validate_canonicalize_host_name + + props[key] = _validate_canonicalize_host_name(val) else: - props[key] = unquote_plus(val) + props[key] = val return props def validate_document_class( option: str, value: Any -) -> Union[Type[MutableMapping], Type[RawBSONDocument]]: +) -> Union[Type[MutableMapping[str, Any]], Type[RawBSONDocument]]: """Validate the document_class option.""" # issubclass can raise TypeError for generic aliases like SON[str, Any]. # In that case we can use the base class for the comparison. @@ -467,9 +510,9 @@ def validate_document_class( is_mapping = issubclass(value.__origin__, abc.MutableMapping) if not is_mapping and not issubclass(value, RawBSONDocument): raise TypeError( - "%s must be dict, bson.son.SON, " + f"{option} must be dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or a " - "sublass of collections.MutableMapping" % (option,) + "subclass of collections.MutableMapping" ) return value @@ -477,18 +520,18 @@ def validate_document_class( def validate_type_registry(option: Any, value: Any) -> Optional[TypeRegistry]: """Validate the type_registry option.""" if value is not None and not isinstance(value, TypeRegistry): - raise TypeError("%s must be an instance of %s" % (option, TypeRegistry)) + raise TypeError(f"{option} must be an instance of {TypeRegistry}") return value -def validate_list(option: str, value: Any) -> List: +def validate_list(option: str, value: Any) -> list[Any]: """Validates that 'value' is a list.""" if not isinstance(value, list): - raise TypeError("%s must be a list" % (option,)) + raise TypeError(f"{option} must be a list, not {type(value)}") return value -def validate_list_or_none(option: Any, value: Any) -> Optional[List]: +def validate_list_or_none(option: Any, value: Any) -> Optional[list[Any]]: """Validates that 'value' is a list or None.""" if value is None: return value @@ -499,9 +542,9 @@ def validate_list_or_mapping(option: Any, value: Any) -> None: """Validates that 'value' is a list or a document.""" if not isinstance(value, (abc.Mapping, list)): raise TypeError( - "%s must either be a list or an instance of dict, " + f"{option} must either be a list or an instance of dict, " "bson.son.SON, or any other type that inherits from " - "collections.Mapping" % (option,) + "collections.Mapping" ) @@ -509,9 +552,9 @@ def validate_is_mapping(option: str, value: Any) -> None: """Validate the type of method arguments that expect a document.""" if not isinstance(value, abc.Mapping): raise TypeError( - "%s must be an instance of dict, bson.son.SON, or " + f"{option} must be an instance of dict, bson.son.SON, or " "any other type that inherits from " - "collections.Mapping" % (option,) + "collections.Mapping" ) @@ -519,10 +562,10 @@ def validate_is_document_type(option: str, value: Any) -> None: """Validate the type of method arguments that expect a MongoDB document.""" if not isinstance(value, (abc.MutableMapping, RawBSONDocument)): raise TypeError( - "%s must be an instance of dict, bson.son.SON, " + f"{option} must be an instance of dict, bson.son.SON, " "bson.raw_bson.RawBSONDocument, or " "a type that inherits from " - "collections.MutableMapping" % (option,) + "collections.MutableMapping" ) @@ -533,7 +576,7 @@ def validate_appname_or_none(option: str, value: Any) -> Optional[str]: validate_string(option, value) # We need length in bytes, so encode 
utf8 first. if len(value.encode("utf-8")) > 128: - raise ValueError("%s must be <= 128 bytes" % (option,)) + raise ValueError(f"{option} must be <= 128 bytes") return value @@ -542,7 +585,7 @@ def validate_driver_or_none(option: Any, value: Any) -> Optional[DriverInfo]: if value is None: return value if not isinstance(value, DriverInfo): - raise TypeError("%s must be an instance of DriverInfo" % (option,)) + raise TypeError(f"{option} must be an instance of DriverInfo") return value @@ -551,16 +594,16 @@ def validate_server_api_or_none(option: Any, value: Any) -> Optional[ServerApi]: if value is None: return value if not isinstance(value, ServerApi): - raise TypeError("%s must be an instance of ServerApi" % (option,)) + raise TypeError(f"{option} must be an instance of ServerApi, not {type(value)}") return value -def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable]: +def validate_is_callable_or_none(option: Any, value: Any) -> Optional[Callable[..., Any]]: """Validates that 'value' is a callable.""" if value is None: return value if not callable(value): - raise ValueError("%s must be a callable" % (option,)) + raise ValueError(f"{option} must be a callable, not {type(value)}") return value @@ -594,9 +637,9 @@ def validate_unicode_decode_error_handler(dummy: Any, value: str) -> str: """Validate the Unicode decode error handler option of CodecOptions.""" if value not in _UNICODE_DECODE_ERROR_HANDLERS: raise ValueError( - "%s is an invalid Unicode decode error handler. " + f"{value} is an invalid Unicode decode error handler. " "Must be one of " - "%s" % (value, tuple(_UNICODE_DECODE_ERROR_HANDLERS)) + f"{tuple(_UNICODE_DECODE_ERROR_HANDLERS)}" ) return value @@ -615,14 +658,40 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A from pymongo.encryption_options import AutoEncryptionOpts if not isinstance(value, AutoEncryptionOpts): - raise TypeError("%s must be an instance of AutoEncryptionOpts" % (option,)) + raise TypeError(f"{option} must be an instance of AutoEncryptionOpts, not {type(value)}") return value +def validate_datetime_conversion(option: Any, value: Any) -> Optional[DatetimeConversion]: + """Validate a DatetimeConversion string.""" + if value is None: + return DatetimeConversion.DATETIME + + if isinstance(value, str): + if value.isdigit(): + return DatetimeConversion(int(value)) + return DatetimeConversion[value] + elif isinstance(value, int): + return DatetimeConversion(value) + + raise TypeError( + f"{option} must be a str or int representing DatetimeConversion, not {type(value)}" + ) + + +def validate_server_monitoring_mode(option: str, value: str) -> str: + """Validate the serverMonitoringMode option.""" + if value not in {"auto", "stream", "poll"}: + raise ValueError( + f'{option}={value!r} is invalid. Must be one of "auto", "stream", or "poll"' + ) + return value + + # Dictionary where keys are the names of public URI options, and values # are lists of aliases for that option. -URI_OPTIONS_ALIAS_MAP: Dict[str, List[str]] = { +URI_OPTIONS_ALIAS_MAP: dict[str, list[str]] = { "tls": ["ssl"], } @@ -630,7 +699,7 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A # are functions that validate user-input values for that option. If an option # alias uses a different validator than its public counterpart, it should be # included here as a key, value pair. 
-URI_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { +URI_OPTIONS_VALIDATOR_MAP: dict[str, Callable[[Any, Any], Any]] = { "appname": validate_appname_or_none, "authmechanism": validate_auth_mechanism, "authmechanismproperties": validate_auth_mechanism_properties, @@ -668,11 +737,12 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A "srvservicename": validate_string, "srvmaxhosts": validate_non_negative_integer, "timeoutms": validate_timeoutms, + "servermonitoringmode": validate_server_monitoring_mode, } # Dictionary where keys are the names of URI options specific to pymongo, # and values are functions that validate user-input values for those options. -NONSPEC_OPTIONS_VALIDATOR_MAP: Dict[str, Callable[[Any, Any], Any]] = { +NONSPEC_OPTIONS_VALIDATOR_MAP: dict[str, Callable[[Any, Any], Any]] = { "connect": validate_boolean_or_string, "driver": validate_driver_or_none, "server_api": validate_server_api_or_none, @@ -684,12 +754,13 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A "uuidrepresentation": validate_uuid_representation, "waitqueuemultiple": validate_non_negative_integer_or_none, "waitqueuetimeoutms": validate_timeout_or_none, + "datetime_conversion": validate_datetime_conversion, } # Dictionary where keys are the names of keyword-only options for the # MongoClient constructor, and values are functions that validate user-input # values for those options. -KW_VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = { +KW_VALIDATORS: dict[str, Callable[[Any, Any], Any]] = { "document_class": validate_document_class, "type_registry": validate_type_registry, "read_preference": validate_read_preference, @@ -699,20 +770,21 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A "password": validate_string_or_none, "server_selector": validate_is_callable_or_none, "auto_encryption_opts": validate_auto_encryption_opts_or_none, + "authoidcallowedhosts": validate_list, } # Dictionary where keys are any URI option name, and values are the # internally-used names of that URI option. Options with only one name # variant need not be included here. Options whose public and internal # names are the same need not be included here. -INTERNAL_URI_OPTION_NAME_MAP: Dict[str, str] = { +INTERNAL_URI_OPTION_NAME_MAP: dict[str, str] = { "ssl": "tls", } # Map from deprecated URI option names to a tuple indicating the method of # their deprecation and any additional information that may be needed to # construct the warning message. -URI_OPTIONS_DEPRECATION_MAP: Dict[str, Tuple[str, str]] = { +URI_OPTIONS_DEPRECATION_MAP: dict[str, tuple[str, str]] = { # format: : (, ), # Supported values: # - 'renamed': should be the new option name. Note that case is @@ -731,11 +803,11 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A URI_OPTIONS_VALIDATOR_MAP[alias] = URI_OPTIONS_VALIDATOR_MAP[optname] # Map containing all URI option and keyword argument validators. -VALIDATORS: Dict[str, Callable[[Any, Any], Any]] = URI_OPTIONS_VALIDATOR_MAP.copy() +VALIDATORS: dict[str, Callable[[Any, Any], Any]] = URI_OPTIONS_VALIDATOR_MAP.copy() VALIDATORS.update(KW_VALIDATORS) # List of timeout-related options. 
-TIMEOUT_OPTIONS: List[str] = [ +TIMEOUT_OPTIONS: list[str] = [ "connecttimeoutms", "heartbeatfrequencyms", "maxidletimems", @@ -745,22 +817,31 @@ def validate_auto_encryption_opts_or_none(option: Any, value: Any) -> Optional[A "waitqueuetimeoutms", ] - _AUTH_OPTIONS = frozenset(["authmechanismproperties"]) -def validate_auth_option(option: str, value: Any) -> Tuple[str, Any]: +def validate_auth_option(option: str, value: Any) -> tuple[str, Any]: """Validate optional authentication parameters.""" lower, value = validate(option, value) if lower not in _AUTH_OPTIONS: - raise ConfigurationError("Unknown authentication option: %s" % (option,)) + raise ConfigurationError(f"Unknown option: {option}. Must be in {_AUTH_OPTIONS}") return option, value -def validate(option: str, value: Any) -> Tuple[str, Any]: +def _get_validator( + key: str, validators: dict[str, Callable[[Any, Any], Any]], normed_key: Optional[str] = None +) -> Callable[[Any, Any], Any]: + normed_key = normed_key or key + try: + return validators[normed_key] + except KeyError: + suggestions = get_close_matches(normed_key, validators, cutoff=0.2) + raise_config_error(key, suggestions) + + +def validate(option: str, value: Any) -> tuple[str, Any]: """Generic validation function.""" - lower = option.lower() - validator = VALIDATORS.get(lower, raise_config_error) + validator = _get_validator(option, VALIDATORS, normed_key=option.lower()) value = validator(option, value) return option, value @@ -771,46 +852,56 @@ def get_validated_options( """Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed. - :Parameters: - - `opts`: A dict containing MongoDB URI options. - - `warn` (optional): If ``True`` then warnings will be logged and + :param options: A dict containing MongoDB URI options. + :param warn: If ``True`` then warnings will be logged and invalid options will be ignored. Otherwise, invalid options will cause errors.
""" validated_options: MutableMapping[str, Any] if isinstance(options, _CaseInsensitiveDictionary): validated_options = _CaseInsensitiveDictionary() - get_normed_key = lambda x: x # noqa: E731 - get_setter_key = lambda x: options.cased_key(x) # noqa: E731 + + def get_normed_key(x: str) -> str: + return x + + def get_setter_key(x: str) -> str: + return options.cased_key(x) + else: validated_options = {} - get_normed_key = lambda x: x.lower() # noqa: E731 - get_setter_key = lambda x: x # noqa: E731 + + def get_normed_key(x: str) -> str: + return x.lower() + + def get_setter_key(x: str) -> str: + return x for opt, value in options.items(): normed_key = get_normed_key(opt) try: - validator = URI_OPTIONS_VALIDATOR_MAP.get(normed_key, raise_config_error) - value = validator(opt, value) + validator = _get_validator(opt, URI_OPTIONS_VALIDATOR_MAP, normed_key=normed_key) + validated = validator(opt, value) except (ValueError, TypeError, ConfigurationError) as exc: + if ( + normed_key == "authmechanismproperties" + and any(p in str(exc) for p in _MECH_PROP_MUST_RAISE) + and "is not a supported auth mechanism property" not in str(exc) + ): + raise if warn: - warnings.warn(str(exc)) + warnings.warn(str(exc), stacklevel=2) else: raise else: - validated_options[get_setter_key(normed_key)] = value + validated_options[get_setter_key(normed_key)] = validated return validated_options -def _esc_coll_name(encrypted_fields, name): +def _esc_coll_name(encrypted_fields: Mapping[str, Any], name: str) -> Any: return encrypted_fields.get("escCollection", f"enxcol_.{name}.esc") -def _ecc_coll_name(encrypted_fields, name): - return encrypted_fields.get("eccCollection", f"enxcol_.{name}.ecc") - - -def _ecoc_coll_name(encrypted_fields, name): +def _ecoc_coll_name(encrypted_fields: Mapping[str, Any], name: str) -> Any: return encrypted_fields.get("ecocCollection", f"enxcol_.{name}.ecoc") @@ -818,7 +909,7 @@ def _ecoc_coll_name(encrypted_fields, name): WRITE_CONCERN_OPTIONS = frozenset(["w", "wtimeout", "wtimeoutms", "fsync", "j", "journal"]) -class BaseObject(object): +class BaseObject: """A base class that provides attributes and methods common to multiple pymongo classes. @@ -827,39 +918,41 @@ class BaseObject(object): def __init__( self, - codec_options: CodecOptions, + codec_options: CodecOptions[Any], read_preference: _ServerMode, write_concern: WriteConcern, read_concern: ReadConcern, ) -> None: if not isinstance(codec_options, CodecOptions): raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") - self.__codec_options = codec_options + self._codec_options = codec_options if not isinstance(read_preference, _ServerMode): raise TypeError( - "%r is not valid for read_preference. See " + f"{read_preference!r} is not valid for read_preference. See " "pymongo.read_preferences for valid " - "options." % (read_preference,) + "options." 
) - self.__read_preference = read_preference + self._read_preference = read_preference if not isinstance(write_concern, WriteConcern): raise TypeError( - "write_concern must be an instance of pymongo.write_concern.WriteConcern" + f"write_concern must be an instance of pymongo.write_concern.WriteConcern, not {type(write_concern)}" ) - self.__write_concern = write_concern + self._write_concern = write_concern if not isinstance(read_concern, ReadConcern): - raise TypeError("read_concern must be an instance of pymongo.read_concern.ReadConcern") - self.__read_concern = read_concern + raise TypeError( + f"read_concern must be an instance of pymongo.read_concern.ReadConcern, not {type(read_concern)}" + ) + self._read_concern = read_concern @property - def codec_options(self) -> CodecOptions: + def codec_options(self) -> CodecOptions[Any]: """Read only access to the :class:`~bson.codec_options.CodecOptions` of this instance. """ - return self.__codec_options + return self._codec_options @property def write_concern(self) -> WriteConcern: @@ -869,9 +962,9 @@ def write_concern(self) -> WriteConcern: .. versionchanged:: 3.0 The :attr:`write_concern` attribute is now read only. """ - return self.__write_concern + return self._write_concern - def _write_concern_for(self, session): + def _write_concern_for(self, session: Optional[_AgnosticClientSession]) -> WriteConcern: """Read only access to the write concern of this instance or session.""" # Override this operation's write concern with the transaction's. if session and session.in_transaction: @@ -885,14 +978,14 @@ def read_preference(self) -> _ServerMode: .. versionchanged:: 3.0 The :attr:`read_preference` attribute is now read only. """ - return self.__read_preference + return self._read_preference - def _read_preference_for(self, session): + def _read_preference_for(self, session: Optional[_AgnosticClientSession]) -> _ServerMode: """Read only access to the read preference of this instance or session.""" # Override this operation's read preference with the transaction's. if session: - return session._txn_read_preference() or self.__read_preference - return self.__read_preference + return session._txn_read_preference() or self._read_preference + return self._read_preference @property def read_concern(self) -> ReadConcern: @@ -901,69 +994,77 @@ def read_concern(self) -> ReadConcern: .. 
versionadded:: 3.2 """ - return self.__read_concern + return self._read_concern -class _CaseInsensitiveDictionary(abc.MutableMapping): - def __init__(self, *args, **kwargs): - self.__casedkeys = {} - self.__data = {} +class _CaseInsensitiveDictionary(MutableMapping[str, Any]): + def __init__(self, *args: Any, **kwargs: Any): + self.__casedkeys: dict[str, Any] = {} + self.__data: dict[str, Any] = {} self.update(dict(*args, **kwargs)) - def __contains__(self, key): + def __contains__(self, key: str) -> bool: # type: ignore[override] return key.lower() in self.__data - def __len__(self): + def __len__(self) -> int: return len(self.__data) - def __iter__(self): + def __iter__(self) -> Iterator[str]: return (key for key in self.__casedkeys) - def __repr__(self): + def __repr__(self) -> str: return str({self.__casedkeys[k]: self.__data[k] for k in self}) - def __setitem__(self, key, value): + def __setitem__(self, key: str, value: Any) -> None: lc_key = key.lower() self.__casedkeys[lc_key] = key self.__data[lc_key] = value - def __getitem__(self, key): + def __getitem__(self, key: str) -> Any: return self.__data[key.lower()] - def __delitem__(self, key): + def __delitem__(self, key: str) -> None: lc_key = key.lower() del self.__casedkeys[lc_key] del self.__data[lc_key] - def __eq__(self, other): + def __eq__(self, other: Any) -> bool: if not isinstance(other, abc.Mapping): return NotImplemented if len(self) != len(other): return False - for key in other: + for key in other: # noqa: SIM110 if self[key] != other[key]: return False return True - def get(self, key, default=None): + def get(self, key: str, default: Optional[Any] = None) -> Any: return self.__data.get(key.lower(), default) - def pop(self, key, *args, **kwargs): + def pop(self, key: str, *args: Any, **kwargs: Any) -> Any: lc_key = key.lower() self.__casedkeys.pop(lc_key, None) return self.__data.pop(lc_key, *args, **kwargs) - def popitem(self): + def popitem(self) -> tuple[str, Any]: lc_key, cased_key = self.__casedkeys.popitem() value = self.__data.pop(lc_key) return cased_key, value - def clear(self): + def clear(self) -> None: self.__casedkeys.clear() self.__data.clear() - def setdefault(self, key, default=None): + @overload + def setdefault(self, key: str, default: None = None) -> Optional[Any]: + ... + + @overload + def setdefault(self, key: str, default: Any) -> Any: + ... 
+ + def setdefault(self, key: str, default: Optional[Any] = None) -> Optional[Any]: lc_key = key.lower() if key in self: return self.__data[lc_key] @@ -972,7 +1073,7 @@ def setdefault(self, key, default=None): self.__data[lc_key] = default return default - def update(self, other): + def update(self, other: Mapping[str, Any]) -> None: # type: ignore[override] if isinstance(other, _CaseInsensitiveDictionary): for key in other: self[other.cased_key(key)] = other[key] @@ -980,5 +1081,103 @@ def update(self, other): for key in other: self[key] = other[key] - def cased_key(self, key): + def cased_key(self, key: str) -> Any: return self.__casedkeys[key.lower()] + + +def has_c() -> bool: + """Is the C extension installed?""" + try: + from pymongo import _cmessage # type: ignore[attr-defined] # noqa: F401 + + return True + except ImportError: + return False + + +class Version(tuple[int, ...]): + """A class that can be used to compare version strings.""" + + def __new__(cls, *version: int) -> Version: + padded_version = cls._padded(version, 4) + return super().__new__(cls, tuple(padded_version)) + + @classmethod + def _padded(cls, iter: Any, length: int, padding: int = 0) -> list[int]: + as_list = list(iter) + if len(as_list) < length: + for _ in range(length - len(as_list)): + as_list.append(padding) + return as_list + + @classmethod + def from_string(cls, version_string: str) -> Version: + mod = 0 + bump_patch_level = False + if version_string.endswith("+"): + version_string = version_string[0:-1] + mod = 1 + elif version_string.endswith("-pre-"): + version_string = version_string[0:-5] + mod = -1 + elif version_string.endswith("-"): + version_string = version_string[0:-1] + mod = -1 + # Deal with .devX substrings + if ".dev" in version_string: + version_string = version_string[0 : version_string.find(".dev")] + mod = -1 + # Deal with '-rcX' substrings + if "-rc" in version_string: + version_string = version_string[0 : version_string.find("-rc")] + mod = -1 + # Deal with git describe generated substrings + elif "-" in version_string: + version_string = version_string[0 : version_string.find("-")] + mod = -1 + bump_patch_level = True + + version = [int(part) for part in version_string.split(".")] + version = cls._padded(version, 3) + # Make from_string and from_version_array agree. 
For example: + # MongoDB Enterprise > db.runCommand('buildInfo').versionArray + # [ 3, 2, 1, -100 ] + # MongoDB Enterprise > db.runCommand('buildInfo').version + # 3.2.0-97-g1ef94fe + if bump_patch_level: + version[-1] += 1 + version.append(mod) + + return Version(*version) + + @classmethod + def from_version_array(cls, version_array: Any) -> Version: + version = list(version_array) + if version[-1] < 0: + version[-1] = -1 + version = cls._padded(version, 3) + return Version(*version) + + def at_least(self, *other_version: Any) -> bool: + return self >= Version(*other_version) + + def __str__(self) -> str: + return ".".join(map(str, self)) + + +def check_for_min_version(package_name: str) -> tuple[str, str, bool]: + """Test whether an installed package is of the desired version.""" + package_version_str = version(package_name) + package_version = Version.from_string(package_version_str) + # Dependency is expected to be in one of the forms: + # "pymongocrypt<2.0.0,>=1.13.0; extra == 'encryption'" + # 'dnspython<3.0.0,>=1.16.0' + # + requirements = requires("pymongo") + assert requirements is not None + requirement = [i for i in requirements if i.startswith(package_name)][0] # noqa: RUF015 + if ";" in requirement: + requirement = requirement.split(";")[0] + required_version = requirement[requirement.find(">=") + 2 :] + is_valid = package_version >= Version.from_string(required_version) + return package_version_str, required_version, is_valid diff --git a/pymongo/compression_support.py b/pymongo/compression_support.py index c9632a43d3..64ffe052ec 100644 --- a/pymongo/compression_support.py +++ b/pymongo/compression_support.py @@ -4,51 +4,57 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import warnings +from typing import Any, Iterable, Optional, Union -try: - import snappy +from pymongo.hello import HelloCompat +from pymongo.helpers_shared import _SENSITIVE_COMMANDS - _HAVE_SNAPPY = True -except ImportError: - # python-snappy isn't available. - _HAVE_SNAPPY = False +_SUPPORTED_COMPRESSORS = {"snappy", "zlib", "zstd"} +_NO_COMPRESSION = {HelloCompat.CMD, HelloCompat.LEGACY_CMD} +_NO_COMPRESSION.update(_SENSITIVE_COMMANDS) -try: - import zlib - _HAVE_ZLIB = True -except ImportError: - # Python built without zlib support. 
- _HAVE_ZLIB = False +def _have_snappy() -> bool: + try: + import snappy # type:ignore[import-untyped] # noqa: F401 -try: - from zstandard import ZstdCompressor, ZstdDecompressor + return True + except ImportError: + return False - _HAVE_ZSTD = True -except ImportError: - _HAVE_ZSTD = False -from pymongo.hello import HelloCompat -from pymongo.monitoring import _SENSITIVE_COMMANDS +def _have_zlib() -> bool: + try: + import zlib # noqa: F401 -_SUPPORTED_COMPRESSORS = set(["snappy", "zlib", "zstd"]) -_NO_COMPRESSION = set([HelloCompat.CMD, HelloCompat.LEGACY_CMD]) -_NO_COMPRESSION.update(_SENSITIVE_COMMANDS) + return True + except ImportError: + return False -def validate_compressors(dummy, value): +def _have_zstd() -> bool: + try: + import zstandard # noqa: F401 + + return True + except ImportError: + return False + + +def validate_compressors(dummy: Any, value: Union[str, Iterable[str]]) -> list[str]: try: # `value` is string. - compressors = value.split(",") + compressors = value.split(",") # type: ignore[union-attr] except AttributeError: # `value` is an iterable. compressors = list(value) @@ -56,44 +62,49 @@ def validate_compressors(dummy, value): for compressor in compressors[:]: if compressor not in _SUPPORTED_COMPRESSORS: compressors.remove(compressor) - warnings.warn("Unsupported compressor: %s" % (compressor,)) - elif compressor == "snappy" and not _HAVE_SNAPPY: + warnings.warn(f"Unsupported compressor: {compressor}", stacklevel=2) + elif compressor == "snappy" and not _have_snappy(): compressors.remove(compressor) warnings.warn( "Wire protocol compression with snappy is not available. " - "You must install the python-snappy module for snappy support." + "You must install the python-snappy module for snappy support.", + stacklevel=2, ) - elif compressor == "zlib" and not _HAVE_ZLIB: + elif compressor == "zlib" and not _have_zlib(): compressors.remove(compressor) warnings.warn( "Wire protocol compression with zlib is not available. " - "The zlib module is not available." + "The zlib module is not available.", + stacklevel=2, ) - elif compressor == "zstd" and not _HAVE_ZSTD: + elif compressor == "zstd" and not _have_zstd(): compressors.remove(compressor) warnings.warn( "Wire protocol compression with zstandard is not available. " - "You must install the zstandard module for zstandard support." + "You must install the zstandard module for zstandard support.", + stacklevel=2, ) return compressors -def validate_zlib_compression_level(option, value): +def validate_zlib_compression_level(option: str, value: Any) -> int: try: level = int(value) except Exception: - raise TypeError("%s must be an integer, not %r." % (option, value)) + raise TypeError(f"{option} must be an integer, not {value!r}") from None if level < -1 or level > 9: raise ValueError("%s must be between -1 and 9, not %d." 
% (option, level)) return level -class CompressionSettings(object): - def __init__(self, compressors, zlib_compression_level): +class CompressionSettings: + def __init__(self, compressors: list[str], zlib_compression_level: int): self.compressors = compressors self.zlib_compression_level = zlib_compression_level - def get_compression_context(self, compressors): + def get_compression_context( + self, compressors: Optional[list[str]] + ) -> Union[SnappyContext, ZlibContext, ZstdContext, None]: if compressors: chosen = compressors[0] if chosen == "snappy": @@ -102,48 +113,63 @@ def get_compression_context(self, compressors): return ZlibContext(self.zlib_compression_level) elif chosen == "zstd": return ZstdContext() + return None + return None -class SnappyContext(object): +class SnappyContext: compressor_id = 1 @staticmethod - def compress(data): + def compress(data: bytes) -> bytes: + import snappy + return snappy.compress(data) -class ZlibContext(object): +class ZlibContext: compressor_id = 2 - def __init__(self, level): + def __init__(self, level: int): self.level = level def compress(self, data: bytes) -> bytes: + import zlib + return zlib.compress(data, self.level) -class ZstdContext(object): +class ZstdContext: compressor_id = 3 @staticmethod - def compress(data): + def compress(data: bytes) -> bytes: # ZstdCompressor is not thread safe. # TODO: Use a pool? - return ZstdCompressor().compress(data) + import zstandard + + return zstandard.ZstdCompressor().compress(data) -def decompress(data, compressor_id): + +def decompress(data: bytes | memoryview, compressor_id: int) -> bytes: if compressor_id == SnappyContext.compressor_id: # python-snappy doesn't support the buffer interface. # https://github.com/andrix/python-snappy/issues/65 # This only matters when data is a memoryview since # id(bytes(data)) == id(data) when data is a bytes. + import snappy + return snappy.uncompress(bytes(data)) elif compressor_id == ZlibContext.compressor_id: + import zlib + return zlib.decompress(data) elif compressor_id == ZstdContext.compressor_id: # ZstdDecompressor is not thread safe. # TODO: Use a pool? - return ZstdDecompressor().decompress(data) + import zstandard + + return zstandard.ZstdDecompressor().decompress(data) else: raise ValueError("Unknown compressorId %d" % (compressor_id,)) diff --git a/pymongo/cursor.py b/pymongo/cursor.py index 2a85f1d82a..869adddc37 100644 --- a/pymongo/cursor.py +++ b/pymongo/cursor.py @@ -1,4 +1,4 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,1334 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
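# A minimal sketch of the lazy-import compression helpers from the
# pymongo/compression_support.py hunks above, assuming a standard CPython
# where zlib is always importable. Unknown compressor names are dropped with
# a warning, and snappy/zstd are dropped unless their optional modules are
# installed, so zlib is typically what survives validation.
from pymongo.compression_support import (
    ZlibContext,
    decompress,
    validate_compressors,
)

# "bogus" is removed (with a warning), leaving the built-in zlib compressor.
assert validate_compressors("compressors", "zlib,bogus") == ["zlib"]

# Round-trip a payload through the zlib context (compressor_id == 2); the
# compress/decompress pairing mirrors the context classes defined above.
ctx = ZlibContext(level=-1)
payload = b"hello pymongo" * 64
assert decompress(ctx.compress(payload), ZlibContext.compressor_id) == payload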
-"""Cursor class to iterate over Mongo query results.""" -import copy -import threading -import warnings -from collections import deque -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Generic, - Iterable, - List, - Mapping, - NoReturn, - Optional, - Sequence, - Tuple, - Union, - cast, - overload, -) +"""Re-import of synchronous Cursor API for compatibility.""" +from __future__ import annotations -from bson import RE_TYPE, _convert_raw_document_lists_to_streams -from bson.code import Code -from bson.son import SON -from pymongo import helpers -from pymongo.collation import validate_collation_or_none -from pymongo.common import ( - validate_boolean, - validate_is_document_type, - validate_is_mapping, -) -from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure -from pymongo.message import ( - _CursorAddress, - _GetMore, - _Query, - _RawBatchGetMore, - _RawBatchQuery, -) -from pymongo.response import PinnedResponse -from pymongo.typings import _CollationIn, _DocumentType +from pymongo.cursor_shared import * # noqa: F403 +from pymongo.synchronous.cursor import * # noqa: F403 +from pymongo.synchronous.cursor import __doc__ as original_doc -# These errors mean that the server has already killed the cursor so there is -# no need to send killCursors. -_CURSOR_CLOSED_ERRORS = frozenset( - [ - 43, # CursorNotFound - 50, # MaxTimeMSExpired - 175, # QueryPlanKilled - 237, # CursorKilled - # On a tailable cursor, the following errors mean the capped collection - # rolled over. - # MongoDB 2.6: - # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} - 28617, - # MongoDB 3.0: - # {'$err': 'getMore executor error: UnknownError no details available', - # 'code': 17406, 'ok': 0} - 17406, - # MongoDB 3.2 + 3.4: - # {'ok': 0.0, 'errmsg': 'GetMore command executor error: - # CappedPositionLost: CollectionScan died due to failure to restore - # tailable cursor position. Last seen record id: RecordId(3)', - # 'code': 96} - 96, - # MongoDB 3.6+: - # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to - # restore tailable cursor position. Last seen record id: RecordId(3)"', - # 'code': 136, 'codeName': 'CappedPositionLost'} - 136, - ] -) - -_QUERY_OPTIONS = { - "tailable_cursor": 2, - "secondary_okay": 4, - "oplog_replay": 8, - "no_timeout": 16, - "await_data": 32, - "exhaust": 64, - "partial": 128, -} - - -class CursorType(object): - NON_TAILABLE = 0 - """The standard cursor type.""" - - TAILABLE = _QUERY_OPTIONS["tailable_cursor"] - """The tailable cursor type. - - Tailable cursors are only for use with capped collections. They are not - closed when the last data is retrieved but are kept open and the cursor - location marks the final document position. If more data is received - iteration of the cursor will continue from the last document received. - """ - - TAILABLE_AWAIT = TAILABLE | _QUERY_OPTIONS["await_data"] - """A tailable cursor with the await option set. - - Creates a tailable cursor that will wait for a few seconds after returning - the full result set so that it can capture and return additional data added - during the query. - """ - - EXHAUST = _QUERY_OPTIONS["exhaust"] - """An exhaust cursor. - - MongoDB will stream batched results to the client without waiting for the - client to request each batch, reducing latency. 
- """ - - -class _SocketManager(object): - """Used with exhaust cursors to ensure the socket is returned.""" - - def __init__(self, sock, more_to_come): - self.sock = sock - self.more_to_come = more_to_come - self.closed = False - self.lock = threading.Lock() - - def update_exhaust(self, more_to_come): - self.more_to_come = more_to_come - - def close(self): - """Return this instance's socket to the connection pool.""" - if not self.closed: - self.closed = True - self.sock.unpin() - self.sock = None - - -_Sort = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] -_Hint = Union[str, _Sort] - - -if TYPE_CHECKING: - from pymongo.client_session import ClientSession - from pymongo.collection import Collection - - -class Cursor(Generic[_DocumentType]): - """A cursor / iterator over Mongo query results.""" - - _query_class = _Query - _getmore_class = _GetMore - - def __init__( - self, - collection: "Collection[_DocumentType]", - filter: Optional[Mapping[str, Any]] = None, - projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, - skip: int = 0, - limit: int = 0, - no_cursor_timeout: bool = False, - cursor_type: int = CursorType.NON_TAILABLE, - sort: Optional[_Sort] = None, - allow_partial_results: bool = False, - oplog_replay: bool = False, - batch_size: int = 0, - collation: Optional[_CollationIn] = None, - hint: Optional[_Hint] = None, - max_scan: Optional[int] = None, - max_time_ms: Optional[int] = None, - max: Optional[_Sort] = None, - min: Optional[_Sort] = None, - return_key: Optional[bool] = None, - show_record_id: Optional[bool] = None, - snapshot: Optional[bool] = None, - comment: Optional[Any] = None, - session: Optional["ClientSession"] = None, - allow_disk_use: Optional[bool] = None, - let: Optional[bool] = None, - ) -> None: - """Create a new cursor. - - Should not be called directly by application developers - see - :meth:`~pymongo.collection.Collection.find` instead. - - .. seealso:: The MongoDB documentation on `cursors `_. - """ - # Initialize all attributes used in __del__ before possibly raising - # an error to avoid attribute errors during garbage collection. 
- self.__collection: Collection[_DocumentType] = collection - self.__id: Any = None - self.__exhaust = False - self.__sock_mgr: Any = None - self.__killed = False - self.__session: Optional["ClientSession"] - - if session: - self.__session = session - self.__explicit_session = True - else: - self.__session = None - self.__explicit_session = False - - spec: Mapping[str, Any] = filter or {} - validate_is_mapping("filter", spec) - if not isinstance(skip, int): - raise TypeError("skip must be an instance of int") - if not isinstance(limit, int): - raise TypeError("limit must be an instance of int") - validate_boolean("no_cursor_timeout", no_cursor_timeout) - if no_cursor_timeout and not self.__explicit_session: - warnings.warn( - "use an explicit session with no_cursor_timeout=True " - "otherwise the cursor may still timeout after " - "30 minutes, for more info see " - "https://mongodb.com/docs/v4.4/reference/method/" - "cursor.noCursorTimeout/" - "#session-idle-timeout-overrides-nocursortimeout", - UserWarning, - stacklevel=2, - ) - if cursor_type not in ( - CursorType.NON_TAILABLE, - CursorType.TAILABLE, - CursorType.TAILABLE_AWAIT, - CursorType.EXHAUST, - ): - raise ValueError("not a valid value for cursor_type") - validate_boolean("allow_partial_results", allow_partial_results) - validate_boolean("oplog_replay", oplog_replay) - if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - # Only set if allow_disk_use is provided by the user, else None. - if allow_disk_use is not None: - allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) - - if projection is not None: - projection = helpers._fields_list_to_dict(projection, "projection") - - if let is not None: - validate_is_document_type("let", let) - - self.__let = let - self.__spec = spec - self.__has_filter = filter is not None - self.__projection = projection - self.__skip = skip - self.__limit = limit - self.__batch_size = batch_size - self.__ordering = sort and helpers._index_document(sort) or None - self.__max_scan = max_scan - self.__explain = False - self.__comment = comment - self.__max_time_ms = max_time_ms - self.__max_await_time_ms: Optional[int] = None - self.__max: Optional[Union[SON[Any, Any], _Sort]] = max - self.__min: Optional[Union[SON[Any, Any], _Sort]] = min - self.__collation = validate_collation_or_none(collation) - self.__return_key = return_key - self.__show_record_id = show_record_id - self.__allow_disk_use = allow_disk_use - self.__snapshot = snapshot - self.__set_hint(hint) - - # Exhaust cursor support - if cursor_type == CursorType.EXHAUST: - if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are not supported by mongos") - if limit: - raise InvalidOperation("Can't use limit and exhaust together.") - self.__exhaust = True - - # This is ugly. People want to be able to do cursor[5:5] and - # get an empty result set (old behavior was an - # exception). It's hard to do that right, though, because the - # server uses limit(0) to mean 'no limit'. So we set __empty - # in that case and check for it when iterating. We also unset - # it anytime we change __limit. - self.__empty = False - - self.__data: deque = deque() - self.__address = None - self.__retrieved = 0 - - self.__codec_options = collection.codec_options - # Read preference is set when the initial find is sent. 
- self.__read_preference = None - self.__read_concern = collection.read_concern - - self.__query_flags = cursor_type - if no_cursor_timeout: - self.__query_flags |= _QUERY_OPTIONS["no_timeout"] - if allow_partial_results: - self.__query_flags |= _QUERY_OPTIONS["partial"] - if oplog_replay: - self.__query_flags |= _QUERY_OPTIONS["oplog_replay"] - - # The namespace to use for find/getMore commands. - self.__dbname = collection.database.name - self.__collname = collection.name - - @property - def collection(self) -> "Collection[_DocumentType]": - """The :class:`~pymongo.collection.Collection` that this - :class:`Cursor` is iterating. - """ - return self.__collection - - @property - def retrieved(self) -> int: - """The number of documents retrieved so far.""" - return self.__retrieved - - def __del__(self) -> None: - self.__die() - - def rewind(self) -> "Cursor[_DocumentType]": - """Rewind this cursor to its unevaluated state. - - Reset this cursor if it has been partially or completely evaluated. - Any options that are present on the cursor will remain in effect. - Future iterating performed on this cursor will cause new queries to - be sent to the server, even if the resultant data has already been - retrieved by this cursor. - """ - self.close() - self.__data = deque() - self.__id = None - self.__address = None - self.__retrieved = 0 - self.__killed = False - - return self - - def clone(self) -> "Cursor[_DocumentType]": - """Get a clone of this cursor. - - Returns a new Cursor instance with options matching those that have - been set on the current instance. The clone will be completely - unevaluated, even if the current instance has been partially or - completely evaluated. - """ - return self._clone(True) - - def _clone(self, deepcopy=True, base=None): - """Internal clone helper.""" - if not base: - if self.__explicit_session: - base = self._clone_base(self.__session) - else: - base = self._clone_base(None) - - values_to_clone = ( - "spec", - "projection", - "skip", - "limit", - "max_time_ms", - "max_await_time_ms", - "comment", - "max", - "min", - "ordering", - "explain", - "hint", - "batch_size", - "max_scan", - "query_flags", - "collation", - "empty", - "show_record_id", - "return_key", - "allow_disk_use", - "snapshot", - "exhaust", - "has_filter", - ) - data = dict( - (k, v) - for k, v in self.__dict__.items() - if k.startswith("_Cursor__") and k[9:] in values_to_clone - ) - if deepcopy: - data = self._deepcopy(data) - base.__dict__.update(data) - return base - - def _clone_base(self, session): - """Creates an empty Cursor object for information to be copied into.""" - return self.__class__(self.__collection, session=session) - - def __die(self, synchronous=False): - """Closes this cursor.""" - try: - already_killed = self.__killed - except AttributeError: - # __init__ did not run to completion (or at all). - return - - self.__killed = True - if self.__id and not already_killed: - cursor_id = self.__id - address = _CursorAddress(self.__address, "%s.%s" % (self.__dbname, self.__collname)) - else: - # Skip killCursors. 
- cursor_id = 0 - address = None - self.__collection.database.client._cleanup_cursor( - synchronous, - cursor_id, - address, - self.__sock_mgr, - self.__session, - self.__explicit_session, - ) - if not self.__explicit_session: - self.__session = None - self.__sock_mgr = None - - def close(self) -> None: - """Explicitly close / kill this cursor.""" - self.__die(True) - - def __query_spec(self): - """Get the spec to use for a query.""" - operators = {} - if self.__ordering: - operators["$orderby"] = self.__ordering - if self.__explain: - operators["$explain"] = True - if self.__hint: - operators["$hint"] = self.__hint - if self.__let: - operators["let"] = self.__let - if self.__comment: - operators["$comment"] = self.__comment - if self.__max_scan: - operators["$maxScan"] = self.__max_scan - if self.__max_time_ms is not None: - operators["$maxTimeMS"] = self.__max_time_ms - if self.__max: - operators["$max"] = self.__max - if self.__min: - operators["$min"] = self.__min - if self.__return_key is not None: - operators["$returnKey"] = self.__return_key - if self.__show_record_id is not None: - # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. - operators["$showDiskLoc"] = self.__show_record_id - if self.__snapshot is not None: - operators["$snapshot"] = self.__snapshot - - if operators: - # Make a shallow copy so we can cleanly rewind or clone. - spec = copy.copy(self.__spec) - - # Allow-listed commands must be wrapped in $query. - if "$query" not in spec: - # $query has to come first - spec = SON([("$query", spec)]) - - if not isinstance(spec, SON): - # Ensure the spec is SON. As order is important this will - # ensure its set before merging in any extra operators. - spec = SON(spec) - - spec.update(operators) - return spec - # Have to wrap with $query if "query" is the first key. - # We can't just use $query anytime "query" is a key as - # that breaks commands like count and find_and_modify. - # Checking spec.keys()[0] covers the case that the spec - # was passed as an instance of SON or OrderedDict. - elif "query" in self.__spec and ( - len(self.__spec) == 1 or next(iter(self.__spec)) == "query" - ): - return SON({"$query": self.__spec}) - - return self.__spec - - def __check_okay_to_chain(self): - """Check if it is okay to chain more options onto this cursor.""" - if self.__retrieved or self.__id is not None: - raise InvalidOperation("cannot set options after executing query") - - def add_option(self, mask: int) -> "Cursor[_DocumentType]": - """Set arbitrary query flags using a bitmask. - - To set the tailable flag: - cursor.add_option(2) - """ - if not isinstance(mask, int): - raise TypeError("mask must be an int") - self.__check_okay_to_chain() - - if mask & _QUERY_OPTIONS["exhaust"]: - if self.__limit: - raise InvalidOperation("Can't use limit and exhaust together.") - if self.__collection.database.client.is_mongos: - raise InvalidOperation("Exhaust cursors are not supported by mongos") - self.__exhaust = True - - self.__query_flags |= mask - return self - - def remove_option(self, mask: int) -> "Cursor[_DocumentType]": - """Unset arbitrary query flags using a bitmask. 
- - To unset the tailable flag: - cursor.remove_option(2) - """ - if not isinstance(mask, int): - raise TypeError("mask must be an int") - self.__check_okay_to_chain() - - if mask & _QUERY_OPTIONS["exhaust"]: - self.__exhaust = False - - self.__query_flags &= ~mask - return self - - def allow_disk_use(self, allow_disk_use: bool) -> "Cursor[_DocumentType]": - """Specifies whether MongoDB can use temporary disk files while - processing a blocking sort operation. - - Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. - - .. note:: `allow_disk_use` requires server version **>= 4.4** - - :Parameters: - - `allow_disk_use`: if True, MongoDB may use temporary - disk files to store data exceeding the system memory limit while - processing a blocking sort operation. - - .. versionadded:: 3.11 - """ - if not isinstance(allow_disk_use, bool): - raise TypeError("allow_disk_use must be a bool") - self.__check_okay_to_chain() - - self.__allow_disk_use = allow_disk_use - return self - - def limit(self, limit: int) -> "Cursor[_DocumentType]": - """Limits the number of results to be returned by this cursor. - - Raises :exc:`TypeError` if `limit` is not an integer. Raises - :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` - has already been used. The last `limit` applied to this cursor - takes precedence. A limit of ``0`` is equivalent to no limit. - - :Parameters: - - `limit`: the number of results to return - - .. seealso:: The MongoDB documentation on `limit `_. - """ - if not isinstance(limit, int): - raise TypeError("limit must be an integer") - if self.__exhaust: - raise InvalidOperation("Can't use limit and exhaust together.") - self.__check_okay_to_chain() - - self.__empty = False - self.__limit = limit - return self - - def batch_size(self, batch_size: int) -> "Cursor[_DocumentType]": - """Limits the number of documents returned in one batch. Each batch - requires a round trip to the server. It can be adjusted to optimize - performance and limit data transfer. - - .. note:: batch_size can not override MongoDB's internal limits on the - amount of data it will return to the client in a single batch (i.e - if you set batch size to 1,000,000,000, MongoDB will currently only - return 4-16MB of results per batch). - - Raises :exc:`TypeError` if `batch_size` is not an integer. - Raises :exc:`ValueError` if `batch_size` is less than ``0``. - Raises :exc:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. The last `batch_size` - applied to this cursor takes precedence. - - :Parameters: - - `batch_size`: The size of each batch of results requested. - """ - if not isinstance(batch_size, int): - raise TypeError("batch_size must be an integer") - if batch_size < 0: - raise ValueError("batch_size must be >= 0") - self.__check_okay_to_chain() - - self.__batch_size = batch_size - return self - - def skip(self, skip: int) -> "Cursor[_DocumentType]": - """Skips the first `skip` results of this cursor. - - Raises :exc:`TypeError` if `skip` is not an integer. Raises - :exc:`ValueError` if `skip` is less than ``0``. Raises - :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has - already been used. The last `skip` applied to this cursor takes - precedence. 
- - :Parameters: - - `skip`: the number of results to skip - """ - if not isinstance(skip, int): - raise TypeError("skip must be an integer") - if skip < 0: - raise ValueError("skip must be >= 0") - self.__check_okay_to_chain() - - self.__skip = skip - return self - - def max_time_ms(self, max_time_ms: Optional[int]) -> "Cursor[_DocumentType]": - """Specifies a time limit for a query operation. If the specified - time is exceeded, the operation will be aborted and - :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` - is ``None`` no limit is applied. - - Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. - Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` - has already been used. - - :Parameters: - - `max_time_ms`: the time limit after which the operation is aborted - """ - if not isinstance(max_time_ms, int) and max_time_ms is not None: - raise TypeError("max_time_ms must be an integer or None") - self.__check_okay_to_chain() - - self.__max_time_ms = max_time_ms - return self - - def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> "Cursor[_DocumentType]": - """Specifies a time limit for a getMore operation on a - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other - types of cursor max_await_time_ms is ignored. - - Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or - ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. - - .. note:: `max_await_time_ms` requires server version **>= 3.2** - - :Parameters: - - `max_await_time_ms`: the time limit after which the operation is - aborted - - .. versionadded:: 3.2 - """ - if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: - raise TypeError("max_await_time_ms must be an integer or None") - self.__check_okay_to_chain() - - # Ignore max_await_time_ms if not tailable or await_data is False. - if self.__query_flags & CursorType.TAILABLE_AWAIT: - self.__max_await_time_ms = max_await_time_ms - - return self - - @overload - def __getitem__(self, index: int) -> _DocumentType: - ... - - @overload - def __getitem__(self, index: slice) -> "Cursor[_DocumentType]": - ... - - def __getitem__(self, index): - """Get a single document or a slice of documents from this cursor. - - .. warning:: A :class:`~Cursor` is not a Python :class:`list`. Each - index access or slice requires that a new query be run using skip - and limit. Do not iterate the cursor using index accesses. - The following example is **extremely inefficient** and may return - surprising results:: - - cursor = db.collection.find() - # Warning: This runs a new query for each document. - # Don't do this! - for idx in range(10): - print(cursor[idx]) - - Raises :class:`~pymongo.errors.InvalidOperation` if this - cursor has already been used. - - To get a single document use an integral index, e.g.:: - - >>> db.test.find()[50] - - An :class:`IndexError` will be raised if the index is negative - or greater than the amount of documents in this cursor. Any - limit previously applied to this cursor will be ignored. - - To get a slice of documents use a slice index, e.g.:: - - >>> db.test.find()[20:25] - - This will return this cursor with a limit of ``5`` and skip of - ``20`` applied. Using a slice index will override any prior - limits or skips applied to this cursor (including those - applied through previous calls to this method). 
Raises - :class:`IndexError` when the slice has a step, a negative - start value, or a stop value less than or equal to the start - value. - - :Parameters: - - `index`: An integer or slice index to be applied to this cursor - """ - self.__check_okay_to_chain() - self.__empty = False - if isinstance(index, slice): - if index.step is not None: - raise IndexError("Cursor instances do not support slice steps") - - skip = 0 - if index.start is not None: - if index.start < 0: - raise IndexError("Cursor instances do not support negative indices") - skip = index.start - - if index.stop is not None: - limit = index.stop - skip - if limit < 0: - raise IndexError( - "stop index must be greater than start index for slice %r" % index - ) - if limit == 0: - self.__empty = True - else: - limit = 0 - - self.__skip = skip - self.__limit = limit - return self - - if isinstance(index, int): - if index < 0: - raise IndexError("Cursor instances do not support negative indices") - clone = self.clone() - clone.skip(index + self.__skip) - clone.limit(-1) # use a hard limit - clone.__query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371 - for doc in clone: - return doc - raise IndexError("no such item for Cursor instance") - raise TypeError("index %r cannot be applied to Cursor instances" % index) - - def max_scan(self, max_scan: Optional[int]) -> "Cursor[_DocumentType]": - """**DEPRECATED** - Limit the number of documents to scan when - performing the query. - - Raises :class:`~pymongo.errors.InvalidOperation` if this - cursor has already been used. Only the last :meth:`max_scan` - applied to this cursor has any effect. - - :Parameters: - - `max_scan`: the maximum number of documents to scan - - .. versionchanged:: 3.7 - Deprecated :meth:`max_scan`. Support for this option is deprecated in - MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server side - execution time. - """ - self.__check_okay_to_chain() - self.__max_scan = max_scan - return self - - def max(self, spec: _Sort) -> "Cursor[_DocumentType]": - """Adds ``max`` operator that specifies upper bound for specific index. - - When using ``max``, :meth:`~hint` should also be configured to ensure - the query uses the expected index and starting in MongoDB 4.2 - :meth:`~hint` will be required. - - :Parameters: - - `spec`: a list of field, limit pairs specifying the exclusive - upper bound for all keys of a specific index in order. - - .. versionchanged:: 3.8 - Deprecated cursors that use ``max`` without a :meth:`~hint`. - - .. versionadded:: 2.7 - """ - if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") - - self.__check_okay_to_chain() - self.__max = SON(spec) - return self - - def min(self, spec: _Sort) -> "Cursor[_DocumentType]": - """Adds ``min`` operator that specifies lower bound for specific index. - - When using ``min``, :meth:`~hint` should also be configured to ensure - the query uses the expected index and starting in MongoDB 4.2 - :meth:`~hint` will be required. - - :Parameters: - - `spec`: a list of field, limit pairs specifying the inclusive - lower bound for all keys of a specific index in order. - - .. versionchanged:: 3.8 - Deprecated cursors that use ``min`` without a :meth:`~hint`. - - .. 
versionadded:: 2.7 - """ - if not isinstance(spec, (list, tuple)): - raise TypeError("spec must be an instance of list or tuple") - - self.__check_okay_to_chain() - self.__min = SON(spec) - return self - - def sort( - self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None - ) -> "Cursor[_DocumentType]": - """Sorts this cursor's results. - - Pass a field name and a direction, either - :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: - - for doc in collection.find().sort('field', pymongo.ASCENDING): - print(doc) - - To sort by multiple fields, pass a list of (key, direction) pairs:: - - for doc in collection.find().sort([ - ('field1', pymongo.ASCENDING), - ('field2', pymongo.DESCENDING)]): - print(doc) - - Text search results can be sorted by relevance:: - - cursor = db.test.find( - {'$text': {'$search': 'some words'}}, - {'score': {'$meta': 'textScore'}}) - - # Sort by 'score' field. - cursor.sort([('score', {'$meta': 'textScore'})]) - - for doc in cursor: - print(doc) - - For more advanced text search functionality, see MongoDB's - `Atlas Search `_. - - Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has - already been used. Only the last :meth:`sort` applied to this - cursor has any effect. - - :Parameters: - - `key_or_list`: a single key or a list of (key, direction) - pairs specifying the keys to sort on - - `direction` (optional): only used if `key_or_list` is a single - key, if not given :data:`~pymongo.ASCENDING` is assumed - """ - self.__check_okay_to_chain() - keys = helpers._index_list(key_or_list, direction) - self.__ordering = helpers._index_document(keys) - return self - - def distinct(self, key: str) -> List: - """Get a list of distinct values for `key` among all documents - in the result set of this query. - - Raises :class:`TypeError` if `key` is not an instance of - :class:`basestring` (:class:`str` in python 3). - - The :meth:`distinct` method obeys the - :attr:`~pymongo.collection.Collection.read_preference` of the - :class:`~pymongo.collection.Collection` instance on which - :meth:`~pymongo.collection.Collection.find` was called. - - :Parameters: - - `key`: name of key for which we want to get the distinct values - - .. seealso:: :meth:`pymongo.collection.Collection.distinct` - """ - options: Dict[str, Any] = {} - if self.__spec: - options["query"] = self.__spec - if self.__max_time_ms is not None: - options["maxTimeMS"] = self.__max_time_ms - if self.__comment: - options["comment"] = self.__comment - if self.__collation is not None: - options["collation"] = self.__collation - - return self.__collection.distinct(key, session=self.__session, **options) - - def explain(self) -> _DocumentType: - """Returns an explain plan record for this cursor. - - .. note:: This method uses the default verbosity mode of the - `explain command - `_, - ``allPlansExecution``. To use a different verbosity use - :meth:`~pymongo.database.Database.command` to run the explain - command directly. - - .. seealso:: The MongoDB documentation on `explain `_. - """ - c = self.clone() - c.__explain = True - - # always use a hard limit for explains - if c.__limit: - c.__limit = -abs(c.__limit) - return next(c) - - def __set_hint(self, index): - if index is None: - self.__hint = None - return - - if isinstance(index, str): - self.__hint = index - else: - self.__hint = helpers._index_document(index) - - def hint(self, index: Optional[_Hint]) -> "Cursor[_DocumentType]": - """Adds a 'hint', telling Mongo the proper index to use for the query. 
- - Judicious use of hints can greatly improve query - performance. When doing a query on multiple fields (at least - one of which is indexed) pass the indexed field as a hint to - the query. Raises :class:`~pymongo.errors.OperationFailure` if the - provided hint requires an index that does not exist on this collection, - and raises :class:`~pymongo.errors.InvalidOperation` if this cursor has - already been used. - - `index` should be an index as passed to - :meth:`~pymongo.collection.Collection.create_index` - (e.g. ``[('field', ASCENDING)]``) or the name of the index. - If `index` is ``None`` any existing hint for this query is - cleared. The last hint applied to this cursor takes precedence - over all others. - - :Parameters: - - `index`: index to hint on (as an index specifier) - """ - self.__check_okay_to_chain() - self.__set_hint(index) - return self - - def comment(self, comment: Any) -> "Cursor[_DocumentType]": - """Adds a 'comment' to the cursor. - - http://mongodb.com/docs/manual/reference/operator/comment/ - - :Parameters: - - `comment`: A string to attach to the query to help interpret and - trace the operation in the server logs and in profile data. - - .. versionadded:: 2.7 - """ - self.__check_okay_to_chain() - self.__comment = comment - return self - - def where(self, code: Union[str, Code]) -> "Cursor[_DocumentType]": - """Adds a `$where`_ clause to this query. - - The `code` argument must be an instance of :class:`basestring` - (:class:`str` in python 3) or :class:`~bson.code.Code` - containing a JavaScript expression. This expression will be - evaluated for each document scanned. Only those documents - for which the expression evaluates to *true* will be returned - as results. The keyword *this* refers to the object currently - being scanned. For example:: - - # Find all documents where field "a" is less than "b" plus "c". - for doc in db.test.find().where('this.a < (this.b + this.c)'): - print(doc) - - Raises :class:`TypeError` if `code` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidOperation` if this - :class:`Cursor` has already been used. Only the last call to - :meth:`where` applied to a :class:`Cursor` has any effect. - - .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` - with scope variables. Consider using `$expr`_ instead. - - :Parameters: - - `code`: JavaScript expression to use as a filter - - .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ - .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ - """ - self.__check_okay_to_chain() - if not isinstance(code, Code): - code = Code(code) - - # Avoid overwriting a filter argument that was given by the user - # when updating the spec. - spec: Dict[str, Any] - if self.__has_filter: - spec = dict(self.__spec) - else: - spec = cast(Dict, self.__spec) - spec["$where"] = code - self.__spec = spec - return self - - def collation(self, collation: Optional[_CollationIn]) -> "Cursor[_DocumentType]": - """Adds a :class:`~pymongo.collation.Collation` to this query. - - Raises :exc:`TypeError` if `collation` is not an instance of - :class:`~pymongo.collation.Collation` or a ``dict``. Raises - :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has - already been used. Only the last collation applied to this cursor has - any effect. - - :Parameters: - - `collation`: An instance of :class:`~pymongo.collation.Collation`. 
- """ - self.__check_okay_to_chain() - self.__collation = validate_collation_or_none(collation) - return self - - def __send_message(self, operation): - """Send a query or getmore operation and handles the response. - - If operation is ``None`` this is an exhaust cursor, which reads - the next result batch off the exhaust socket instead of - sending getMore messages to the server. - - Can raise ConnectionFailure. - """ - client = self.__collection.database.client - # OP_MSG is required to support exhaust cursors with encryption. - if client._encrypter and self.__exhaust: - raise InvalidOperation("exhaust cursors do not support auto encryption") - - try: - response = client._run_operation( - operation, self._unpack_response, address=self.__address - ) - except OperationFailure as exc: - if exc.code in _CURSOR_CLOSED_ERRORS or self.__exhaust: - # Don't send killCursors because the cursor is already closed. - self.__killed = True - self.close() - # If this is a tailable cursor the error is likely - # due to capped collection roll over. Setting - # self.__killed to True ensures Cursor.alive will be - # False. No need to re-raise. - if ( - exc.code in _CURSOR_CLOSED_ERRORS - and self.__query_flags & _QUERY_OPTIONS["tailable_cursor"] - ): - return - raise - except ConnectionFailure: - # Don't send killCursors because the cursor is already closed. - self.__killed = True - self.close() - raise - except Exception: - self.close() - raise - - self.__address = response.address - if isinstance(response, PinnedResponse): - if not self.__sock_mgr: - self.__sock_mgr = _SocketManager(response.socket_info, response.more_to_come) - - cmd_name = operation.name - docs = response.docs - if response.from_command: - if cmd_name != "explain": - cursor = docs[0]["cursor"] - self.__id = cursor["id"] - if cmd_name == "find": - documents = cursor["firstBatch"] - # Update the namespace used for future getMore commands. - ns = cursor.get("ns") - if ns: - self.__dbname, self.__collname = ns.split(".", 1) - else: - documents = cursor["nextBatch"] - self.__data = deque(documents) - self.__retrieved += len(documents) - else: - self.__id = 0 - self.__data = deque(docs) - self.__retrieved += len(docs) - else: - self.__id = response.data.cursor_id - self.__data = deque(docs) - self.__retrieved += response.data.number_returned - - if self.__id == 0: - # Don't wait for garbage collection to call __del__, return the - # socket and the session to the pool now. - self.close() - - if self.__limit and self.__id and self.__limit <= self.__retrieved: - self.close() - - def _unpack_response( - self, response, cursor_id, codec_options, user_fields=None, legacy_response=False - ): - return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) - - def _read_preference(self): - if self.__read_preference is None: - # Save the read preference for getMore commands. - self.__read_preference = self.__collection._read_preference_for(self.session) - return self.__read_preference - - def _refresh(self): - """Refreshes the cursor with more data from Mongo. - - Returns the length of self.__data after refresh. Will exit early if - self.__data is already non-empty. Raises OperationFailure when the - cursor cannot be refreshed due to an error on the query. 
- """ - if len(self.__data) or self.__killed: - return len(self.__data) - - if not self.__session: - self.__session = self.__collection.database.client._ensure_session() - - if self.__id is None: # Query - if (self.__min or self.__max) and not self.__hint: - raise InvalidOperation( - "Passing a 'hint' is required when using the min/max query" - " option to ensure the query utilizes the correct index" - ) - q = self._query_class( - self.__query_flags, - self.__collection.database.name, - self.__collection.name, - self.__skip, - self.__query_spec(), - self.__projection, - self.__codec_options, - self._read_preference(), - self.__limit, - self.__batch_size, - self.__read_concern, - self.__collation, - self.__session, - self.__collection.database.client, - self.__allow_disk_use, - self.__exhaust, - ) - self.__send_message(q) - elif self.__id: # Get More - if self.__limit: - limit = self.__limit - self.__retrieved - if self.__batch_size: - limit = min(limit, self.__batch_size) - else: - limit = self.__batch_size - # Exhaust cursors don't send getMore messages. - g = self._getmore_class( - self.__dbname, - self.__collname, - limit, - self.__id, - self.__codec_options, - self._read_preference(), - self.__session, - self.__collection.database.client, - self.__max_await_time_ms, - self.__sock_mgr, - self.__exhaust, - self.__comment, - ) - self.__send_message(g) - - return len(self.__data) - - @property - def alive(self) -> bool: - """Does this cursor have the potential to return more data? - - This is mostly useful with `tailable cursors - `_ - since they will stop iterating even though they *may* return more - results in the future. - - With regular cursors, simply use a for loop instead of :attr:`alive`:: - - for doc in collection.find(): - print(doc) - - .. note:: Even if :attr:`alive` is True, :meth:`next` can raise - :exc:`StopIteration`. :attr:`alive` can also be True while iterating - a cursor from a failed server. In this case :attr:`alive` will - return False after :meth:`next` fails to retrieve the next batch - of results from the server. - """ - return bool(len(self.__data) or (not self.__killed)) - - @property - def cursor_id(self) -> Optional[int]: - """Returns the id of the cursor - - .. versionadded:: 2.2 - """ - return self.__id - - @property - def address(self) -> Optional[Tuple[str, Any]]: - """The (host, port) of the server used, or None. - - .. versionchanged:: 3.0 - Renamed from "conn_id". - """ - return self.__address - - @property - def session(self) -> Optional["ClientSession"]: - """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. - - .. versionadded:: 3.6 - """ - if self.__explicit_session: - return self.__session - return None - - def __iter__(self) -> "Cursor[_DocumentType]": - return self - - def next(self) -> _DocumentType: - """Advance the cursor.""" - if self.__empty: - raise StopIteration - if len(self.__data) or self._refresh(): - return self.__data.popleft() - else: - raise StopIteration - - __next__ = next - - def __enter__(self) -> "Cursor[_DocumentType]": - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - def __copy__(self) -> "Cursor[_DocumentType]": - """Support function for `copy.copy()`. - - .. versionadded:: 2.4 - """ - return self._clone(deepcopy=False) - - def __deepcopy__(self, memo: Any) -> Any: - """Support function for `copy.deepcopy()`. - - .. 
versionadded:: 2.4 - """ - return self._clone(deepcopy=True) - - def _deepcopy(self, x, memo=None): - """Deepcopy helper for the data dictionary or list. - - Regular expressions cannot be deep copied but as they are immutable we - don't have to copy them when cloning. - """ - y: Any - if not hasattr(x, "items"): - y, is_list, iterator = [], True, enumerate(x) - else: - y, is_list, iterator = {}, False, x.items() - - if memo is None: - memo = {} - val_id = id(x) - if val_id in memo: - return memo.get(val_id) - memo[val_id] = y - - for key, value in iterator: - if isinstance(value, (dict, list)) and not isinstance(value, SON): - value = self._deepcopy(value, memo) - elif not isinstance(value, RE_TYPE): - value = copy.deepcopy(value, memo) - - if is_list: - y.append(value) - else: - if not isinstance(key, RE_TYPE): - key = copy.deepcopy(key, memo) - y[key] = value - return y - - -class RawBatchCursor(Cursor, Generic[_DocumentType]): - """A cursor / iterator over raw batches of BSON data from a query result.""" - - _query_class = _RawBatchQuery - _getmore_class = _RawBatchGetMore - - def __init__(self, collection: "Collection[_DocumentType]", *args: Any, **kwargs: Any) -> None: - """Create a new cursor / iterator over raw batches of BSON data. - - Should not be called directly by application developers - - see :meth:`~pymongo.collection.Collection.find_raw_batches` - instead. - - .. seealso:: The MongoDB documentation on `cursors `_. - """ - super(RawBatchCursor, self).__init__(collection, *args, **kwargs) - - def _unpack_response( - self, response, cursor_id, codec_options, user_fields=None, legacy_response=False - ): - raw_response = response.raw_response(cursor_id, user_fields=user_fields) - if not legacy_response: - # OP_MSG returns firstBatch/nextBatch documents as a BSON array - # Re-assemble the array of documents into a document stream - _convert_raw_document_lists_to_streams(raw_response[0]) - return raw_response - - def explain(self) -> _DocumentType: - """Returns an explain plan record for this cursor. - - .. seealso:: The MongoDB documentation on `explain `_. - """ - clone = self._clone(deepcopy=True, base=Cursor(self.collection)) - return clone.explain() - - def __getitem__(self, index: Any) -> NoReturn: - raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") +__doc__ = original_doc +__all__ = ["Cursor", "CursorType", "RawBatchCursor"] # noqa: F405 diff --git a/pymongo/cursor_shared.py b/pymongo/cursor_shared.py new file mode 100644 index 0000000000..de6126c4fb --- /dev/null +++ b/pymongo/cursor_shared.py @@ -0,0 +1,94 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants and types shared across all cursor classes.""" +from __future__ import annotations + +from typing import Any, Mapping, Sequence, Tuple, Union + +# These errors mean that the server has already killed the cursor so there is +# no need to send killCursors. 
+_CURSOR_CLOSED_ERRORS = frozenset( + [ + 43, # CursorNotFound + 175, # QueryPlanKilled + 237, # CursorKilled + # On a tailable cursor, the following errors mean the capped collection + # rolled over. + # MongoDB 2.6: + # {'$err': 'Runner killed during getMore', 'code': 28617, 'ok': 0} + 28617, + # MongoDB 3.0: + # {'$err': 'getMore executor error: UnknownError no details available', + # 'code': 17406, 'ok': 0} + 17406, + # MongoDB 3.2 + 3.4: + # {'ok': 0.0, 'errmsg': 'GetMore command executor error: + # CappedPositionLost: CollectionScan died due to failure to restore + # tailable cursor position. Last seen record id: RecordId(3)', + # 'code': 96} + 96, + # MongoDB 3.6+: + # {'ok': 0.0, 'errmsg': 'errmsg: "CollectionScan died due to failure to + # restore tailable cursor position. Last seen record id: RecordId(3)"', + # 'code': 136, 'codeName': 'CappedPositionLost'} + 136, + ] +) + +_QUERY_OPTIONS = { + "tailable_cursor": 2, + "secondary_okay": 4, + "oplog_replay": 8, + "no_timeout": 16, + "await_data": 32, + "exhaust": 64, + "partial": 128, +} + + +class CursorType: + NON_TAILABLE = 0 + """The standard cursor type.""" + + TAILABLE = _QUERY_OPTIONS["tailable_cursor"] + """The tailable cursor type. + + Tailable cursors are only for use with capped collections. They are not + closed when the last data is retrieved but are kept open and the cursor + location marks the final document position. If more data is received + iteration of the cursor will continue from the last document received. + """ + + TAILABLE_AWAIT = TAILABLE | _QUERY_OPTIONS["await_data"] + """A tailable cursor with the await option set. + + Creates a tailable cursor that will wait for a few seconds after returning + the full result set so that it can capture and return additional data added + during the query. + """ + + EXHAUST = _QUERY_OPTIONS["exhaust"] + """An exhaust cursor. + + MongoDB will stream batched results to the client without waiting for the + client to request each batch, reducing latency. + """ + + +_Sort = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_Hint = Union[str, _Sort] diff --git a/pymongo/daemon.py b/pymongo/daemon.py index 4fdf147a59..c0a01db16d 100644 --- a/pymongo/daemon.py +++ b/pymongo/daemon.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -16,20 +16,22 @@ PyMongo only attempts to spawn the mongocryptd daemon process when automatic client-side field level encryption is enabled. See -:ref:`automatic-client-side-encryption` for more info. +`Client-side Field Level Encryption `_ for more info. """ +from __future__ import annotations import os import subprocess import sys import warnings +from typing import Any, Optional, Sequence # The maximum amount of time to wait for the intermediate subprocess. 
_WAIT_TIMEOUT = 10 _THIS_FILE = os.path.realpath(__file__) -def _popen_wait(popen, timeout): +def _popen_wait(popen: subprocess.Popen[Any], timeout: Optional[float]) -> Optional[int]: """Implement wait timeout support for Python 3.""" try: return popen.wait(timeout=timeout) @@ -38,7 +40,7 @@ def _popen_wait(popen, timeout): return None -def _silence_resource_warning(popen): +def _silence_resource_warning(popen: Optional[subprocess.Popen[Any]]) -> None: """Silence Popen's ResourceWarning. Note this should only be used if the process was created as a daemon. @@ -56,12 +58,12 @@ def _silence_resource_warning(popen): # On Windows we spawn the daemon process simply by using DETACHED_PROCESS. _DETACHED_PROCESS = getattr(subprocess, "DETACHED_PROCESS", 0x00000008) - def _spawn_daemon(args): + def _spawn_daemon(args: Sequence[str]) -> None: """Spawn a daemon process (Windows).""" try: with open(os.devnull, "r+b") as devnull: popen = subprocess.Popen( - args, + args, # noqa: S603 creationflags=_DETACHED_PROCESS, stdin=devnull, stderr=devnull, @@ -87,12 +89,16 @@ def _spawn_daemon(args): # to be safe to call from any thread. Using Popen instead of fork also # avoids triggering the application's os.register_at_fork() callbacks when # we spawn the mongocryptd daemon process. - def _spawn(args): + def _spawn(args: Sequence[str]) -> Optional[subprocess.Popen[Any]]: """Spawn the process and silence stdout/stderr.""" try: with open(os.devnull, "r+b") as devnull: return subprocess.Popen( - args, close_fds=True, stdin=devnull, stderr=devnull, stdout=devnull + args, # noqa: S603 + close_fds=True, + stdin=devnull, + stderr=devnull, + stdout=devnull, ) except FileNotFoundError as exc: warnings.warn( @@ -100,17 +106,18 @@ def _spawn(args): RuntimeWarning, stacklevel=2, ) + return None - def _spawn_daemon_double_popen(args): + def _spawn_daemon_double_popen(args: Sequence[str]) -> None: """Spawn a daemon process using a double subprocess.Popen.""" spawner_args = [sys.executable, _THIS_FILE] spawner_args.extend(args) - temp_proc = subprocess.Popen(spawner_args, close_fds=True) + temp_proc = subprocess.Popen(spawner_args, close_fds=True) # noqa: S603 # Reap the intermediate child process to avoid creating zombie # processes. _popen_wait(temp_proc, _WAIT_TIMEOUT) - def _spawn_daemon(args): + def _spawn_daemon(args: Sequence[str]) -> None: """Spawn a daemon process (Unix).""" # "If Python is unable to retrieve the real path to its executable, # sys.executable will be an empty string or None". diff --git a/pymongo/database.py b/pymongo/database.py index 4f87a58dda..f85b312f91 100644 --- a/pymongo/database.py +++ b/pymongo/database.py @@ -1,10 +1,10 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,1191 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
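# A short sketch of the compatibility-shim pattern this diff applies to
# pymongo/cursor.py above and to pymongo/database.py below: the legacy module
# is reduced to a star re-export, so the old import path keeps resolving to
# the implementation that now lives under pymongo.synchronous. (Assumes a
# pymongo build that ships the pymongo.synchronous package introduced here.)
from pymongo.database import Database
from pymongo.synchronous.database import Database as SyncDatabase

# Both import paths name the same class object, so isinstance checks and
# annotations written against the old path keep working unchanged.
assert Database is SyncDatabase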
-"""Database level operations.""" -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Generic, - List, - Mapping, - MutableMapping, - NoReturn, - Optional, - Sequence, - TypeVar, - Union, - cast, -) +"""Re-import of synchronous Database API for compatibility.""" +from __future__ import annotations -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions -from bson.dbref import DBRef -from bson.son import SON -from bson.timestamp import Timestamp -from pymongo import _csot, common -from pymongo.aggregation import _DatabaseAggregationCommand -from pymongo.change_stream import DatabaseChangeStream -from pymongo.collection import Collection -from pymongo.command_cursor import CommandCursor -from pymongo.common import _ecc_coll_name, _ecoc_coll_name, _esc_coll_name -from pymongo.errors import CollectionInvalid, InvalidName -from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.synchronous.database import * # noqa: F403 +from pymongo.synchronous.database import __doc__ as original_doc - -def _check_name(name): - """Check if a database name is valid.""" - if not name: - raise InvalidName("database name cannot be the empty string") - - for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: - if invalid_char in name: - raise InvalidName("database names cannot contain the character %r" % invalid_char) - - -if TYPE_CHECKING: - import bson.codec_options - from pymongo.client_session import ClientSession - from pymongo.mongo_client import MongoClient - from pymongo.read_concern import ReadConcern - from pymongo.write_concern import WriteConcern - - -_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) - - -class Database(common.BaseObject, Generic[_DocumentType]): - """A Mongo database.""" - - def __init__( - self, - client: "MongoClient[_DocumentType]", - name: str, - codec_options: Optional[CodecOptions] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, - ) -> None: - """Get a database by client and name. - - Raises :class:`TypeError` if `name` is not an instance of - :class:`basestring` (:class:`str` in python 3). Raises - :class:`~pymongo.errors.InvalidName` if `name` is not a valid - database name. - - :Parameters: - - `client`: A :class:`~pymongo.mongo_client.MongoClient` instance. - - `name`: The database name. - - `codec_options` (optional): An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) client.codec_options is used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) client.read_preference is used. - - `write_concern` (optional): An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) client.write_concern is used. - - `read_concern` (optional): An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) client.read_concern is used. - - .. seealso:: The MongoDB documentation on `databases `_. - - .. versionchanged:: 4.0 - Removed the eval, system_js, error, last_status, previous_error, - reset_error_history, authenticate, logout, collection_names, - current_op, add_user, remove_user, profiling_level, - set_profiling_level, and profiling_info methods. - See the :ref:`pymongo4-migration-guide`. - - .. versionchanged:: 3.2 - Added the read_concern option. - - .. 
versionchanged:: 3.0 - Added the codec_options, read_preference, and write_concern options. - :class:`~pymongo.database.Database` no longer returns an instance - of :class:`~pymongo.collection.Collection` for attribute names - with leading underscores. You must use dict-style lookups instead:: - - db['__my_collection__'] - - Not: - - db.__my_collection__ - """ - super(Database, self).__init__( - codec_options or client.codec_options, - read_preference or client.read_preference, - write_concern or client.write_concern, - read_concern or client.read_concern, - ) - - if not isinstance(name, str): - raise TypeError("name must be an instance of str") - - if name != "$external": - _check_name(name) - - self.__name = name - self.__client: MongoClient[_DocumentType] = client - self._timeout = client.options.timeout - - @property - def client(self) -> "MongoClient[_DocumentType]": - """The client instance for this :class:`Database`.""" - return self.__client - - @property - def name(self) -> str: - """The name of this :class:`Database`.""" - return self.__name - - def with_options( - self, - codec_options: Optional[CodecOptions] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, - ) -> "Database[_DocumentType]": - """Get a clone of this database changing the specified settings. - - >>> db1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> db2 = db1.with_options(read_preference=ReadPreference.SECONDARY) - >>> db1.read_preference - Primary() - >>> db2.read_preference - Secondary(tag_sets=None) - - :Parameters: - - `codec_options` (optional): An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Database` - is used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Database` is used. See :mod:`~pymongo.read_preferences` - for options. - - `write_concern` (optional): An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Database` - is used. - - `read_concern` (optional): An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Database` - is used. - - .. versionadded:: 3.8 - """ - return Database( - self.client, - self.__name, - codec_options or self.codec_options, - read_preference or self.read_preference, - write_concern or self.write_concern, - read_concern or self.read_concern, - ) - - def __eq__(self, other: Any) -> bool: - if isinstance(other, Database): - return self.__client == other.client and self.__name == other.name - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash((self.__client, self.__name)) - - def __repr__(self): - return "Database(%r, %r)" % (self.__client, self.__name) - - def __getattr__(self, name: str) -> Collection[_DocumentType]: - """Get a collection of this database by name. - - Raises InvalidName if an invalid collection name is used. - - :Parameters: - - `name`: the name of the collection to get - """ - if name.startswith("_"): - raise AttributeError( - "Database has no attribute %r. To access the %s" - " collection, use database[%r]."
% (name, name, name) - ) - return self.__getitem__(name) - - def __getitem__(self, name: str) -> "Collection[_DocumentType]": - """Get a collection of this database by name. - - Raises InvalidName if an invalid collection name is used. - - :Parameters: - - `name`: the name of the collection to get - """ - return Collection(self, name) - - def get_collection( - self, - name: str, - codec_options: Optional[CodecOptions] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, - ) -> Collection[_DocumentType]: - """Get a :class:`~pymongo.collection.Collection` with the given name - and options. - - Useful for creating a :class:`~pymongo.collection.Collection` with - different codec options, read preference, and/or write concern from - this :class:`Database`. - - >>> db.read_preference - Primary() - >>> coll1 = db.test - >>> coll1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> coll2 = db.get_collection( - ... 'test', read_preference=ReadPreference.SECONDARY) - >>> coll2.read_preference - Secondary(tag_sets=None) - - :Parameters: - - `name`: The name of the collection - a string. - - `codec_options` (optional): An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Database` is - used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Database` is used. See :mod:`~pymongo.read_preferences` - for options. - - `write_concern` (optional): An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Database` is - used. - - `read_concern` (optional): An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Database` is - used. - """ - return Collection( - self, - name, - False, - codec_options, - read_preference, - write_concern, - read_concern, - ) - - @_csot.apply - def create_collection( - self, - name: str, - codec_options: Optional[CodecOptions] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional["WriteConcern"] = None, - read_concern: Optional["ReadConcern"] = None, - session: Optional["ClientSession"] = None, - check_exists: Optional[bool] = True, - **kwargs: Any, - ) -> Collection[_DocumentType]: - """Create a new :class:`~pymongo.collection.Collection` in this - database. - - Normally collection creation is automatic. This method should - only be used to specify options on - creation. :class:`~pymongo.errors.CollectionInvalid` will be - raised if the collection already exists. - - :Parameters: - - `name`: the name of the collection to create - - `codec_options` (optional): An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`Database` is - used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`Database` is used. - - `write_concern` (optional): An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`Database` is - used. - - `read_concern` (optional): An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`Database` is - used. 
- - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - ``check_exists`` (optional): if True (the default), send a listCollections command to - check if the collection already exists before creation. - - `**kwargs` (optional): additional keyword arguments will - be passed as options for the `create collection command`_ - - All optional `create collection command`_ parameters should be passed - as keyword arguments to this method. Valid options include, but are not - limited to: - - - ``size`` (int): desired initial size for the collection (in - bytes). For capped collections this size is the max - size of the collection. - - ``capped`` (bool): if True, this is a capped collection - - ``max`` (int): maximum number of objects if capped (optional) - - ``timeseries`` (dict): a document specifying configuration options for - timeseries collections - - ``expireAfterSeconds`` (int): the number of seconds after which a - document in a timeseries collection expires - - ``validator`` (dict): a document specifying validation rules or expressions - for the collection - - ``validationLevel`` (str): how strictly to apply the - validation rules to existing documents during an update. The default level - is "strict" - - ``validationAction`` (str): whether to "error" on invalid documents - (the default) or just "warn" about the violations but allow invalid - documents to be inserted - - ``indexOptionDefaults`` (dict): a document specifying a default configuration - for indexes when creating a collection - - ``viewOn`` (str): the name of the source collection or view from which - to create the view - - ``pipeline`` (list): a list of aggregation pipeline stages - - ``comment`` (str): a user-provided comment to attach to this command. - This option is only supported on MongoDB >= 4.4. - - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. For example:: - - { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), - "bsonType": "string", - "queries": {"queryType": "equality"} - }, - { - "path": "ssn", - "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), - "bsonType": "string" - } - ] - } - - ``clusteredIndex`` (dict): Document that specifies the clustered index - configuration. It must have the following form:: - - { - // key pattern must be {_id: 1} - key: <key pattern>, // required - unique: <bool>, // required, must be 'true' - name: <string>, // optional, otherwise automatically generated - v: <int>, // optional, must be '2' if provided - } - - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for - enabling pre- and post-images. - - .. versionchanged:: 4.2 - Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters. - - .. versionchanged:: 3.11 - This method is now supported inside multi-document transactions - with MongoDB 4.4+. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Added the collation option. - - .. versionchanged:: 3.0 - Added the codec_options, read_preference, and write_concern options. - - ..
_create collection command: - https://mongodb.com/docs/manual/reference/command/create - """ - encrypted_fields = kwargs.get("encryptedFields") - if ( - not encrypted_fields - and self.client.options.auto_encryption_opts - and self.client.options.auto_encryption_opts._encrypted_fields_map - ): - encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( - "%s.%s" % (self.name, name) - ) - kwargs["encryptedFields"] = encrypted_fields - - if encrypted_fields: - common.validate_is_mapping("encryptedFields", encrypted_fields) - - clustered_index = kwargs.get("clusteredIndex") - if clustered_index: - common.validate_is_mapping("clusteredIndex", clustered_index) - - with self.__client._tmp_session(session) as s: - # Skip this check in a transaction where listCollections is not - # supported. - if ( - check_exists - and (not s or not s.in_transaction) - and name in self.list_collection_names(filter={"name": name}, session=s) - ): - raise CollectionInvalid("collection %s already exists" % name) - return Collection( - self, - name, - True, - codec_options, - read_preference, - write_concern, - read_concern, - session=s, - **kwargs, - ) - - def aggregate( - self, pipeline: _Pipeline, session: Optional["ClientSession"] = None, **kwargs: Any - ) -> CommandCursor[_DocumentType]: - """Perform a database-level aggregation. - - See the `aggregation pipeline`_ documentation for a list of stages - that are supported. - - .. code-block:: python - - # Lists all operations currently running on the server. - with client.admin.aggregate([{"$currentOp": {}}]) as cursor: - for operation in cursor: - print(operation) - - The :meth:`aggregate` method obeys the :attr:`read_preference` of this - :class:`Database`, except when ``$out`` or ``$merge`` are used, in - which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` - is used. - - .. note:: This method does not support the 'explain' option. Please - use :meth:`~pymongo.database.Database.command` instead. - - .. note:: The :attr:`~pymongo.database.Database.write_concern` of - this database is automatically applied to this operation. - - :Parameters: - - `pipeline`: a list of aggregation pipeline stages - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `**kwargs` (optional): extra `aggregate command`_ parameters. - - All optional `aggregate command`_ parameters should be passed as - keyword arguments to this method. Valid options include, but are not - limited to: - - - `allowDiskUse` (bool): Enables writing to temporary files. When set - to True, aggregation stages can write data to the _tmp subdirectory - of the --dbpath directory. The default is False. - - `maxTimeMS` (int): The maximum amount of time to allow the operation - to run in milliseconds. - - `batchSize` (int): The maximum number of documents to return per - batch. Ignored if the connected mongod or mongos does not support - returning aggregate results using a cursor. - - `collation` (optional): An instance of - :class:`~pymongo.collation.Collation`. - - `let` (dict): A dict of parameter names and values. Values must be - constant or closed expressions that do not reference document - fields. Parameters can then be accessed as variables in an - aggregate expression context (e.g. ``"$$var"``). This option is - only supported on MongoDB >= 5.0. - - :Returns: - A :class:`~pymongo.command_cursor.CommandCursor` over the result - set. - - .. versionadded:: 3.9 - - ..
_aggregation pipeline: - https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline - - .. _aggregate command: - https://mongodb.com/docs/manual/reference/command/aggregate - """ - with self.client._tmp_session(session, close=False) as s: - cmd = _DatabaseAggregationCommand( - self, - CommandCursor, - pipeline, - kwargs, - session is not None, - user_fields={"cursor": {"firstBatch": 1}}, - ) - return self.client._retryable_read( - cmd.get_cursor, cmd.get_read_preference(s), s, retryable=not cmd._performs_write - ) - - def watch( - self, - pipeline: Optional[_Pipeline] = None, - full_document: Optional[str] = None, - resume_after: Optional[Mapping[str, Any]] = None, - max_await_time_ms: Optional[int] = None, - batch_size: Optional[int] = None, - collation: Optional[_CollationIn] = None, - start_at_operation_time: Optional[Timestamp] = None, - session: Optional["ClientSession"] = None, - start_after: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - ) -> DatabaseChangeStream[_DocumentType]: - """Watch changes on this database. - - Performs an aggregation with an implicit initial ``$changeStream`` - stage and returns a - :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which - iterates over changes on all collections in this database. - - Introduced in MongoDB 4.0. - - .. code-block:: python - - with db.watch() as stream: - for change in stream: - print(change) - - The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable - blocks until the next change document is returned or an error is - raised. If the - :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method - encounters a network error when retrieving a batch from the server, - it will automatically attempt to recreate the cursor such that no - change events are missed. Any error encountered during the resume - attempt indicates there may be an outage and will be raised. - - .. code-block:: python - - try: - with db.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: - for insert_change in stream: - print(insert_change) - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - logging.error('...') - - For a precise description of the resume process see the - `change streams specification`_. - - :Parameters: - - `pipeline` (optional): A list of aggregation pipeline stages to - append to an initial ``$changeStream`` stage. Not all - pipeline stages are valid after a ``$changeStream`` stage, see the - MongoDB documentation on change streams for the supported stages. - - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', - 'whenAvailable', 'required'. When set to 'updateLookup', the - change notification for partial updates will include both a delta - describing the changes to the document, as well as a copy of the - entire document that was changed from some time after the change - occurred. - - `full_document_before_change`: Allowed values: 'whenAvailable' - and 'required'. Change events may now result in a - 'fullDocumentBeforeChange' response field. - - `resume_after` (optional): A resume token. If provided, the - change stream will start returning changes that occur directly - after the operation specified in the resume token. A resume token - is the _id value of a change document. 
- - `max_await_time_ms` (optional): The maximum time in milliseconds - for the server to wait for changes before responding to a getMore - operation. - - `batch_size` (optional): The maximum number of documents to return - per batch. - - `collation` (optional): The :class:`~pymongo.collation.Collation` - to use for the aggregation. - - `start_at_operation_time` (optional): If provided, the resulting - change stream will only return changes that occurred at or after - the specified :class:`~bson.timestamp.Timestamp`. Requires - MongoDB >= 4.0. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `start_after` (optional): The same as `resume_after` except that - `start_after` can resume notifications after an invalidate event. - This option and `resume_after` are mutually exclusive. - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. - - .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.9 - Added the ``start_after`` parameter. - - .. versionadded:: 3.7 - - .. seealso:: The MongoDB documentation on `changeStreams `_. - - .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst - """ - return DatabaseChangeStream( - self, - pipeline, - full_document, - resume_after, - max_await_time_ms, - batch_size, - collation, - start_at_operation_time, - session, - start_after, - comment, - full_document_before_change, - ) - - def _command( - self, - sock_info, - command, - value=1, - check=True, - allowable_errors=None, - read_preference=ReadPreference.PRIMARY, - codec_options=DEFAULT_CODEC_OPTIONS, - write_concern=None, - parse_write_concern_error=False, - session=None, - **kwargs, - ): - """Internal command helper.""" - if isinstance(command, str): - command = SON([(command, value)]) - - command.update(kwargs) - with self.__client._tmp_session(session) as s: - return sock_info.command( - self.__name, - command, - read_preference, - codec_options, - check, - allowable_errors, - write_concern=write_concern, - parse_write_concern_error=parse_write_concern_error, - session=s, - client=self.__client, - ) - - @_csot.apply - def command( - self, - command: Union[str, MutableMapping[str, Any]], - value: Any = 1, - check: bool = True, - allowable_errors: Optional[Sequence[Union[str, int]]] = None, - read_preference: Optional[_ServerMode] = None, - codec_options: "Optional[bson.codec_options.CodecOptions[_CodecDocumentType]]" = None, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> _CodecDocumentType: - """Issue a MongoDB command. - - Send command `command` to the database and return the - response. If `command` is an instance of :class:`basestring` - (:class:`str` in python 3) then the command {`command`: `value`} - will be sent. Otherwise, `command` must be an instance of - :class:`dict` and will be sent as is. - - Any additional keyword arguments will be added to the final - command document before it is sent. - - For example, a command like ``{buildinfo: 1}`` can be sent - using: - - >>> db.command("buildinfo") - - For a command where the value matters, like ``{collstats: - collection_name}`` we can do: - - >>> db.command("collstats", collection_name) - - For commands that take additional arguments we can use - kwargs. 
So ``{filemd5: object_id, root: file_root}`` becomes: - - >>> db.command("filemd5", object_id, root=file_root) - - :Parameters: - - `command`: document representing the command to be issued, - or the name of the command (for simple commands only). - - .. note:: the order of keys in the `command` document is - significant (the "verb" must come first), so commands - which require multiple keys (e.g. `findandmodify`) - should use an instance of :class:`~bson.son.SON` or - a string and kwargs instead of a Python `dict`. - - - `value` (optional): value to use for the command verb when - `command` is passed as a string - - `check` (optional): check the response for errors, raising - :class:`~pymongo.errors.OperationFailure` if there are any - - `allowable_errors`: if `check` is ``True``, error messages - in this list will be ignored by error-checking - - `read_preference` (optional): The read preference for this - operation. See :mod:`~pymongo.read_preferences` for options. - If the provided `session` is in a transaction, defaults to the - read preference configured for the transaction. - Otherwise, defaults to - :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. - - `codec_options`: A :class:`~bson.codec_options.CodecOptions` - instance. - - `session` (optional): A - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): additional keyword arguments will - be added to the command document before it is sent - - - .. note:: :meth:`command` does **not** obey this Database's - :attr:`read_preference` or :attr:`codec_options`. You must use the - ``read_preference`` and ``codec_options`` parameters instead. - - .. note:: :meth:`command` does **not** apply any custom TypeDecoders - when decoding the command response. - - .. note:: If this client has been configured to use MongoDB Stable - API (see :ref:`versioned-api-ref`), then :meth:`command` will - automatically add API versioning options to the given command. - Explicitly adding API versioning options in the command and - declaring an API version on the client is not supported. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.0 - Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`, - and `secondary_acceptable_latency_ms` options. - Removed `compile_re` option: PyMongo now always represents BSON - regular expressions as :class:`~bson.regex.Regex` objects. Use - :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a - BSON regular expression to a Python regular expression object. - Added the ``codec_options`` parameter. - - .. seealso:: The MongoDB documentation on `commands `_.
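Because ``command()`` ignores the Database-level read preference and codec options, both are passed explicitly in this minimal sketch (command and values are illustrative); an ordered ``SON`` document is used because the command verb must come first:

```python
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import MongoClient, ReadPreference

client = MongoClient()
db = client.test
# Multi-key commands need a deterministic key order: use SON (or a
# string plus kwargs), as the docstring above recommends.
stats = db.command(
    SON([("dbStats", 1), ("scale", 1024)]),
    read_preference=ReadPreference.PRIMARY,
    codec_options=CodecOptions(document_class=dict),
)
```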
- """ - opts = codec_options or DEFAULT_CODEC_OPTIONS - if comment is not None: - kwargs["comment"] = comment - - if read_preference is None: - read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - with self.__client._socket_for_reads(read_preference, session) as ( - sock_info, - read_preference, - ): - return self._command( - sock_info, - command, - value, - check, - allowable_errors, - read_preference, - opts, - session=session, - **kwargs, - ) - - def _retryable_read_command( - self, - command, - value=1, - check=True, - allowable_errors=None, - read_preference=None, - codec_options=DEFAULT_CODEC_OPTIONS, - session=None, - **kwargs, - ): - """Same as command but used for retryable read commands.""" - if read_preference is None: - read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - - def _cmd(session, server, sock_info, read_preference): - return self._command( - sock_info, - command, - value, - check, - allowable_errors, - read_preference, - codec_options, - session=session, - **kwargs, - ) - - return self.__client._retryable_read(_cmd, read_preference, session) - - def _list_collections(self, sock_info, session, read_preference, **kwargs): - """Internal listCollections helper.""" - - coll = self.get_collection("$cmd", read_preference=read_preference) - cmd = SON([("listCollections", 1), ("cursor", {})]) - cmd.update(kwargs) - with self.__client._tmp_session(session, close=False) as tmp_session: - cursor = self._command( - sock_info, cmd, read_preference=read_preference, session=tmp_session - )["cursor"] - cmd_cursor = CommandCursor( - coll, - cursor, - sock_info.address, - session=tmp_session, - explicit_session=session is not None, - comment=cmd.get("comment"), - ) - cmd_cursor._maybe_pin_connection(sock_info) - return cmd_cursor - - def list_collections( - self, - session: Optional["ClientSession"] = None, - filter: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> CommandCursor[Dict[str, Any]]: - """Get a cursor over the collections of this database. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `filter` (optional): A query document to filter the list of - collections returned from the listCollections command. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): Optional parameters of the - `listCollections command - `_ - can be passed as keyword arguments to this method. The supported - options differ by server version. - - - :Returns: - An instance of :class:`~pymongo.command_cursor.CommandCursor`. - - .. versionadded:: 3.6 - """ - if filter is not None: - kwargs["filter"] = filter - read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY - if comment is not None: - kwargs["comment"] = comment - - def _cmd(session, server, sock_info, read_preference): - return self._list_collections( - sock_info, session, read_preference=read_preference, **kwargs - ) - - return self.__client._retryable_read(_cmd, read_pref, session) - - def list_collection_names( - self, - session: Optional["ClientSession"] = None, - filter: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> List[str]: - """Get a list of all the collection names in this database. 
- - For example, to list all non-system collections:: - - filter = {"name": {"$regex": r"^(?!system\\.)"}} - db.list_collection_names(filter=filter) - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `filter` (optional): A query document to filter the list of - collections returned from the listCollections command. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): Optional parameters of the - `listCollections command - `_ - can be passed as keyword arguments to this method. The supported - options differ by server version. - - - .. versionchanged:: 3.8 - Added the ``filter`` and ``**kwargs`` parameters. - - .. versionadded:: 3.6 - """ - if comment is not None: - kwargs["comment"] = comment - if filter is None: - kwargs["nameOnly"] = True - - else: - # The enumerate collections spec states that "drivers MUST NOT set - # nameOnly if a filter specifies any keys other than name." - common.validate_is_mapping("filter", filter) - kwargs["filter"] = filter - if not filter or (len(filter) == 1 and "name" in filter): - kwargs["nameOnly"] = True - - return [result["name"] for result in self.list_collections(session=session, **kwargs)] - - def _drop_helper(self, name, session=None, comment=None): - command = SON([("drop", name)]) - if comment is not None: - command["comment"] = comment - - with self.__client._socket_for_writes(session) as sock_info: - return self._command( - sock_info, - command, - allowable_errors=["ns not found", 26], - write_concern=self._write_concern_for(session), - parse_write_concern_error=True, - session=session, - ) - - @_csot.apply - def drop_collection( - self, - name_or_collection: Union[str, Collection], - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - encrypted_fields: Optional[Mapping[str, Any]] = None, - ) -> Dict[str, Any]: - """Drop a collection. - - :Parameters: - - `name_or_collection`: the name of a collection to drop or the - collection object itself - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `encrypted_fields`: **(BETA)** Document that describes the encrypted fields for - Queryable Encryption. For example:: - - { - "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", - "ecocCollection": "enxcol_.encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), - "bsonType": "string", - "queries": {"queryType": "equality"} - }, - { - "path": "ssn", - "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), - "bsonType": "string" - } - ] - - } - - - .. note:: The :attr:`~pymongo.database.Database.write_concern` of - this database is automatically applied to this operation. - - .. versionchanged:: 4.2 - Added ``encrypted_fields`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. versionchanged:: 3.4 - Apply this database's write concern automatically to this operation - when connected to MongoDB >= 3.4. 
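A short usage sketch for ``drop_collection`` (collection name hypothetical): it accepts either the collection's name or the Collection object itself, and the database's write concern is applied automatically:

```python
from pymongo import MongoClient

client = MongoClient()
db = client.test
db.drop_collection("events")                      # by name
db.drop_collection(db.events, comment="cleanup")  # by Collection object
```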
- - """ - name = name_or_collection - if isinstance(name, Collection): - name = name.name - - if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str") - full_name = "%s.%s" % (self.name, name) - if ( - not encrypted_fields - and self.client.options.auto_encryption_opts - and self.client.options.auto_encryption_opts._encrypted_fields_map - ): - encrypted_fields = self.client.options.auto_encryption_opts._encrypted_fields_map.get( - full_name - ) - if not encrypted_fields and self.client.options.auto_encryption_opts: - colls = list( - self.list_collections(filter={"name": name}, session=session, comment=comment) - ) - if colls and colls[0]["options"].get("encryptedFields"): - encrypted_fields = colls[0]["options"]["encryptedFields"] - if encrypted_fields: - common.validate_is_mapping("encrypted_fields", encrypted_fields) - self._drop_helper( - _esc_coll_name(encrypted_fields, name), session=session, comment=comment - ) - self._drop_helper( - _ecc_coll_name(encrypted_fields, name), session=session, comment=comment - ) - self._drop_helper( - _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment - ) - - return self._drop_helper(name, session, comment) - - def validate_collection( - self, - name_or_collection: Union[str, Collection], - scandata: bool = False, - full: bool = False, - session: Optional["ClientSession"] = None, - background: Optional[bool] = None, - comment: Optional[Any] = None, - ) -> Dict[str, Any]: - """Validate a collection. - - Returns a dict of validation info. Raises CollectionInvalid if - validation fails. - - See also the MongoDB documentation on the `validate command`_. - - :Parameters: - - `name_or_collection`: A Collection object or the name of a - collection to validate. - - `scandata`: Do extra checks beyond checking the overall - structure of the collection. - - `full`: Have the server do a more thorough scan of the - collection. Use with `scandata` for a thorough scan - of the structure of the collection and the individual - documents. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `background` (optional): A boolean flag that determines whether - the command runs in the background. Requires MongoDB 4.4+. - - `comment` (optional): A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.11 - Added ``background`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. 
_validate command: https://mongodb.com/docs/manual/reference/command/validate/ - """ - name = name_or_collection - if isinstance(name, Collection): - name = name.name - - if not isinstance(name, str): - raise TypeError("name_or_collection must be an instance of str or Collection") - cmd = SON([("validate", name), ("scandata", scandata), ("full", full)]) - if comment is not None: - cmd["comment"] = comment - - if background is not None: - cmd["background"] = background - - result = cast(dict, self.command(cmd, session=session)) - - valid = True - # Pre 1.9 results - if "result" in result: - info = result["result"] - if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) - # Sharded results - elif "raw" in result: - for _, res in result["raw"].items(): - if "result" in res: - info = res["result"] - if info.find("exception") != -1 or info.find("corrupt") != -1: - raise CollectionInvalid("%s invalid: %s" % (name, info)) - elif not res.get("valid", False): - valid = False - break - # Post 1.9 non-sharded results. - elif not result.get("valid", False): - valid = False - - if not valid: - raise CollectionInvalid("%s invalid: %r" % (name, result)) - - return result - - # See PYTHON-3084. - __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'Database' object is not iterable") - - next = __next__ - - def __bool__(self) -> NoReturn: - raise NotImplementedError( - "Database objects do not implement truth " - "value testing or bool(). Please compare " - "with None instead: database is not None" - ) - - def dereference( - self, - dbref: DBRef, - session: Optional["ClientSession"] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> Optional[_DocumentType]: - """Dereference a :class:`~bson.dbref.DBRef`, getting the - document it points to. - - Raises :class:`TypeError` if `dbref` is not an instance of - :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if - the reference does not point to a valid document. Raises - :class:`ValueError` if `dbref` has a database specified that - is different from the current database. - - :Parameters: - - `dbref`: the reference - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): any additional keyword arguments - are the same as the arguments to - :meth:`~pymongo.collection.Collection.find`. - - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - if not isinstance(dbref, DBRef): - raise TypeError("cannot dereference a %s" % type(dbref)) - if dbref.database is not None and dbref.database != self.__name: - raise ValueError( - "trying to dereference a DBRef that points to " - "another database (%r not %r)" % (dbref.database, self.__name) - ) - return self[dbref.collection].find_one( - {"_id": dbref.id}, session=session, comment=comment, **kwargs - ) +__doc__ = original_doc +__all__ = ["Database"] # noqa: F405 diff --git a/pymongo/database_shared.py b/pymongo/database_shared.py new file mode 100644 index 0000000000..d6563a4b3d --- /dev/null +++ b/pymongo/database_shared.py @@ -0,0 +1,34 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. 
You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Constants, helpers, and types shared across all database classes.""" +from __future__ import annotations + +from typing import Any, Mapping, TypeVar + +from pymongo.errors import InvalidName + + +def _check_name(name: str) -> None: + """Check if a database name is valid.""" + if not name: + raise InvalidName("database name cannot be the empty string") + + for invalid_char in [" ", ".", "$", "/", "\\", "\x00", '"']: + if invalid_char in name: + raise InvalidName("database names cannot contain the character %r" % invalid_char) + + +_CodecDocumentType = TypeVar("_CodecDocumentType", bound=Mapping[str, Any]) diff --git a/pymongo/driver_info.py b/pymongo/driver_info.py index 53fbfd3428..f24321d973 100644 --- a/pymongo/driver_info.py +++ b/pymongo/driver_info.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,7 +12,11 @@ # implied. See the License for the specific language governing # permissions and limitations under the License. -"""Advanced options for MongoDB drivers implemented on top of PyMongo.""" +"""Advanced options for MongoDB drivers implemented on top of PyMongo. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations from collections import namedtuple from typing import Optional @@ -30,13 +34,12 @@ class DriverInfo(namedtuple("DriverInfo", ["name", "version", "platform"])): def __new__( cls, name: str, version: Optional[str] = None, platform: Optional[str] = None - ) -> "DriverInfo": - self = super(DriverInfo, cls).__new__(cls, name, version, platform) + ) -> DriverInfo: + self = super().__new__(cls, name, version, platform) for key, value in self._asdict().items(): if value is not None and not isinstance(value, str): raise TypeError( - "Wrong type for DriverInfo %s option, value " - "must be an instance of str" % (key,) + f"Wrong type for DriverInfo {key} option, value must be an instance of str, not {type(value)}" ) return self diff --git a/pymongo/encryption.py b/pymongo/encryption.py index 9fef5963a6..71c1d4b723 100644 --- a/pymongo/encryption.py +++ b/pymongo/encryption.py @@ -1,10 +1,10 @@ -# Copyright 2019-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,873 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
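The ``DriverInfo`` check above rejects non-string fields; a brief sketch of how a wrapping library might identify itself to the server via the ``driver`` keyword of ``MongoClient`` (name and version here are hypothetical):

```python
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo

# Each non-None field must be a str, or DriverInfo.__new__ raises TypeError.
client = MongoClient(driver=DriverInfo(name="MyWrapper", version="1.0"))
```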
-"""Support for explicit client-side field level encryption.""" +"""Re-import of synchronous Encryption API for compatibility.""" +from __future__ import annotations -import contextlib -import enum -import socket -import weakref -from typing import Any, Mapping, Optional, Sequence +from pymongo.synchronous.encryption import * # noqa: F403 +from pymongo.synchronous.encryption import __doc__ as original_doc -try: - from pymongocrypt.auto_encrypter import AutoEncrypter - from pymongocrypt.errors import MongoCryptError # noqa: F401 - from pymongocrypt.explicit_encrypter import ExplicitEncrypter - from pymongocrypt.mongocrypt import MongoCryptOptions - from pymongocrypt.state_machine import MongoCryptCallback - - _HAVE_PYMONGOCRYPT = True -except ImportError: - _HAVE_PYMONGOCRYPT = False - MongoCryptCallback = object - -from bson import _dict_to_bson, decode, encode -from bson.binary import STANDARD, UUID_SUBTYPE, Binary -from bson.codec_options import CodecOptions -from bson.errors import BSONError -from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson -from bson.son import SON -from pymongo import _csot -from pymongo.cursor import Cursor -from pymongo.daemon import _spawn_daemon -from pymongo.encryption_options import AutoEncryptionOpts -from pymongo.errors import ( - ConfigurationError, - EncryptionError, - InvalidOperation, - ServerSelectionTimeoutError, -) -from pymongo.mongo_client import MongoClient -from pymongo.network import BLOCKING_IO_ERRORS -from pymongo.operations import UpdateOne -from pymongo.pool import PoolOptions, _configured_socket -from pymongo.read_concern import ReadConcern -from pymongo.results import BulkWriteResult, DeleteResult -from pymongo.ssl_support import get_ssl_context -from pymongo.uri_parser import parse_host -from pymongo.write_concern import WriteConcern - -_HTTPS_PORT = 443 -_KMS_CONNECT_TIMEOUT = 10 # TODO: CDRIVER-3262 will define this value. -_MONGOCRYPTD_TIMEOUT_MS = 10000 - - -_DATA_KEY_OPTS: CodecOptions = CodecOptions(document_class=SON, uuid_representation=STANDARD) -# Use RawBSONDocument codec options to avoid needlessly decoding -# documents from the key vault. -_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) - - -@contextlib.contextmanager -def _wrap_encryption_errors(): - """Context manager to wrap encryption related errors.""" - try: - yield - except BSONError: - # BSON encoding/decoding errors are unrelated to encryption so - # we should propagate them unchanged. - raise - except Exception as exc: - raise EncryptionError(exc) - - -class _EncryptionIO(MongoCryptCallback): # type: ignore - def __init__(self, client, key_vault_coll, mongocryptd_client, opts): - """Internal class to perform I/O on behalf of pymongocrypt.""" - self.client_ref: Any - # Use a weak ref to break reference cycle. - if client is not None: - self.client_ref = weakref.ref(client) - else: - self.client_ref = None - self.key_vault_coll = key_vault_coll.with_options( - codec_options=_KEY_VAULT_OPTS, - read_concern=ReadConcern(level="majority"), - write_concern=WriteConcern(w="majority"), - ) - self.mongocryptd_client = mongocryptd_client - self.opts = opts - self._spawned = False - - def kms_request(self, kms_context): - """Complete a KMS request. - - :Parameters: - - `kms_context`: A :class:`MongoCryptKmsContext`. 
- - :Returns: - None - """ - endpoint = kms_context.endpoint - message = kms_context.message - provider = kms_context.kms_provider - ctx = self.opts._kms_ssl_contexts.get(provider) - if ctx is None: - # Enable strict certificate verification, OCSP, match hostname, and - # SNI using the system default CA certificates. - ctx = get_ssl_context( - None, # certfile - None, # passphrase - None, # ca_certs - None, # crlfile - False, # allow_invalid_certificates - False, # allow_invalid_hostnames - False, - ) # disable_ocsp_endpoint_check - # CSOT: set timeout for socket creation. - connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) - opts = PoolOptions( - connect_timeout=connect_timeout, - socket_timeout=connect_timeout, - ssl_context=ctx, - ) - host, port = parse_host(endpoint, _HTTPS_PORT) - conn = _configured_socket((host, port), opts) - try: - conn.sendall(message) - while kms_context.bytes_needed > 0: - # CSOT: update timeout. - conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) - data = conn.recv(kms_context.bytes_needed) - if not data: - raise OSError("KMS connection closed") - kms_context.feed(data) - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") - finally: - conn.close() - - def collection_info(self, database, filter): - """Get the collection info for a namespace. - - The returned collection info is passed to libmongocrypt which reads - the JSON schema. - - :Parameters: - - `database`: The database on which to run listCollections. - - `filter`: The filter to pass to listCollections. - - :Returns: - The first document from the listCollections command response as BSON. - """ - with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: - for doc in cursor: - return _dict_to_bson(doc, False, _DATA_KEY_OPTS) - - def spawn(self): - """Spawn mongocryptd. - - Note this method is thread safe; at most one mongocryptd will start - successfully. - """ - self._spawned = True - args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] - args.extend(self.opts._mongocryptd_spawn_args) - _spawn_daemon(args) - - def mark_command(self, database, cmd): - """Mark a command for encryption. - - :Parameters: - - `database`: The database on which to run this command. - - `cmd`: The BSON command to run. - - :Returns: - The marked command response from mongocryptd. - """ - if not self._spawned and not self.opts._mongocryptd_bypass_spawn: - self.spawn() - # Database.command only supports mutable mappings so we need to decode - # the raw BSON command first. - inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) - try: - res = self.mongocryptd_client[database].command( - inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS - ) - except ServerSelectionTimeoutError: - if self.opts._mongocryptd_bypass_spawn: - raise - self.spawn() - res = self.mongocryptd_client[database].command( - inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS - ) - return res.raw - - def fetch_keys(self, filter): - """Yields one or more keys from the key vault. - - :Parameters: - - `filter`: The filter to pass to find. - - :Returns: - A generator which yields the requested keys from the key vault. - """ - with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: - for key in cursor: - yield key.raw - - def insert_data_key(self, data_key): - """Insert a data key into the key vault. - - :Parameters: - - `data_key`: The data key document to insert. - - :Returns: - The _id of the inserted data key document. 
- """ - raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) - data_key_id = raw_doc.get("_id") - if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: - raise TypeError("data_key _id must be Binary with a UUID subtype") - - self.key_vault_coll.insert_one(raw_doc) - return data_key_id - - def bson_encode(self, doc): - """Encode a document to BSON. - - A document can be any mapping type (like :class:`dict`). - - :Parameters: - - `doc`: mapping type representing a document - - :Returns: - The encoded BSON bytes. - """ - return encode(doc) - - def close(self): - """Release resources. - - Note it is not safe to call this method from __del__ or any GC hooks. - """ - self.client_ref = None - self.key_vault_coll = None - if self.mongocryptd_client: - self.mongocryptd_client.close() - self.mongocryptd_client = None - - -class RewrapManyDataKeyResult(object): - """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. - - .. versionadded:: 4.2 - """ - - def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: - self._bulk_write_result = bulk_write_result - - @property - def bulk_write_result(self) -> Optional[BulkWriteResult]: - """The result of the bulk write operation used to update the key vault - collection with one or more rewrapped data keys. If - :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, - no bulk write operation will be executed and this field will be - ``None``. - """ - return self._bulk_write_result - - -class _Encrypter(object): - """Encrypts and decrypts MongoDB commands. - - This class is used to support automatic encryption and decryption of - MongoDB commands.""" - - def __init__(self, client, opts): - """Create a _Encrypter for a client. - - :Parameters: - - `client`: The encrypted MongoClient. - - `opts`: The encrypted client's :class:`AutoEncryptionOpts`. - """ - if opts._schema_map is None: - schema_map = None - else: - schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) - - if opts._encrypted_fields_map is None: - encrypted_fields_map = None - else: - encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) - self._bypass_auto_encryption = opts._bypass_auto_encryption - self._internal_client = None - - def _get_internal_client(encrypter, mongo_client): - if mongo_client.options.pool_options.max_pool_size is None: - # Unlimited pool size, use the same client. - return mongo_client - # Else - limited pool size, use an internal client. 
- if encrypter._internal_client is not None: - return encrypter._internal_client - internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) - encrypter._internal_client = internal_client - return internal_client - - if opts._key_vault_client is not None: - key_vault_client = opts._key_vault_client - else: - key_vault_client = _get_internal_client(self, client) - - if opts._bypass_auto_encryption: - metadata_client = None - else: - metadata_client = _get_internal_client(self, client) - - db, coll = opts._key_vault_namespace.split(".", 1) - key_vault_coll = key_vault_client[db][coll] - - mongocryptd_client: MongoClient = MongoClient( - opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS - ) - - io_callbacks = _EncryptionIO(metadata_client, key_vault_coll, mongocryptd_client, opts) - self._auto_encrypter = AutoEncrypter( - io_callbacks, - MongoCryptOptions( - opts._kms_providers, - schema_map, - crypt_shared_lib_path=opts._crypt_shared_lib_path, - crypt_shared_lib_required=opts._crypt_shared_lib_required, - bypass_encryption=opts._bypass_auto_encryption, - encrypted_fields_map=encrypted_fields_map, - bypass_query_analysis=opts._bypass_query_analysis, - ), - ) - self._closed = False - - def encrypt(self, database, cmd, codec_options): - """Encrypt a MongoDB command. - - :Parameters: - - `database`: The database for this command. - - `cmd`: A command document. - - `codec_options`: The CodecOptions to use while encoding `cmd`. - - :Returns: - The encrypted command to execute. - """ - self._check_closed() - encoded_cmd = _dict_to_bson(cmd, False, codec_options) - with _wrap_encryption_errors(): - encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) - # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. - encrypt_cmd = _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) - return encrypt_cmd - - def decrypt(self, response): - """Decrypt a MongoDB command response. - - :Parameters: - - `response`: A MongoDB command response as BSON. - - :Returns: - The decrypted command response. - """ - self._check_closed() - with _wrap_encryption_errors(): - return self._auto_encrypter.decrypt(response) - - def _check_closed(self): - if self._closed: - raise InvalidOperation("Cannot use MongoClient after close") - - def close(self): - """Cleanup resources.""" - self._closed = True - self._auto_encrypter.close() - if self._internal_client: - self._internal_client.close() - self._internal_client = None - - -class Algorithm(str, enum.Enum): - """An enum that defines the supported encryption algorithms.""" - - AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" - """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" - AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" - """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" - INDEXED = "Indexed" - """Indexed. - - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - - .. versionadded:: 4.2 - """ - UNINDEXED = "Unindexed" - """Unindexed. - - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - - .. versionadded:: 4.2 - """ - - -class QueryType(str, enum.Enum): - """**(BETA)** An enum that defines the supported values for explicit encryption query_type. - - .. note:: Support for Queryable Encryption is in beta. - Backwards-breaking changes may be made before the final release. - - .. 
versionadded:: 4.2 - """ - - EQUALITY = "equality" - """Used to encrypt a value for an equality query.""" - - -class ClientEncryption(object): - """Explicit client-side field level encryption.""" - - def __init__( - self, - kms_providers: Mapping[str, Any], - key_vault_namespace: str, - key_vault_client: MongoClient, - codec_options: CodecOptions, - kms_tls_options: Optional[Mapping[str, Any]] = None, - ) -> None: - """Explicit client-side field level encryption. - - The ClientEncryption class encapsulates explicit operations on a key - vault collection that cannot be done directly on a MongoClient. Similar - to configuring auto encryption on a MongoClient, it is constructed with - a MongoClient (to a MongoDB cluster containing the key vault - collection), KMS provider configuration, and keyVaultNamespace. It - provides an API for explicitly encrypting and decrypting values, and - creating data keys. It does not provide an API to query keys from the - key vault collection, as this can be done directly on the MongoClient. - - See :ref:`explicit-client-side-encryption` for an example. - - :Parameters: - - `kms_providers`: Map of KMS provider options. The `kms_providers` - map values differ by provider: - - - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. - These are the AWS access key ID and AWS secret access key used - to generate KMS messages. An optional "sessionToken" may be - included to support temporary AWS credentials. - - `azure`: Map with "tenantId", "clientId", and "clientSecret" as - strings. Additionally, "identityPlatformEndpoint" may also be - specified as a string (defaults to 'login.microsoftonline.com'). - These are the Azure Active Directory credentials used to - generate Azure Key Vault messages. - - `gcp`: Map with "email" as a string and "privateKey" - as `bytes` or a base64 encoded string. - Additionally, "endpoint" may also be specified as a string - (defaults to 'oauth2.googleapis.com'). These are the - credentials used to generate Google Cloud KMS messages. - - `kmip`: Map with "endpoint" as a host with required port. - For example: ``{"endpoint": "example.com:443"}``. - - `local`: Map with "key" as `bytes` (96 bytes in length) or - a base64 encoded string which decodes - to 96 bytes. "key" is the master key used to encrypt/decrypt - data keys. This key should be generated and stored as securely - as possible. - - - `key_vault_namespace`: The namespace for the key vault collection. - The key vault collection contains all data keys used for encryption - and decryption. Data keys are stored as documents in this MongoDB - collection. Data keys are protected with encryption by a KMS - provider. - - `key_vault_client`: A MongoClient connected to a MongoDB cluster - containing the `key_vault_namespace` collection. - - `codec_options`: An instance of - :class:`~bson.codec_options.CodecOptions` to use when encoding a - value for encryption and decoding the decrypted BSON value. This - should be the same CodecOptions instance configured on the - MongoClient, Database, or Collection used to access application - data. - - `kms_tls_options` (optional): A map of KMS provider names to TLS - options to use when creating secure connections to KMS providers. - Accepts the same TLS options as - :class:`pymongo.mongo_client.MongoClient`. For example, to - override the system default CA file:: - - kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} - - Or to supply a client certificate:: - - kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} - - .. 
versionchanged:: 4.0 - Added the `kms_tls_options` parameter and the "kmip" KMS provider. - - .. versionadded:: 3.9 - """ - if not _HAVE_PYMONGOCRYPT: - raise ConfigurationError( - "client-side field level encryption requires the pymongocrypt " - "library: install a compatible version with: " - "python -m pip install 'pymongo[encryption]'" - ) - - if not isinstance(codec_options, CodecOptions): - raise TypeError("codec_options must be an instance of bson.codec_options.CodecOptions") - - self._kms_providers = kms_providers - self._key_vault_namespace = key_vault_namespace - self._key_vault_client = key_vault_client - self._codec_options = codec_options - - db, coll = key_vault_namespace.split(".", 1) - key_vault_coll = key_vault_client[db][coll] - - opts = AutoEncryptionOpts( - kms_providers, key_vault_namespace, kms_tls_options=kms_tls_options - ) - self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO( - None, key_vault_coll, None, opts - ) - self._encryption = ExplicitEncrypter( - self._io_callbacks, MongoCryptOptions(kms_providers, None) - ) - # Use the same key vault collection as the callback. - self._key_vault_coll = self._io_callbacks.key_vault_coll - - def create_data_key( - self, - kms_provider: str, - master_key: Optional[Mapping[str, Any]] = None, - key_alt_names: Optional[Sequence[str]] = None, - key_material: Optional[bytes] = None, - ) -> Binary: - """Create and insert a new data key into the key vault collection. - - :Parameters: - - `kms_provider`: The KMS provider to use. Supported values are - "aws", "azure", "gcp", "kmip", and "local". - - `master_key`: Identifies a KMS-specific key used to encrypt the - new data key. If the kmsProvider is "local" the `master_key` is - not applicable and may be omitted. - - If the `kms_provider` is "aws" it is required and has the - following fields:: - - - `region` (string): Required. The AWS region, e.g. "us-east-1". - - `key` (string): Required. The Amazon Resource Name (ARN) to - the AWS customer. - - `endpoint` (string): Optional. An alternate host to send KMS - requests to. May include port number, e.g. - "kms.us-east-1.amazonaws.com:443". - - If the `kms_provider` is "azure" it is required and has the - following fields:: - - - `keyVaultEndpoint` (string): Required. Host with optional - port, e.g. "example.vault.azure.net". - - `keyName` (string): Required. Key name in the key vault. - - `keyVersion` (string): Optional. Version of the key to use. - - If the `kms_provider` is "gcp" it is required and has the - following fields:: - - - `projectId` (string): Required. The Google cloud project ID. - - `location` (string): Required. The GCP location, e.g. "us-east1". - - `keyRing` (string): Required. Name of the key ring that contains - the key to use. - - `keyName` (string): Required. Name of the key to use. - - `keyVersion` (string): Optional. Version of the key to use. - - `endpoint` (string): Optional. Host with optional port. - Defaults to "cloudkms.googleapis.com". - - If the `kms_provider` is "kmip" it is optional and has the - following fields:: - - - `keyId` (string): Optional. `keyId` is the KMIP Unique - Identifier to a 96 byte KMIP Secret Data managed object. If - keyId is omitted, the driver creates a random 96 byte KMIP - Secret Data managed object. - - `endpoint` (string): Optional. Host with optional - port, e.g. "example.vault.azure.net:". - - - `key_alt_names` (optional): An optional list of string alternate - names used to reference a key. 
If a key is created with alternate - names, then encryption may refer to the key by the unique alternate - name instead of by ``key_id``. The following example shows creating - and referring to a data key by alternate name:: - - client_encryption.create_data_key("local", keyAltNames=["name1"]) - # reference the key with the alternate name - client_encryption.encrypt("457-55-5462", keyAltName="name1", - algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) - - `key_material` (optional): Sets the custom key material to be used - by the data key for encryption and decryption. - - :Returns: - The ``_id`` of the created data key document as a - :class:`~bson.binary.Binary` with subtype - :data:`~bson.binary.UUID_SUBTYPE`. - - .. versionchanged:: 4.2 - Added the `key_material` parameter. - """ - self._check_closed() - with _wrap_encryption_errors(): - return self._encryption.create_data_key( - kms_provider, - master_key=master_key, - key_alt_names=key_alt_names, - key_material=key_material, - ) - - def encrypt( - self, - value: Any, - algorithm: str, - key_id: Optional[Binary] = None, - key_alt_name: Optional[str] = None, - query_type: Optional[str] = None, - contention_factor: Optional[int] = None, - ) -> Binary: - """Encrypt a BSON value with a given key and algorithm. - - Note that exactly one of ``key_id`` or ``key_alt_name`` must be - provided. - - :Parameters: - - `value`: The BSON value to encrypt. - - `algorithm` (string): The encryption algorithm to use. See - :class:`Algorithm` for some valid options. - - `key_id`: Identifies a data key by ``_id`` which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - - `key_alt_name`: Identifies a key vault document by 'keyAltName'. - - `query_type` (str): **(BETA)** The query type to execute. See - :class:`QueryType` for valid options. - - `contention_factor` (int): **(BETA)** The contention factor to use - when the algorithm is :attr:`Algorithm.INDEXED`. An integer value - *must* be given when the :attr:`Algorithm.INDEXED` algorithm is - used. - - .. note:: `query_type` and `contention_factor` are part of the - Queryable Encryption beta. Backwards-breaking changes may be made before the - final release. - - :Returns: - The encrypted value, a :class:`~bson.binary.Binary` with subtype 6. - - .. versionchanged:: 4.2 - Added the `query_type` and `contention_factor` parameters. - - """ - self._check_closed() - if key_id is not None and not ( - isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE - ): - raise TypeError("key_id must be a bson.binary.Binary with subtype 4") - - doc = encode({"v": value}, codec_options=self._codec_options) - with _wrap_encryption_errors(): - encrypted_doc = self._encryption.encrypt( - doc, - algorithm, - key_id=key_id, - key_alt_name=key_alt_name, - query_type=query_type, - contention_factor=contention_factor, - ) - return decode(encrypted_doc)["v"] # type: ignore[index] - - def decrypt(self, value: Binary) -> Any: - """Decrypt an encrypted value. - - :Parameters: - - `value` (Binary): The encrypted value, a - :class:`~bson.binary.Binary` with subtype 6. - - :Returns: - The decrypted BSON value. 
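To ground the explicit API above, here is a minimal round-trip sketch exercising the constructor, create_data_key(), encrypt(), and decrypt() as documented; the throwaway 96-byte local master key and the "encryption.__keyVault" namespace are illustrative assumptions, not requirements::

    import os

    from bson.codec_options import CodecOptions
    from pymongo import MongoClient
    from pymongo.encryption import Algorithm, ClientEncryption

    # Assumption: a fresh local master key; real deployments must store this securely.
    kms_providers = {"local": {"key": os.urandom(96)}}
    client = MongoClient()
    with ClientEncryption(
        kms_providers, "encryption.__keyVault", client, CodecOptions()
    ) as client_encryption:
        # "local" needs no master_key; "aws"/"azure"/"gcp"/"kmip" take the
        # provider-specific master_key documents described above.
        key_id = client_encryption.create_data_key("local", key_alt_names=["demo"])
        ciphertext = client_encryption.encrypt(
            "457-55-5462",
            Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
            key_id=key_id,
        )
        assert client_encryption.decrypt(ciphertext) == "457-55-5462"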
- """ - self._check_closed() - if not (isinstance(value, Binary) and value.subtype == 6): - raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6") - - with _wrap_encryption_errors(): - doc = encode({"v": value}) - decrypted_doc = self._encryption.decrypt(doc) - return decode(decrypted_doc, codec_options=self._codec_options)["v"] - - def get_key(self, id: Binary) -> Optional[RawBSONDocument]: - """Get a data key by id. - - :Parameters: - - `id` (Binary): The UUID of a key a which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - - :Returns: - The key document. - - .. versionadded:: 4.2 - """ - self._check_closed() - return self._key_vault_coll.find_one({"_id": id}) - - def get_keys(self) -> Cursor[RawBSONDocument]: - """Get all of the data keys. - - :Returns: - An instance of :class:`~pymongo.cursor.Cursor` over the data key - documents. - - .. versionadded:: 4.2 - """ - self._check_closed() - return self._key_vault_coll.find({}) - - def delete_key(self, id: Binary) -> DeleteResult: - """Delete a key document in the key vault collection that has the given ``key_id``. - - :Parameters: - - `id` (Binary): The UUID of a key a which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - - :Returns: - The delete result. - - .. versionadded:: 4.2 - """ - self._check_closed() - return self._key_vault_coll.delete_one({"_id": id}) - - def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any: - """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``. - - :Parameters: - - ``id``: The UUID of a key a which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - - ``key_alt_name``: The key alternate name to add. - - :Returns: - The previous version of the key document. - - .. versionadded:: 4.2 - """ - self._check_closed() - update = {"$addToSet": {"keyAltNames": key_alt_name}} - return self._key_vault_coll.find_one_and_update({"_id": id}, update) - - def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: - """Get a key document in the key vault collection that has the given ``key_alt_name``. - - :Parameters: - - `key_alt_name`: (str): The key alternate name of the key to get. - - :Returns: - The key document. - - .. versionadded:: 4.2 - """ - self._check_closed() - return self._key_vault_coll.find_one({"keyAltNames": key_alt_name}) - - def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]: - """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``. - - Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty. - - :Parameters: - - ``id``: The UUID of a key a which must be a - :class:`~bson.binary.Binary` with subtype 4 ( - :attr:`~bson.binary.UUID_SUBTYPE`). - - ``key_alt_name``: The key alternate name to remove. - - :Returns: - Returns the previous version of the key document. - - .. 
versionadded:: 4.2 - """ - self._check_closed() - pipeline = [ - { - "$set": { - "keyAltNames": { - "$cond": [ - {"$eq": ["$keyAltNames", [key_alt_name]]}, - "$$REMOVE", - { - "$filter": { - "input": "$keyAltNames", - "cond": {"$ne": ["$$this", key_alt_name]}, - } - }, - ] - } - } - } - ] - return self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) - - def rewrap_many_data_key( - self, - filter: Mapping[str, Any], - provider: Optional[str] = None, - master_key: Optional[Mapping[str, Any]] = None, - ) -> RewrapManyDataKeyResult: - """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. - - :Parameters: - - `filter`: A document used to filter the data keys. - - `provider`: The new KMS provider to use to encrypt the data keys, - or ``None`` to use the current KMS provider(s). - - ``master_key``: The master key fields corresponding to the new KMS - provider when ``provider`` is not ``None``. - - :Returns: - A :class:`RewrapManyDataKeyResult`. - - .. versionadded:: 4.2 - """ - self._check_closed() - with _wrap_encryption_errors(): - raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) - if raw_result is None: - return RewrapManyDataKeyResult() - - raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) - replacements = [] - for key in raw_doc["v"]: - update_model = { - "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, - "$currentDate": {"updateDate": True}, - } - op = UpdateOne({"_id": key["_id"]}, update_model) - replacements.append(op) - if not replacements: - return RewrapManyDataKeyResult() - result = self._key_vault_coll.bulk_write(replacements) - return RewrapManyDataKeyResult(result) - - def __enter__(self) -> "ClientEncryption": - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - def _check_closed(self): - if self._encryption is None: - raise InvalidOperation("Cannot use closed ClientEncryption") - - def close(self) -> None: - """Release resources. - - Note that using this class in a with-statement will automatically call - :meth:`close`:: - - with ClientEncryption(...) as client_encryption: - encrypted = client_encryption.encrypt(value, ...) - decrypted = client_encryption.decrypt(encrypted) - - """ - if self._io_callbacks: - self._io_callbacks.close() - self._encryption.close() - self._io_callbacks = None - self._encryption = None +__doc__ = original_doc +__all__ = ["Algorithm", "ClientEncryption", "QueryType", "RewrapManyDataKeyResult"] # noqa: F405 diff --git a/pymongo/encryption_options.py b/pymongo/encryption_options.py index c5e6f47837..b2037617b0 100644 --- a/pymongo/encryption_options.py +++ b/pymongo/encryption_options.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,63 +12,84 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Support for automatic client-side field level encryption.""" +"""Support for automatic client-side field level encryption. -from typing import TYPE_CHECKING, Any, List, Mapping, Optional +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. 
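Looping back before encryption_options.py continues: the key-management helpers above (get_key_by_alt_name, add_key_alt_name, rewrap_many_data_key) compose naturally. A hedged sketch continuing from the earlier round-trip example's client_encryption handle, and assuming RewrapManyDataKeyResult exposes the underlying bulk_write_result as in the public driver API::

    # Look up a key by alternate name and tag it with a second name.
    key_doc = client_encryption.get_key_by_alt_name("demo")
    if key_doc is not None:
        client_encryption.add_key_alt_name(key_doc["_id"], "demo-backup")

    # Re-encrypt every data key with the current KMS provider(s);
    # an empty filter matches all key documents.
    result = client_encryption.rewrap_many_data_key({})
    if result.bulk_write_result is not None:
        print(result.bulk_write_result.modified_count, "keys rewrapped")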
+""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, TypedDict + +from pymongo.uri_parser_shared import _parse_kms_tls_options try: - import pymongocrypt # noqa: F401 + import pymongocrypt # type:ignore[import-untyped] # noqa: F401 + + # Check for pymongocrypt>=1.10. + from pymongocrypt import synchronous as _ # noqa: F401 _HAVE_PYMONGOCRYPT = True except ImportError: _HAVE_PYMONGOCRYPT = False - -from pymongo.common import validate_is_mapping +from bson import int64 +from pymongo.common import check_for_min_version, validate_is_mapping from pymongo.errors import ConfigurationError -from pymongo.uri_parser import _parse_kms_tls_options if TYPE_CHECKING: - from pymongo.mongo_client import MongoClient + from pymongo.pyopenssl_context import SSLContext + from pymongo.typings import _AgnosticMongoClient + + +def check_min_pymongocrypt() -> None: + """Raise an appropriate error if the min pymongocrypt is not installed.""" + pymongocrypt_version, required_version, is_valid = check_for_min_version("pymongocrypt") + if not is_valid: + raise ConfigurationError( + f"client side encryption requires pymongocrypt>={required_version}, " + f"found version {pymongocrypt_version}. " + "Install a compatible version with: " + "python -m pip install 'pymongo[encryption]'" + ) -class AutoEncryptionOpts(object): +class AutoEncryptionOpts: """Options to configure automatic client-side field level encryption.""" def __init__( self, kms_providers: Mapping[str, Any], key_vault_namespace: str, - key_vault_client: Optional["MongoClient"] = None, + key_vault_client: Optional[_AgnosticMongoClient] = None, schema_map: Optional[Mapping[str, Any]] = None, bypass_auto_encryption: bool = False, mongocryptd_uri: str = "mongodb://localhost:27020", mongocryptd_bypass_spawn: bool = False, mongocryptd_spawn_path: str = "mongocryptd", - mongocryptd_spawn_args: Optional[List[str]] = None, + mongocryptd_spawn_args: Optional[list[str]] = None, kms_tls_options: Optional[Mapping[str, Any]] = None, crypt_shared_lib_path: Optional[str] = None, crypt_shared_lib_required: bool = False, bypass_query_analysis: bool = False, - encrypted_fields_map: Optional[Mapping] = None, + encrypted_fields_map: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, ) -> None: """Options to configure automatic client-side field level encryption. - Automatic client-side field level encryption requires MongoDB 4.2 - enterprise or a MongoDB 4.2 Atlas cluster. Automatic encryption is not + Automatic client-side field level encryption requires MongoDB >=4.2 + enterprise or a MongoDB >=4.2 Atlas cluster. Automatic encryption is not supported for operations on a database or view and will result in error. - Although automatic encryption requires MongoDB 4.2 enterprise or a - MongoDB 4.2 Atlas cluster, automatic *decryption* is supported for all + Although automatic encryption requires MongoDB >=4.2 enterprise or a + MongoDB >=4.2 Atlas cluster, automatic *decryption* is supported for all users. To configure automatic *decryption* without automatic *encryption* set ``bypass_auto_encryption=True``. Explicit encryption and explicit decryption is also supported for all users - with the :class:`~pymongo.encryption.ClientEncryption` class. + with the :class:`~pymongo.asynchronous.encryption.AsyncClientEncryption` and :class:`~pymongo.encryption.ClientEncryption` classes. - See :ref:`automatic-client-side-encryption` for an example. + See `client-side field level encryption `_ for an example. 
- :Parameters: - - `kms_providers`: Map of KMS provider options. The `kms_providers` + :param kms_providers: Map of KMS provider options. The `kms_providers` map values differ by provider: - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. @@ -93,16 +114,27 @@ def __init__( data keys. This key should be generated and stored as securely as possible. - - `key_vault_namespace`: The namespace for the key vault collection. + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support `CSFLE on-demand credentials `_. + Named KMS providers enable more than one of each KMS provider type to be configured. + For example, to configure multiple local KMS providers:: + + kms_providers = { + "local": {"key": local_kek1}, # Unnamed KMS provider. + "local:myname": {"key": local_kek2}, # Named KMS provider with name "myname". + } + + :param key_vault_namespace: The namespace for the key vault collection. The key vault collection contains all data keys used for encryption and decryption. Data keys are stored as documents in this MongoDB collection. Data keys are protected with encryption by a KMS provider. - - `key_vault_client` (optional): By default the key vault collection + :param key_vault_client: By default, the key vault collection is assumed to reside in the same MongoDB cluster as the encrypted - MongoClient. Use this option to route data key queries to a + AsyncMongoClient/MongoClient. Use this option to route data key queries to a separate MongoDB cluster. - - `schema_map` (optional): Map of collection namespace ("db.coll") to + :param schema_map: Map of collection namespace ("db.coll") to JSON Schema. By default, a collection's JSONSchema is periodically polled with the listCollections command. But a JSONSchema may be specified locally with the schemaMap option. @@ -117,27 +149,27 @@ def __init__( automatic encryption for client side encryption. Other validation rules in the JSON schema will not be enforced by the driver and will result in an error. - - `bypass_auto_encryption` (optional): If ``True``, automatic + :param bypass_auto_encryption: If ``True``, automatic encryption will be disabled but automatic decryption will still be enabled. Defaults to ``False``. - - `mongocryptd_uri` (optional): The MongoDB URI used to connect + :param mongocryptd_uri: The MongoDB URI used to connect to the *local* mongocryptd process. Defaults to ``'mongodb://localhost:27020'``. - - `mongocryptd_bypass_spawn` (optional): If ``True``, the encrypted - MongoClient will not attempt to spawn the mongocryptd process. + :param mongocryptd_bypass_spawn: If ``True``, the encrypted + AsyncMongoClient/MongoClient will not attempt to spawn the mongocryptd process. Defaults to ``False``. - - `mongocryptd_spawn_path` (optional): Used for spawning the + :param mongocryptd_spawn_path: Used for spawning the mongocryptd process. Defaults to ``'mongocryptd'`` and spawns mongocryptd from the system path. - - `mongocryptd_spawn_args` (optional): A list of string arguments to + :param mongocryptd_spawn_args: A list of string arguments to use when spawning the mongocryptd process. Defaults to ``['--idleShutdownTimeoutSecs=60']``. If the list does not include the ``idleShutdownTimeoutSecs`` option then ``'--idleShutdownTimeoutSecs=60'`` will be added.
- - `kms_tls_options` (optional): A map of KMS provider names to TLS + :param kms_tls_options: A map of KMS provider names to TLS options to use when creating secure connections to KMS providers. Accepts the same TLS options as - :class:`pymongo.mongo_client.MongoClient`. For example, to + :class:`pymongo.mongo_client.AsyncMongoClient` and :class:`pymongo.mongo_client.MongoClient`. For example, to override the system default CA file:: kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} @@ -145,20 +177,19 @@ def __init__( Or to supply a client certificate:: kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} - - `crypt_shared_lib_path` (optional): Override the path to load the crypt_shared library. - - `crypt_shared_lib_required` (optional): If True, raise an error if libmongocrypt is + :param crypt_shared_lib_path: Override the path to load the crypt_shared library. + :param crypt_shared_lib_required: If True, raise an error if libmongocrypt is unable to load the crypt_shared library. - - `bypass_query_analysis` (optional): **(BETA)** If ``True``, disable automatic analysis + :param bypass_query_analysis: If ``True``, disable automatic analysis of outgoing commands. Set `bypass_query_analysis` to use explicit encryption on indexed fields without the MongoDB Enterprise Advanced licensed crypt_shared library. - - `encrypted_fields_map`: **(BETA)** Map of collection namespace ("db.coll") to documents + :param encrypted_fields_map: Map of collection namespace ("db.coll") to documents that described the encrypted fields for Queryable Encryption. For example:: { "db.encryptedCollection": { "escCollection": "enxcol_.encryptedCollection.esc", - "eccCollection": "enxcol_.encryptedCollection.ecc", "ecocCollection": "enxcol_.encryptedCollection.ecoc", "fields": [ { @@ -175,13 +206,14 @@ def __init__( ] } } + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None`` which defers to libmongocrypt's default which is currently 60000. + Set to 0 to disable key expiration. - .. note:: `bypass_query_analysis` and `encrypted_fields_map` are part of the - Queryable Encryption beta. Backwards-breaking changes may be made before the - final release. - + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. .. versionchanged:: 4.2 - Added `encrypted_fields_map` `crypt_shared_lib_path`, `crypt_shared_lib_required`, + Added the `encrypted_fields_map`, `crypt_shared_lib_path`, `crypt_shared_lib_required`, and `bypass_query_analysis` parameters. .. 
versionchanged:: 4.0 @@ -195,10 +227,10 @@ def __init__( "install a compatible version with: " "python -m pip install 'pymongo[encryption]'" ) + check_min_pymongocrypt() if encrypted_fields_map: validate_is_mapping("encrypted_fields_map", encrypted_fields_map) self._encrypted_fields_map = encrypted_fields_map - self._bypass_query_analysis = bypass_query_analysis self._crypt_shared_lib_path = crypt_shared_lib_path self._crypt_shared_lib_required = crypt_shared_lib_required self._kms_providers = kms_providers @@ -213,9 +245,148 @@ def __init__( mongocryptd_spawn_args = ["--idleShutdownTimeoutSecs=60"] self._mongocryptd_spawn_args = mongocryptd_spawn_args if not isinstance(self._mongocryptd_spawn_args, list): - raise TypeError("mongocryptd_spawn_args must be a list") + raise TypeError( + f"mongocryptd_spawn_args must be a list, not {type(self._mongocryptd_spawn_args)}" + ) if not any("idleShutdownTimeoutSecs" in s for s in self._mongocryptd_spawn_args): self._mongocryptd_spawn_args.append("--idleShutdownTimeoutSecs=60") # Maps KMS provider name to a SSLContext. - self._kms_ssl_contexts = _parse_kms_tls_options(kms_tls_options) + self._kms_tls_options = kms_tls_options + self._sync_kms_ssl_contexts: Optional[dict[str, SSLContext]] = None + self._async_kms_ssl_contexts: Optional[dict[str, SSLContext]] = None self._bypass_query_analysis = bypass_query_analysis + self._key_expiration_ms = key_expiration_ms + + def _kms_ssl_contexts(self, is_sync: bool) -> dict[str, SSLContext]: + if is_sync: + if self._sync_kms_ssl_contexts is None: + self._sync_kms_ssl_contexts = _parse_kms_tls_options(self._kms_tls_options, True) + return self._sync_kms_ssl_contexts + else: + if self._async_kms_ssl_contexts is None: + self._async_kms_ssl_contexts = _parse_kms_tls_options(self._kms_tls_options, False) + return self._async_kms_ssl_contexts + + +class RangeOpts: + """Options to configure encrypted queries using the range algorithm.""" + + def __init__( + self, + sparsity: Optional[int] = None, + trim_factor: Optional[int] = None, + min: Optional[Any] = None, + max: Optional[Any] = None, + precision: Optional[int] = None, + ) -> None: + """Options to configure encrypted queries using the range algorithm. + + :param sparsity: An integer. + :param trim_factor: An integer. + :param min: A BSON scalar value corresponding to the type being queried. + :param max: A BSON scalar value corresponding to the type being queried. + :param precision: An integer, may only be set for double or decimal128 types. + + .. versionadded:: 4.4 + """ + self.min = min + self.max = max + self.sparsity = sparsity + self.trim_factor = trim_factor + self.precision = precision + + @property + def document(self) -> dict[str, Any]: + doc = {} + for k, v in [ + ("sparsity", int64.Int64(self.sparsity) if self.sparsity else None), + ("trimFactor", self.trim_factor), + ("precision", self.precision), + ("min", self.min), + ("max", self.max), + ]: + if v is not None: + doc[k] = v + return doc + + +class TextOpts: + """**BETA** Options to configure encrypted queries using the text algorithm. + + TextOpts is currently unstable API and subject to backwards breaking changes.""" + + def __init__( + self, + substring: Optional[SubstringOpts] = None, + prefix: Optional[PrefixOpts] = None, + suffix: Optional[SuffixOpts] = None, + case_sensitive: Optional[bool] = None, + diacritic_sensitive: Optional[bool] = None, + ) -> None: + """Options to configure encrypted queries using the text algorithm. 
+ + :param substring: Further options to support substring queries. + :param prefix: Further options to support prefix queries. + :param suffix: Further options to support suffix queries. + :param case_sensitive: Whether text indexes for this field are case sensitive. + :param diacritic_sensitive: Whether text indexes for this field are diacritic sensitive. + + .. versionadded:: 4.15 + """ + self.substring = substring + self.prefix = prefix + self.suffix = suffix + self.case_sensitive = case_sensitive + self.diacritic_sensitive = diacritic_sensitive + + @property + def document(self) -> dict[str, Any]: + doc = {} + for k, v in [ + ("substring", self.substring), + ("prefix", self.prefix), + ("suffix", self.suffix), + ("caseSensitive", self.case_sensitive), + ("diacriticSensitive", self.diacritic_sensitive), + ]: + if v is not None: + doc[k] = v + return doc + + +class SubstringOpts(TypedDict): + """**BETA** Options for substring text queries. + + SubstringOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMaxLength is the maximum allowed length to insert. Inserting longer strings will error. + strMaxLength: int + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int + + +class PrefixOpts(TypedDict): + """**BETA** Options for prefix text queries. + + PrefixOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int + + +class SuffixOpts(TypedDict): + """**BETA** Options for suffix text queries. + + SuffixOpts is currently unstable API and subject to backwards breaking changes. + """ + + # strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error. + strMinQueryLength: int + # strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error. + strMaxQueryLength: int diff --git a/pymongo/errors.py b/pymongo/errors.py index efc7e2eca0..794b5a9398 100644 --- a/pymongo/errors.py +++ b/pymongo/errors.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,28 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Exceptions raised by PyMongo.""" -from typing import Any, Iterable, List, Mapping, Optional, Sequence, Tuple, Union +"""Exceptions raised by PyMongo. -from bson.errors import InvalidDocument +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. 
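Stepping back to the options classes that close out encryption_options.py above: RangeOpts and TextOpts both reduce to plain BSON-ready documents through their ``document`` properties, dropping unset fields. A small illustration with arbitrary values (TextOpts and the *Opts TypedDicts are BETA, per their docstrings)::

    from pymongo.encryption_options import RangeOpts, TextOpts

    range_opts = RangeOpts(sparsity=1, min=0.0, max=200.0, precision=2)
    # sparsity is coerced to Int64, which compares equal to 1.
    assert range_opts.document == {"sparsity": 1, "precision": 2, "min": 0.0, "max": 200.0}

    text_opts = TextOpts(
        prefix={"strMinQueryLength": 2, "strMaxQueryLength": 10},
        case_sensitive=False,
    )
    assert text_opts.document == {
        "prefix": {"strMinQueryLength": 2, "strMaxQueryLength": 10},
        "caseSensitive": False,
    }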
+""" +from __future__ import annotations + +from ssl import SSLCertVerificationError as _CertificateError # noqa: F401 +from typing import TYPE_CHECKING, Any, Iterable, Mapping, Optional, Sequence, Union -try: - # CPython 3.7+ - from ssl import SSLCertVerificationError as _CertificateError -except ImportError: - try: - from ssl import CertificateError as _CertificateError - except ImportError: +from bson.errors import InvalidDocument - class _CertificateError(ValueError): # type: ignore - pass +if TYPE_CHECKING: + from pymongo.results import ClientBulkWriteResult + from pymongo.typings import _DocumentOut class PyMongoError(Exception): """Base class for all PyMongo exceptions.""" def __init__(self, message: str = "", error_labels: Optional[Iterable[str]] = None) -> None: - super(PyMongoError, self).__init__(message) + super().__init__(message) self._message = message self._error_labels = set(error_labels or []) @@ -44,11 +43,11 @@ def has_error_label(self, label: str) -> bool: """ return label in self._error_labels - def _add_error_label(self, label): + def _add_error_label(self, label: str) -> None: """Add the given label to this error.""" self._error_labels.add(label) - def _remove_error_label(self, label): + def _remove_error_label(self, label: str) -> None: """Remove the given label from this error.""" self._error_labels.discard(label) @@ -95,17 +94,17 @@ class AutoReconnect(ConnectionFailure): Subclass of :exc:`~pymongo.errors.ConnectionFailure`. """ - errors: Union[Mapping[str, Any], Sequence] - details: Union[Mapping[str, Any], Sequence] + errors: Union[Mapping[str, Any], Sequence[Any]] + details: Union[Mapping[str, Any], Sequence[Any]] def __init__( - self, message: str = "", errors: Optional[Union[Mapping[str, Any], Sequence]] = None + self, message: str = "", errors: Optional[Union[Mapping[str, Any], Sequence[Any]]] = None ) -> None: error_labels = None if errors is not None: if isinstance(errors, dict): error_labels = errors.get("errorLabels") - super(AutoReconnect, self).__init__(message, error_labels) + super().__init__(message, error_labels) self.errors = self.details = errors or [] @@ -123,9 +122,11 @@ def timeout(self) -> bool: return True -def _format_detailed_error(message, details): +def _format_detailed_error( + message: str, details: Optional[Union[Mapping[str, Any], list[Any]]] +) -> str: if details is not None: - message = "%s, full error: %s" % (message, details) + message = f"{message}, full error: {details}" return message @@ -146,11 +147,9 @@ class NotPrimaryError(AutoReconnect): """ def __init__( - self, message: str = "", errors: Optional[Union[Mapping[str, Any], List]] = None + self, message: str = "", errors: Optional[Union[Mapping[str, Any], list[Any]]] = None ) -> None: - super(NotPrimaryError, self).__init__( - _format_detailed_error(message, errors), errors=errors - ) + super().__init__(_format_detailed_error(message, errors), errors=errors) class ServerSelectionTimeoutError(AutoReconnect): @@ -191,15 +190,13 @@ def __init__( error_labels = None if details is not None: error_labels = details.get("errorLabels") - super(OperationFailure, self).__init__( - _format_detailed_error(error, details), error_labels=error_labels - ) + super().__init__(_format_detailed_error(error, details), error_labels=error_labels) self.__code = code self.__details = details self.__max_wire_version = max_wire_version @property - def _max_wire_version(self): + def _max_wire_version(self) -> Optional[int]: return self.__max_wire_version @property @@ -290,12 +287,12 @@ class 
BulkWriteError(OperationFailure): .. versionadded:: 2.7 """ - details: Mapping[str, Any] + details: _DocumentOut - def __init__(self, results: Mapping[str, Any]) -> None: - super(BulkWriteError, self).__init__("batch op errors occurred", 65, results) + def __init__(self, results: _DocumentOut) -> None: + super().__init__("batch op errors occurred", 65, results) - def __reduce__(self) -> Tuple[Any, Any]: + def __reduce__(self) -> tuple[Any, Any]: return self.__class__, (self.details,) @property @@ -312,6 +309,62 @@ def timeout(self) -> bool: return False +class ClientBulkWriteException(OperationFailure): + """Exception class for client-level bulk write errors.""" + + details: _DocumentOut + verbose: bool + + def __init__(self, results: _DocumentOut, verbose: bool) -> None: + super().__init__("batch op errors occurred", 65, results) + self.verbose = verbose + + def __reduce__(self) -> tuple[Any, Any]: + return self.__class__, (self.details,) + + @property + def error(self) -> Optional[Any]: + """A top-level error that occurred when attempting to + communicate with the server or execute the bulk write. + + This value may not be populated if the exception was + thrown due to errors occurring on individual writes. + """ + return self.details.get("error", None) + + @property + def write_concern_errors(self) -> Optional[list[WriteConcernError]]: + """Write concern errors that occurred during the bulk write. + + This list may have multiple items if more than one + server command was required to execute the bulk write. + """ + return self.details.get("writeConcernErrors", []) + + @property + def write_errors(self) -> Optional[list[WriteError]]: + """Errors that occurred during the execution of individual write operations. + + This list will contain at most one entry if the bulk write was ordered. + """ + return self.details.get("writeErrors", {}) + + @property + def partial_result(self) -> Optional[ClientBulkWriteResult]: + """The results of any successful operations that were + performed before the error was encountered. + """ + from pymongo.results import ClientBulkWriteResult + + if self.details.get("anySuccessful"): + return ClientBulkWriteResult( + self.details, # type: ignore[arg-type] + acknowledged=True, + has_verbose_results=self.verbose, + ) + return None + + class InvalidOperation(PyMongoError): """Raised when a client attempts to perform an invalid operation.""" @@ -331,8 +384,6 @@ class InvalidURI(ConfigurationError): class DocumentTooLarge(InvalidDocument): """Raised when an encoded document is too large for the connected server.""" - pass - class EncryptionError(PyMongoError): """Raised when encryption or decryption fails. @@ -344,7 +395,7 @@ class EncryptionError(PyMongoError): """ def __init__(self, cause: Exception) -> None: - super(EncryptionError, self).__init__(str(cause)) + super().__init__(str(cause)) self.__cause = cause @property @@ -359,7 +410,27 @@ def timeout(self) -> bool: return False +class EncryptedCollectionError(EncryptionError): + """Raised when creating a collection with encrypted_fields fails. + + .. versionadded:: 4.4 + """ + + def __init__(self, cause: Exception, encrypted_fields: Mapping[str, Any]) -> None: + super().__init__(cause) + self.__encrypted_fields = encrypted_fields + + @property + def encrypted_fields(self) -> Mapping[str, Any]: + """The encrypted_fields document that allows inferring which data keys are *known* to be created. 
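Backing up to ClientBulkWriteException above: its accessors separate top-level failures from per-write failures, and partial_result recovers whatever succeeded before the error. A hedged sketch; ``client`` and ``models`` are assumed context around any MongoClient.bulk_write call::

    from pymongo.errors import ClientBulkWriteException

    try:
        client.bulk_write(models)  # models: an assumed list of client-level write models
    except ClientBulkWriteException as exc:
        if exc.error is not None:
            print("top-level error:", exc.error)
        for write_error in exc.write_errors or []:
            print("individual write error:", write_error)
        partial = exc.partial_result
        if partial is not None:
            print("documents inserted before the failure:", partial.inserted_count)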
+ + Note that the returned document is not guaranteed to contain information about *all* of the data keys that + were created, for example in the case of an indefinite error like a timeout. Use the `cause` property to + determine whether a definite or indefinite error caused this error, and only rely on the accuracy of the + encrypted_fields if the error is definite. + """ + return self.__encrypted_fields + + class _OperationCancelled(AutoReconnect): """Internal error raised when a socket operation is cancelled.""" - - pass diff --git a/pymongo/event_loggers.py b/pymongo/event_loggers.py index 248dfb17bd..80acaa10c0 100644 --- a/pymongo/event_loggers.py +++ b/pymongo/event_loggers.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -25,7 +25,12 @@ or ``MongoClient(event_listeners=[CommandLogger()])`` + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. + """ +from __future__ import annotations + import logging from pymongo import monitoring @@ -179,7 +184,7 @@ class ConnectionPoolLogger(monitoring.ConnectionPoolListener): def pool_created(self, event: monitoring.PoolCreatedEvent) -> None: logging.info(f"[pool {event.address}] pool created") - def pool_ready(self, event): + def pool_ready(self, event: monitoring.PoolReadyEvent) -> None: logging.info(f"[pool {event.address}] pool ready") def pool_cleared(self, event: monitoring.PoolClearedEvent) -> None: diff --git a/pymongo/hello.py b/pymongo/hello.py index 92e9b426c0..1eb40ed929 100644 --- a/pymongo/hello.py +++ b/pymongo/hello.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,27 +13,20 @@ # limitations under the License. """Helpers for the 'hello' and legacy hello commands.""" +from __future__ import annotations import copy import datetime import itertools -from typing import Any, Generic, List, Mapping, Optional, Set, Tuple +from typing import Any, Generic, Mapping, Optional from bson.objectid import ObjectId from pymongo import common from pymongo.server_type import SERVER_TYPE -from pymongo.typings import _DocumentType +from pymongo.typings import ClusterTime, _DocumentType -class HelloCompat: - CMD = "hello" - LEGACY_CMD = "ismaster" - PRIMARY = "isWritablePrimary" - LEGACY_PRIMARY = "ismaster" - LEGACY_ERROR = "not master" - - -def _get_server_type(doc): +def _get_server_type(doc: Mapping[str, Any]) -> int: """Determine the server type from a hello response.""" if not doc.get("ok"): return SERVER_TYPE.Unknown @@ -61,6 +54,14 @@ def _get_server_type(doc): return SERVER_TYPE.Standalone +class HelloCompat: + CMD = "hello" + LEGACY_CMD = "ismaster" + PRIMARY = "isWritablePrimary" + LEGACY_PRIMARY = "ismaster" + LEGACY_ERROR = "not master" + + class Hello(Generic[_DocumentType]): """Parse a hello response from the server. 
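The hello.py changes above classify raw hello responses; since the Hello constructor is not shown in this hunk, a quick illustration uses only the module-level helper with hand-built response documents::

    from pymongo.hello import _get_server_type
    from pymongo.server_type import SERVER_TYPE

    primary_doc = {
        "ok": 1,
        "isWritablePrimary": True,
        "setName": "rs0",
        "hosts": ["node1:27017", "node2:27017"],
    }
    assert _get_server_type(primary_doc) == SERVER_TYPE.RSPrimary
    assert _get_server_type({"ok": 0}) == SERVER_TYPE.Unknown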
@@ -95,7 +96,7 @@ def server_type(self) -> int: return self._server_type @property - def all_hosts(self) -> Set[Tuple[str, int]]: + def all_hosts(self) -> set[tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" return set( map( @@ -114,7 +115,7 @@ def tags(self) -> Mapping[str, Any]: return self._doc.get("tags", {}) @property - def primary(self) -> Optional[Tuple[str, int]]: + def primary(self) -> Optional[tuple[str, int]]: """This server's opinion about who the primary is, or None.""" if self._doc.get("primary"): return common.partition_node(self._doc["primary"]) @@ -132,7 +133,7 @@ def max_bson_size(self) -> int: @property def max_message_size(self) -> int: - return self._doc.get("maxMessageSizeBytes", 2 * self.max_bson_size) + return self._doc.get("maxMessageSizeBytes", common.MAX_MESSAGE_SIZE) @property def max_write_batch_size(self) -> int: @@ -155,7 +156,7 @@ def election_id(self) -> Optional[ObjectId]: return self._doc.get("electionId") @property - def cluster_time(self) -> Optional[Mapping[str, Any]]: + def cluster_time(self) -> Optional[ClusterTime]: return self._doc.get("$clusterTime") @property @@ -171,7 +172,7 @@ def is_readable(self) -> bool: return self._is_readable @property - def me(self) -> Optional[Tuple[str, int]]: + def me(self) -> Optional[tuple[str, int]]: me = self._doc.get("me") if me: return common.clean_node(me) @@ -182,11 +183,11 @@ def last_write_date(self) -> Optional[datetime.datetime]: return self._doc.get("lastWrite", {}).get("lastWriteDate") @property - def compressors(self) -> Optional[List[str]]: + def compressors(self) -> Optional[list[str]]: return self._doc.get("compression") @property - def sasl_supported_mechs(self) -> List[str]: + def sasl_supported_mechs(self) -> list[str]: """Supported authentication mechanisms for the current user. For example:: @@ -217,3 +218,7 @@ def service_id(self) -> Optional[ObjectId]: @property def hello_ok(self) -> bool: return self._doc.get("helloOk", False) + + @property + def connection_id(self) -> Optional[int]: + return self._doc.get("connectionId") diff --git a/pymongo/helpers.py b/pymongo/helpers.py deleted file mode 100644 index 4df8ab8e7a..0000000000 --- a/pymongo/helpers.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright 2009-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Bits and pieces used by the driver that don't really fit elsewhere.""" - -import sys -import traceback -from collections import abc -from typing import Any, List, NoReturn - -from bson.son import SON -from pymongo import ASCENDING -from pymongo.errors import ( - CursorNotFound, - DuplicateKeyError, - ExecutionTimeout, - NotPrimaryError, - OperationFailure, - WriteConcernError, - WriteError, - WTimeoutError, - _wtimeout_error, -) -from pymongo.hello import HelloCompat - -# From the SDAM spec, the "node is shutting down" codes. 
-_SHUTDOWN_CODES = frozenset( - [ - 11600, # InterruptedAtShutdown - 91, # ShutdownInProgress - ] -) -# From the SDAM spec, the "not primary" error codes are combined with the -# "node is recovering" error codes (of which the "node is shutting down" -# errors are a subset). -_NOT_PRIMARY_CODES = ( - frozenset( - [ - 10058, # LegacyNotPrimary <=3.2 "not primary" error code - 10107, # NotWritablePrimary - 13435, # NotPrimaryNoSecondaryOk - 11602, # InterruptedDueToReplStateChange - 13436, # NotPrimaryOrSecondary - 189, # PrimarySteppedDown - ] - ) - | _SHUTDOWN_CODES -) -# From the retryable writes spec. -_RETRYABLE_ERROR_CODES = _NOT_PRIMARY_CODES | frozenset( - [ - 7, # HostNotFound - 6, # HostUnreachable - 89, # NetworkTimeout - 9001, # SocketException - 262, # ExceededTimeLimit - ] -) - - -def _gen_index_name(keys): - """Generate an index name from the set of fields it is over.""" - return "_".join(["%s_%s" % item for item in keys]) - - -def _index_list(key_or_list, direction=None): - """Helper to generate a list of (key, direction) pairs. - - Takes such a list, or a single key, or a single key and direction. - """ - if direction is not None: - return [(key_or_list, direction)] - else: - if isinstance(key_or_list, str): - return [(key_or_list, ASCENDING)] - if isinstance(key_or_list, abc.ItemsView): - return list(key_or_list) - elif not isinstance(key_or_list, (list, tuple)): - raise TypeError("if no direction is specified, key_or_list must be an instance of list") - return key_or_list - - -def _index_document(index_list): - """Helper to generate an index specifying document. - - Takes a list of (key, direction) pairs. - """ - if isinstance(index_list, abc.Mapping): - raise TypeError( - "passing a dict to sort/create_index/hint is not " - "allowed - use a list of tuples instead. did you " - "mean %r?" % list(index_list.items()) - ) - elif not isinstance(index_list, (list, tuple)): - raise TypeError("must use a list of (key, direction) pairs, not: " + repr(index_list)) - if not len(index_list): - raise ValueError("key_or_list must not be the empty list") - - index: SON[str, Any] = SON() - for (key, value) in index_list: - if not isinstance(key, str): - raise TypeError("first item in each key pair must be an instance of str") - if not isinstance(value, (str, int, abc.Mapping)): - raise TypeError( - "second item in each key pair must be 1, -1, " - "'2d', or another valid MongoDB index specifier." - ) - index[key] = value - return index - - -def _check_command_response( - response, max_wire_version, allowable_errors=None, parse_write_concern_error=False -): - """Check the response to a command for errors.""" - if "ok" not in response: - # Server didn't recognize our message as a command. - raise OperationFailure( - response.get("$err"), response.get("code"), response, max_wire_version - ) - - if parse_write_concern_error and "writeConcernError" in response: - _error = response["writeConcernError"] - _labels = response.get("errorLabels") - if _labels: - _error.update({"errorLabels": _labels}) - _raise_write_concern_error(_error) - - if response["ok"]: - return - - details = response - # Mongos returns the error details in a 'raw' object - # for some errors. - if "raw" in response: - for shard in response["raw"].values(): - # Grab the first non-empty raw error from a shard. 
- if shard.get("errmsg") and not shard.get("ok"): - details = shard - break - - errmsg = details["errmsg"] - code = details.get("code") - - # For allowable errors, only check for error messages when the code is not - # included. - if allowable_errors: - if code is not None: - if code in allowable_errors: - return - elif errmsg in allowable_errors: - return - - # Server is "not primary" or "recovering" - if code is not None: - if code in _NOT_PRIMARY_CODES: - raise NotPrimaryError(errmsg, response) - elif HelloCompat.LEGACY_ERROR in errmsg or "node is recovering" in errmsg: - raise NotPrimaryError(errmsg, response) - - # Other errors - # findAndModify with upsert can raise duplicate key error - if code in (11000, 11001, 12582): - raise DuplicateKeyError(errmsg, code, response, max_wire_version) - elif code == 50: - raise ExecutionTimeout(errmsg, code, response, max_wire_version) - elif code == 43: - raise CursorNotFound(errmsg, code, response, max_wire_version) - - raise OperationFailure(errmsg, code, response, max_wire_version) - - -def _raise_last_write_error(write_errors: List[Any]) -> NoReturn: - # If the last batch had multiple errors only report - # the last error to emulate continue_on_error. - error = write_errors[-1] - if error.get("code") == 11000: - raise DuplicateKeyError(error.get("errmsg"), 11000, error) - raise WriteError(error.get("errmsg"), error.get("code"), error) - - -def _raise_write_concern_error(error: Any) -> NoReturn: - if _wtimeout_error(error): - # Make sure we raise WTimeoutError - raise WTimeoutError(error.get("errmsg"), error.get("code"), error) - raise WriteConcernError(error.get("errmsg"), error.get("code"), error) - - -def _get_wce_doc(result): - """Return the writeConcernError or None.""" - wce = result.get("writeConcernError") - if wce: - # The server reports errorLabels at the top level but it's more - # convenient to attach it to the writeConcernError doc itself. - error_labels = result.get("errorLabels") - if error_labels: - wce["errorLabels"] = error_labels - return wce - - -def _check_write_command_response(result): - """Backward compatibility helper for write command error handling.""" - # Prefer write errors over write concern errors - write_errors = result.get("writeErrors") - if write_errors: - _raise_last_write_error(write_errors) - - wce = _get_wce_doc(result) - if wce: - _raise_write_concern_error(wce) - - -def _fields_list_to_dict(fields, option_name): - """Takes a sequence of field names and returns a matching dictionary. - - ["a", "b"] becomes {"a": 1, "b": 1} - - and - - ["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1} - """ - if isinstance(fields, abc.Mapping): - return fields - - if isinstance(fields, (abc.Sequence, abc.Set)): - if not all(isinstance(field, str) for field in fields): - raise TypeError( - "%s must be a list of key names, each an instance of str" % (option_name,) - ) - return dict.fromkeys(fields, 1) - - raise TypeError("%s must be a mapping or list of key names" % (option_name,)) - - -def _handle_exception(): - """Print exceptions raised by subscribers to stderr.""" - # Heavily influenced by logging.Handler.handleError. 
- - # See note here: - # https://docs.python.org/3.4/library/sys.html#sys.__stderr__ - if sys.stderr: - einfo = sys.exc_info() - try: - traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) - except IOError: - pass - finally: - del einfo diff --git a/pymongo/helpers_shared.py b/pymongo/helpers_shared.py new file mode 100644 index 0000000000..c3611df7c8 --- /dev/null +++ b/pymongo/helpers_shared.py @@ -0,0 +1,364 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Bits and pieces used by the driver that don't really fit elsewhere.""" +from __future__ import annotations + +import sys +import traceback +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, + Container, + Iterable, + Mapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from pymongo import ASCENDING +from pymongo.errors import ( + CursorNotFound, + DuplicateKeyError, + ExecutionTimeout, + NotPrimaryError, + OperationFailure, + WriteConcernError, + WriteError, + WTimeoutError, + _wtimeout_error, +) +from pymongo.hello import HelloCompat + +if TYPE_CHECKING: + from pymongo.cursor_shared import _Hint + from pymongo.operations import _IndexList + from pymongo.pool_options import PoolOptions + from pymongo.typings import _DocumentOut + + +# From the SDAM spec, the "node is shutting down" codes. + +_SHUTDOWN_CODES: frozenset[int] = frozenset( + [ + 11600, # InterruptedAtShutdown + 91, # ShutdownInProgress + ] +) +# From the SDAM spec, the "not primary" error codes are combined with the +# "node is recovering" error codes (of which the "node is shutting down" +# errors are a subset). +_NOT_PRIMARY_CODES: frozenset[int] = ( + frozenset( + [ + 10058, # LegacyNotPrimary <=3.2 "not primary" error code + 10107, # NotWritablePrimary + 13435, # NotPrimaryNoSecondaryOk + 11602, # InterruptedDueToReplStateChange + 13436, # NotPrimaryOrSecondary + 189, # PrimarySteppedDown + ] + ) + | _SHUTDOWN_CODES +) +# From the retryable writes spec. +_RETRYABLE_ERROR_CODES: frozenset[int] = _NOT_PRIMARY_CODES | frozenset( + [ + 7, # HostNotFound + 6, # HostUnreachable + 89, # NetworkTimeout + 9001, # SocketException + 262, # ExceededTimeLimit + 134, # ReadConcernMajorityNotAvailableYet + ] +) + +# Server code raised when re-authentication is required +_REAUTHENTICATION_REQUIRED_CODE: int = 391 + +# Server code raised when authentication fails. +_AUTHENTICATION_FAILURE_CODE: int = 18 + +# Note - to avoid bugs from forgetting which if these is all lowercase and +# which are camelCase, and at the same time avoid having to add a test for +# every command, use all lowercase here and test against command_name.lower(). 
+_SENSITIVE_COMMANDS: set[str] = { + "authenticate", + "saslstart", + "saslcontinue", + "getnonce", + "createuser", + "updateuser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", +} + + +def _get_timeout_details(options: PoolOptions) -> dict[str, float]: + from pymongo import _csot + + details = {} + timeout = _csot.get_timeout() + socket_timeout = options.socket_timeout + connect_timeout = options.connect_timeout + if timeout: + details["timeoutMS"] = timeout * 1000 + if socket_timeout and not timeout: + details["socketTimeoutMS"] = socket_timeout * 1000 + if connect_timeout: + details["connectTimeoutMS"] = connect_timeout * 1000 + return details + + +def format_timeout_details(details: Optional[dict[str, float]]) -> str: + result = "" + if details: + result += " (configured timeouts:" + for timeout in ["socketTimeoutMS", "timeoutMS", "connectTimeoutMS"]: + if timeout in details: + result += f" {timeout}: {details[timeout]}ms," + result = result[:-1] + result += ")" + return result + + +def _gen_index_name(keys: _IndexList) -> str: + """Generate an index name from the set of fields it is over.""" + return "_".join(["{}_{}".format(*item) for item in keys]) + + +def _index_list( + key_or_list: _Hint, direction: Optional[Union[int, str]] = None +) -> Sequence[tuple[str, Union[int, str, Mapping[str, Any]]]]: + """Helper to generate a list of (key, direction) pairs. + + Takes such a list, or a single key, or a single key and direction. + """ + if direction is not None: + if not isinstance(key_or_list, str): + raise TypeError(f"Expected a string and a direction, not {type(key_or_list)}") + return [(key_or_list, direction)] + else: + if isinstance(key_or_list, str): + return [(key_or_list, ASCENDING)] + elif isinstance(key_or_list, abc.ItemsView): + return list(key_or_list) # type: ignore[arg-type] + elif isinstance(key_or_list, abc.Mapping): + return list(key_or_list.items()) + elif not isinstance(key_or_list, (list, tuple)): + raise TypeError( + f"if no direction is specified, key_or_list must be an instance of list, not {type(key_or_list)}" + ) + values: list[tuple[str, int]] = [] + for item in key_or_list: + if isinstance(item, str): + item = (item, ASCENDING) # noqa: PLW2901 + values.append(item) + return values + + +def _index_document(index_list: _IndexList) -> dict[str, Any]: + """Helper to generate an index specifying document. + + Takes a list of (key, direction) pairs. + """ + if not isinstance(index_list, (list, tuple, abc.Mapping)): + raise TypeError( + "must use a dictionary or a list of (key, direction) pairs, not: " + repr(index_list) + ) + if not len(index_list): + raise ValueError("key_or_list must not be empty") + + index: dict[str, Any] = {} + + if isinstance(index_list, abc.Mapping): + for key in index_list: + value = index_list[key] + _validate_index_key_pair(key, value) + index[key] = value + else: + for item in index_list: + if isinstance(item, str): + item = (item, ASCENDING) # noqa: PLW2901 + key, value = item + _validate_index_key_pair(key, value) + index[key] = value + return index + + +def _validate_index_key_pair(key: Any, value: Any) -> None: + if not isinstance(key, str): + raise TypeError(f"first item in each key pair must be an instance of str, not {type(key)}") + if not isinstance(value, (str, int, abc.Mapping)): + raise TypeError( + "second item in each key pair must be 1, -1, " + "'2d', or another valid MongoDB index specifier." 
+ f", not {type(value)}" + ) + + +def _check_command_response( + response: _DocumentOut, + max_wire_version: Optional[int], + allowable_errors: Optional[Container[Union[int, str]]] = None, + parse_write_concern_error: bool = False, + pool_opts: Optional[PoolOptions] = None, +) -> None: + """Check the response to a command for errors.""" + if "ok" not in response: + # Server didn't recognize our message as a command. + raise OperationFailure( + response.get("$err"), # type: ignore[arg-type] + response.get("code"), + response, + max_wire_version, + ) + + if parse_write_concern_error and "writeConcernError" in response: + _error = response["writeConcernError"] + _labels = response.get("errorLabels") + if _labels: + _error.update({"errorLabels": _labels}) + _raise_write_concern_error(_error) + + if response["ok"]: + return + + details = response + # Mongos returns the error details in a 'raw' object + # for some errors. + if "raw" in response: + for shard in response["raw"].values(): + # Grab the first non-empty raw error from a shard. + if shard.get("errmsg") and not shard.get("ok"): + details = shard + break + + errmsg = details["errmsg"] + code = details.get("code") + + # For allowable errors, only check for error messages when the code is not + # included. + if allowable_errors: + if code is not None: + if code in allowable_errors: + return + elif errmsg in allowable_errors: + return + + # Server is "not primary" or "recovering" + if code is not None: + if code in _NOT_PRIMARY_CODES: + raise NotPrimaryError(errmsg, response) + elif HelloCompat.LEGACY_ERROR in errmsg or "node is recovering" in errmsg: + raise NotPrimaryError(errmsg, response) + + # Other errors + # findAndModify with upsert can raise duplicate key error + if code in (11000, 11001, 12582): + raise DuplicateKeyError(errmsg, code, response, max_wire_version) + elif code == 50: + # Append timeout details to MaxTimeMSExpired responses. + if pool_opts: + timeout_details = _get_timeout_details(pool_opts) + errmsg += format_timeout_details(timeout_details) + raise ExecutionTimeout(errmsg, code, response, max_wire_version) + elif code == 43: + raise CursorNotFound(errmsg, code, response, max_wire_version) + + raise OperationFailure(errmsg, code, response, max_wire_version) + + +def _raise_last_write_error(write_errors: list[Any]) -> NoReturn: + # If the last batch had multiple errors only report + # the last error to emulate continue_on_error. + error = write_errors[-1] + if error.get("code") == 11000: + raise DuplicateKeyError(error.get("errmsg"), 11000, error) + raise WriteError(error.get("errmsg"), error.get("code"), error) + + +def _raise_write_concern_error(error: Any) -> NoReturn: + if _wtimeout_error(error): + # Make sure we raise WTimeoutError + raise WTimeoutError(error.get("errmsg"), error.get("code"), error) + raise WriteConcernError(error.get("errmsg"), error.get("code"), error) + + +def _get_wce_doc(result: Mapping[str, Any]) -> Optional[Mapping[str, Any]]: + """Return the writeConcernError or None.""" + wce = result.get("writeConcernError") + if wce: + # The server reports errorLabels at the top level but it's more + # convenient to attach it to the writeConcernError doc itself. + error_labels = result.get("errorLabels") + if error_labels: + # Copy to avoid changing the original document. 
+ wce = wce.copy() + wce["errorLabels"] = error_labels + return wce + + +def _check_write_command_response(result: Mapping[str, Any]) -> None: + """Backward compatibility helper for write command error handling.""" + # Prefer write errors over write concern errors + write_errors = result.get("writeErrors") + if write_errors: + _raise_last_write_error(write_errors) + + wce = _get_wce_doc(result) + if wce: + _raise_write_concern_error(wce) + + +def _fields_list_to_dict( + fields: Union[Mapping[str, Any], Iterable[str]], option_name: str +) -> Mapping[str, Any]: + """Takes a sequence of field names and returns a matching dictionary. + + ["a", "b"] becomes {"a": 1, "b": 1} + + and + + ["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1} + """ + if isinstance(fields, abc.Mapping): + return fields + + if isinstance(fields, (abc.Sequence, abc.Set)): + if not all(isinstance(field, str) for field in fields): + raise TypeError(f"{option_name} must be a list of key names, each an instance of str") + return dict.fromkeys(fields, 1) + + raise TypeError(f"{option_name} must be a mapping or list of key names") + + +def _handle_exception() -> None: + """Print exceptions raised by subscribers to stderr.""" + # Heavily influenced by logging.Handler.handleError. + + # See note here: + # https://docs.python.org/3.4/library/sys.html#sys.__stderr__ + if sys.stderr: + einfo = sys.exc_info() + try: + traceback.print_exception(einfo[0], einfo[1], einfo[2], None, sys.stderr) + except OSError: + pass + finally: + del einfo diff --git a/pymongo/lock.py b/pymongo/lock.py new file mode 100644 index 0000000000..ad990fce3f --- /dev/null +++ b/pymongo/lock.py @@ -0,0 +1,92 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal helpers for lock and condition coordination primitives.""" + +from __future__ import annotations + +import asyncio +import os +import sys +import threading +import weakref +from asyncio import wait_for +from typing import Any, Optional, TypeVar + +import pymongo._asyncio_lock + +_HAS_REGISTER_AT_FORK = hasattr(os, "register_at_fork") + +# References to instances of _create_lock +_forkable_locks: weakref.WeakSet[threading.Lock] = weakref.WeakSet() + +_T = TypeVar("_T") + +# Needed to support 3.13 asyncio fixes (https://github.com/python/cpython/issues/112202) +# in older versions of Python +if sys.version_info >= (3, 13): + Lock = asyncio.Lock + Condition = asyncio.Condition +else: + Lock = pymongo._asyncio_lock.Lock + Condition = pymongo._asyncio_lock.Condition + + +def _create_lock() -> threading.Lock: + """Represents a lock that is tracked upon instantiation using a WeakSet and + reset by pymongo upon forking. 
+ """ + lock = threading.Lock() + if _HAS_REGISTER_AT_FORK: + _forkable_locks.add(lock) + return lock + + +def _async_create_lock() -> Lock: + """Represents an asyncio.Lock.""" + return Lock() + + +def _create_condition( + lock: threading.Lock, condition_class: Optional[Any] = None +) -> threading.Condition: + """Represents a threading.Condition.""" + if condition_class: + return condition_class(lock) + return threading.Condition(lock) + + +def _async_create_condition(lock: Lock, condition_class: Optional[Any] = None) -> Condition: + """Represents an asyncio.Condition.""" + if condition_class: + return condition_class(lock) + return Condition(lock) + + +def _release_locks() -> None: + # Completed the fork, reset all the locks in the child. + for lock in _forkable_locks: + if lock.locked(): + lock.release() + + +async def _async_cond_wait(condition: Condition, timeout: Optional[float]) -> bool: + try: + return await wait_for(condition.wait(), timeout) + except asyncio.TimeoutError: + return False + + +def _cond_wait(condition: threading.Condition, timeout: Optional[float]) -> bool: + return condition.wait(timeout) diff --git a/pymongo/logger.py b/pymongo/logger.py new file mode 100644 index 0000000000..1b3fe43b86 --- /dev/null +++ b/pymongo/logger.py @@ -0,0 +1,189 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import enum +import logging +import os +import warnings +from typing import Any + +from bson import UuidRepresentation, json_util +from bson.json_util import JSONOptions, _truncate_documents +from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason + + +class _CommandStatusMessage(str, enum.Enum): + STARTED = "Command started" + SUCCEEDED = "Command succeeded" + FAILED = "Command failed" + + +class _ServerSelectionStatusMessage(str, enum.Enum): + STARTED = "Server selection started" + SUCCEEDED = "Server selection succeeded" + FAILED = "Server selection failed" + WAITING = "Waiting for suitable server to become available" + + +class _ConnectionStatusMessage(str, enum.Enum): + POOL_CREATED = "Connection pool created" + POOL_READY = "Connection pool ready" + POOL_CLOSED = "Connection pool closed" + POOL_CLEARED = "Connection pool cleared" + + CONN_CREATED = "Connection created" + CONN_READY = "Connection ready" + CONN_CLOSED = "Connection closed" + + CHECKOUT_STARTED = "Connection checkout started" + CHECKOUT_SUCCEEDED = "Connection checked out" + CHECKOUT_FAILED = "Connection checkout failed" + CHECKEDIN = "Connection checked in" + + +class _SDAMStatusMessage(str, enum.Enum): + START_TOPOLOGY = "Starting topology monitoring" + STOP_TOPOLOGY = "Stopped topology monitoring" + START_SERVER = "Starting server monitoring" + STOP_SERVER = "Stopped server monitoring" + TOPOLOGY_CHANGE = "Topology description changed" + HEARTBEAT_START = "Server heartbeat started" + HEARTBEAT_SUCCESS = "Server heartbeat succeeded" + HEARTBEAT_FAIL = "Server heartbeat failed" + + +_DEFAULT_DOCUMENT_LENGTH = 1000 +_SENSITIVE_COMMANDS = [ + "authenticate", + "saslStart", + "saslContinue", + "getnonce", + "createUser", + "updateUser", + "copydbgetnonce", + "copydbsaslstart", + "copydb", +] +_HELLO_COMMANDS = ["hello", "ismaster", "isMaster"] +_REDACTED_FAILURE_FIELDS = ["code", "codeName", "errorLabels"] +_DOCUMENT_NAMES = ["command", "reply", "failure"] +_JSON_OPTIONS = JSONOptions(uuid_representation=UuidRepresentation.STANDARD) +_COMMAND_LOGGER = logging.getLogger("pymongo.command") +_CONNECTION_LOGGER = logging.getLogger("pymongo.connection") +_SERVER_SELECTION_LOGGER = logging.getLogger("pymongo.serverSelection") +_CLIENT_LOGGER = logging.getLogger("pymongo.client") +_SDAM_LOGGER = logging.getLogger("pymongo.topology") +_VERBOSE_CONNECTION_ERROR_REASONS = { + ConnectionClosedReason.POOL_CLOSED: "Connection pool was closed", + ConnectionCheckOutFailedReason.POOL_CLOSED: "Connection pool was closed", + ConnectionClosedReason.STALE: "Connection pool was stale", + ConnectionClosedReason.ERROR: "An error occurred while using the connection", + ConnectionCheckOutFailedReason.CONN_ERROR: "An error occurred while trying to establish a new connection", + ConnectionClosedReason.IDLE: "Connection was idle too long", + ConnectionCheckOutFailedReason.TIMEOUT: "Connection exceeded the specified timeout", +} + + +def _log_client_error() -> None: + # This is called from a daemon thread so check for None to account for interpreter shutdown. + logger = _CLIENT_LOGGER + if logger: + # logger.exception includes the full traceback. 
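# [Editor's note: usage sketch, not part of this patch.] The module-level
# loggers defined above hook into the standard logging machinery, so an
# application can opt in per component:

import logging

logging.basicConfig(level=logging.WARNING)
logging.getLogger("pymongo.command").setLevel(logging.DEBUG)
logging.getLogger("pymongo.connection").setLevel(logging.DEBUG)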
+ logger.exception("MongoClient background task encountered an error:") + + +def _debug_log(logger: logging.Logger, **fields: Any) -> None: + logger.debug(LogMessage(**fields)) + + +def _verbose_connection_error_reason(reason: str) -> str: + return _VERBOSE_CONNECTION_ERROR_REASONS.get(reason, reason) + + +def _info_log(logger: logging.Logger, **fields: Any) -> None: + logger.info(LogMessage(**fields)) + + +def _log_or_warn(logger: logging.Logger, message: str) -> None: + if logger.isEnabledFor(logging.INFO): + logger.info(message) + else: + # stacklevel=4 ensures that the warning is for the user's code. + warnings.warn(message, UserWarning, stacklevel=4) + + +class LogMessage: + __slots__ = ("_kwargs", "_redacted") + + def __init__(self, **kwargs: Any): + self._kwargs = kwargs + self._redacted = False + + def __str__(self) -> str: + self._redact() + return "%s" % ( + json_util.dumps( + self._kwargs, json_options=_JSON_OPTIONS, default=lambda o: o.__repr__() + ) + ) + + def _is_sensitive(self, doc_name: str) -> bool: + is_speculative_authenticate = ( + self._kwargs.pop("speculative_authenticate", False) + or "speculativeAuthenticate" in self._kwargs[doc_name] + ) + is_sensitive_command = ( + "commandName" in self._kwargs and self._kwargs["commandName"] in _SENSITIVE_COMMANDS + ) + + is_sensitive_hello = ( + self._kwargs.get("commandName", None) in _HELLO_COMMANDS and is_speculative_authenticate + ) + + return is_sensitive_command or is_sensitive_hello + + def _redact(self) -> None: + if self._redacted: + return + self._kwargs = {k: v for k, v in self._kwargs.items() if v is not None} + if "durationMS" in self._kwargs and hasattr(self._kwargs["durationMS"], "total_seconds"): + self._kwargs["durationMS"] = self._kwargs["durationMS"].total_seconds() * 1000 + if "serviceId" in self._kwargs: + self._kwargs["serviceId"] = str(self._kwargs["serviceId"]) + document_length = int(os.getenv("MONGOB_LOG_MAX_DOCUMENT_LENGTH", _DEFAULT_DOCUMENT_LENGTH)) + if document_length < 0: + document_length = _DEFAULT_DOCUMENT_LENGTH + is_server_side_error = self._kwargs.pop("isServerSideError", False) + + for doc_name in _DOCUMENT_NAMES: + doc = self._kwargs.get(doc_name) + if doc: + if doc_name == "failure" and is_server_side_error: + doc = {k: v for k, v in doc.items() if k in _REDACTED_FAILURE_FIELDS} + if doc_name != "failure" and self._is_sensitive(doc_name): + doc = json_util.dumps({}) + else: + truncated_doc = _truncate_documents(doc, document_length)[0] + doc = json_util.dumps( + truncated_doc, + json_options=_JSON_OPTIONS, + default=lambda o: o.__repr__(), + ) + if len(doc) > document_length: + doc = ( + doc.encode()[:document_length].decode("unicode-escape", "ignore") + ) + "..." + self._kwargs[doc_name] = doc + self._redacted = True diff --git a/pymongo/max_staleness_selectors.py b/pymongo/max_staleness_selectors.py index 28b0bb615e..5f1e404720 100644 --- a/pymongo/max_staleness_selectors.py +++ b/pymongo/max_staleness_selectors.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -26,17 +26,24 @@ where "SMax" is the secondary with the greatest lastWriteDate. 
""" +from __future__ import annotations + +from typing import TYPE_CHECKING from pymongo.errors import ConfigurationError from pymongo.server_type import SERVER_TYPE +if TYPE_CHECKING: + from pymongo.server_selectors import Selection + + # Constant defined in Max Staleness Spec: An idle primary writes a no-op every # 10 seconds to refresh secondaries' lastWriteDate values. IDLE_WRITE_PERIOD = 10 SMALLEST_MAX_STALENESS = 90 -def _validate_max_staleness(max_staleness, heartbeat_frequency): +def _validate_max_staleness(max_staleness: int, heartbeat_frequency: int) -> None: # We checked for max staleness -1 before this, it must be positive here. if max_staleness < heartbeat_frequency + IDLE_WRITE_PERIOD: raise ConfigurationError( @@ -53,14 +60,16 @@ def _validate_max_staleness(max_staleness, heartbeat_frequency): ) -def _with_primary(max_staleness, selection): +def _with_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with a known primary.""" primary = selection.primary + assert primary sds = [] for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. + assert s.last_write_date and primary.last_write_date # noqa: PT018 staleness = ( (s.last_update_time - s.last_write_date) - (primary.last_update_time - primary.last_write_date) @@ -75,7 +84,7 @@ def _with_primary(max_staleness, selection): return selection.with_server_descriptions(sds) -def _no_primary(max_staleness, selection): +def _no_primary(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection with no known primary.""" # Secondary that's replicated the most recent writes. smax = selection.secondary_with_max_last_write_date() @@ -88,6 +97,7 @@ def _no_primary(max_staleness, selection): for s in selection.server_descriptions: if s.server_type == SERVER_TYPE.RSSecondary: # See max-staleness.rst for explanation of this formula. + assert smax.last_write_date and s.last_write_date # noqa: PT018 staleness = smax.last_write_date - s.last_write_date + selection.heartbeat_frequency if staleness <= max_staleness: @@ -98,7 +108,7 @@ def _no_primary(max_staleness, selection): return selection.with_server_descriptions(sds) -def select(max_staleness, selection): +def select(max_staleness: int, selection: Selection) -> Selection: """Apply max_staleness, in seconds, to a Selection.""" if max_staleness == -1: return selection diff --git a/pymongo/message.py b/pymongo/message.py index 8f37fdc062..0f3aaaba77 100644 --- a/pymongo/message.py +++ b/pymongo/message.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -19,18 +19,35 @@ .. note:: This module is for internal use and is generally not needed by application developers. 
""" +from __future__ import annotations import datetime import random import struct from io import BytesIO as _BytesIO -from typing import Any, Dict, NoReturn +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Iterable, + Mapping, + MutableMapping, + NoReturn, + Optional, + Union, +) import bson -from bson import CodecOptions, _decode_selective, _dict_to_bson, _make_c_string, encode +from bson import CodecOptions, _dict_to_bson, _make_c_string from bson.int64 import Int64 -from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson -from bson.son import SON +from bson.raw_bson import ( + _RAW_ARRAY_BSON_OPTIONS, + DEFAULT_RAW_BSON_OPTIONS, + RawBSONDocument, + _inflate_bson, +) +from pymongo.hello import HelloCompat +from pymongo.monitoring import _EventListeners try: from pymongo import _cmessage # type: ignore[attr-defined] @@ -48,9 +65,19 @@ OperationFailure, ProtocolError, ) -from pymongo.hello import HelloCompat -from pymongo.read_preferences import ReadPreference -from pymongo.write_concern import WriteConcern +from pymongo.read_preferences import ReadPreference, _ServerMode + +if TYPE_CHECKING: + from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.read_concern import ReadConcern + from pymongo.typings import ( + _Address, + _AgnosticClientSession, + _AgnosticConnection, + _AgnosticMongoClient, + _DocumentOut, + ) + MAX_INT32 = 2147483647 MIN_INT32 = -2147483648 @@ -74,19 +101,26 @@ _UPDATE: b"\x04updates\x00\x00\x00\x00\x00", _DELETE: b"\x04deletes\x00\x00\x00\x00\x00", } -_FIELD_MAP = {"insert": "documents", "update": "updates", "delete": "deletes"} +_FIELD_MAP = { + "insert": "documents", + "update": "updates", + "delete": "deletes", + "bulkWrite": "ops", +} -_UNICODE_REPLACE_CODEC_OPTIONS: "CodecOptions[Dict[str, Any]]" = CodecOptions( +_UNICODE_REPLACE_CODEC_OPTIONS: CodecOptions[Mapping[str, Any]] = CodecOptions( unicode_decode_error_handler="replace" ) -def _randint(): +def _randint() -> int: """Generate a pseudo random 32 bit integer.""" - return random.randint(MIN_INT32, MAX_INT32) + return random.randint(MIN_INT32, MAX_INT32) # noqa: S311 -def _maybe_add_read_preference(spec, read_preference): +def _maybe_add_read_preference( + spec: MutableMapping[str, Any], read_preference: _ServerMode +) -> MutableMapping[str, Any]: """Add $readPreference to spec when appropriate.""" mode = read_preference.mode document = read_preference.document @@ -97,19 +131,31 @@ def _maybe_add_read_preference(spec, read_preference): # the secondaryOkay bit has the same effect). if mode and (mode != ReadPreference.SECONDARY_PREFERRED.mode or len(document) > 1): if "$query" not in spec: - spec = SON([("$query", spec)]) + spec = {"$query": spec} spec["$readPreference"] = document return spec -def _convert_exception(exception): +def _convert_exception(exception: Exception) -> dict[str, Any]: """Convert an Exception into a failure document for publishing.""" return {"errmsg": str(exception), "errtype": exception.__class__.__name__} -def _convert_write_result(operation, command, result): - """Convert a legacy write result to write command format.""" +def _convert_client_bulk_exception(exception: Exception) -> dict[str, Any]: + """Convert an Exception into a failure document for publishing, + for use in client-level bulk write API. 
+ """ + return { + "errmsg": str(exception), + "code": exception.code, # type: ignore[attr-defined] + "errtype": exception.__class__.__name__, + } + +def _convert_write_result( + operation: str, command: Mapping[str, Any], result: Mapping[str, Any] +) -> dict[str, Any]: + """Convert a legacy write result to write command format.""" # Based on _merge_legacy from bulk.py affected = result.get("n", 0) res = {"ok": 1, "n": affected} @@ -142,50 +188,46 @@ def _convert_write_result(operation, command, result): return res -_OPTIONS = SON( - [ - ("tailable", 2), - ("oplogReplay", 8), - ("noCursorTimeout", 16), - ("awaitData", 32), - ("allowPartialResults", 128), - ] -) +_OPTIONS = { + "tailable": 2, + "oplogReplay": 8, + "noCursorTimeout": 16, + "awaitData": 32, + "allowPartialResults": 128, +} -_MODIFIERS = SON( - [ - ("$query", "filter"), - ("$orderby", "sort"), - ("$hint", "hint"), - ("$comment", "comment"), - ("$maxScan", "maxScan"), - ("$maxTimeMS", "maxTimeMS"), - ("$max", "max"), - ("$min", "min"), - ("$returnKey", "returnKey"), - ("$showRecordId", "showRecordId"), - ("$showDiskLoc", "showRecordId"), # <= MongoDb 3.0 - ("$snapshot", "snapshot"), - ] -) +_MODIFIERS = { + "$query": "filter", + "$orderby": "sort", + "$hint": "hint", + "$comment": "comment", + "$maxScan": "maxScan", + "$maxTimeMS": "maxTimeMS", + "$max": "max", + "$min": "min", + "$returnKey": "returnKey", + "$showRecordId": "showRecordId", + "$showDiskLoc": "showRecordId", # <= MongoDb 3.0 + "$snapshot": "snapshot", +} def _gen_find_command( - coll, - spec, - projection, - skip, - limit, - batch_size, - options, - read_concern, - collation=None, - session=None, - allow_disk_use=None, -): + coll: str, + spec: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + skip: int, + limit: int, + batch_size: Optional[int], + options: Optional[int], + read_concern: ReadConcern, + collation: Optional[Mapping[str, Any]] = None, + session: Optional[_AgnosticClientSession] = None, + allow_disk_use: Optional[bool] = None, +) -> dict[str, Any]: """Generate a find command document.""" - cmd = SON([("find", coll)]) + cmd: dict[str, Any] = {"find": coll} if "$query" in spec: cmd.update( [ @@ -209,6 +251,10 @@ def _gen_find_command( if limit < 0: cmd["singleBatch"] = True if batch_size: + # When limit and batchSize are equal we increase batchSize by 1 to + # avoid an unnecessary killCursors. 
+ if limit == batch_size: + batch_size += 1 cmd["batchSize"] = batch_size if read_concern.level and not (session and session.in_transaction): cmd["readConcern"] = read_concern.document @@ -222,476 +268,170 @@ def _gen_find_command( return cmd -def _gen_get_more_command(cursor_id, coll, batch_size, max_await_time_ms, comment, sock_info): +def _gen_get_more_command( + cursor_id: Optional[int], + coll: str, + batch_size: Optional[int], + max_await_time_ms: Optional[int], + comment: Optional[Any], + conn: _AgnosticConnection, +) -> dict[str, Any]: """Generate a getMore command document.""" - cmd = SON([("getMore", cursor_id), ("collection", coll)]) + cmd: dict[str, Any] = {"getMore": cursor_id, "collection": coll} if batch_size: cmd["batchSize"] = batch_size if max_await_time_ms is not None: cmd["maxTimeMS"] = max_await_time_ms - if comment is not None and sock_info.max_wire_version >= 9: + if comment is not None and conn.max_wire_version >= 9: cmd["comment"] = comment return cmd -class _Query(object): - """A query operation.""" - - __slots__ = ( - "flags", - "db", - "coll", - "ntoskip", - "spec", - "fields", - "codec_options", - "read_preference", - "limit", - "batch_size", - "name", - "read_concern", - "collation", - "session", - "client", - "allow_disk_use", - "_as_command", - "exhaust", - ) - - # For compatibility with the _GetMore class. - sock_mgr = None - cursor_id = None - - def __init__( - self, - flags, - db, - coll, - ntoskip, - spec, - fields, - codec_options, - read_preference, - limit, - batch_size, - read_concern, - collation, - session, - client, - allow_disk_use, - exhaust, - ): - self.flags = flags - self.db = db - self.coll = coll - self.ntoskip = ntoskip - self.spec = spec - self.fields = fields - self.codec_options = codec_options - self.read_preference = read_preference - self.read_concern = read_concern - self.limit = limit - self.batch_size = batch_size - self.collation = collation - self.session = session - self.client = client - self.allow_disk_use = allow_disk_use - self.name = "find" - self._as_command = None - self.exhaust = exhaust +_pack_compression_header = struct.Struct(" tuple[int, bytes]: + """Takes message data, compresses it, and adds an OP_COMPRESSED header.""" + compressed = ctx.compress(data) + request_id = _randint() - def use_command(self, sock_info): - use_find_cmd = False - if not self.exhaust: - use_find_cmd = True - elif sock_info.max_wire_version >= 8: - # OP_MSG supports exhaust on MongoDB 4.2+ - use_find_cmd = True - elif not self.read_concern.ok_for_legacy: - raise ConfigurationError( - "read concern level of %s is not valid " - "with a max wire version of %d." - % (self.read_concern.level, sock_info.max_wire_version) - ) + header = _pack_compression_header( + _COMPRESSION_HEADER_SIZE + len(compressed), # Total message length + request_id, # Request id + 0, # responseTo + 2012, # operation id + operation, # original operation id + len(data), # uncompressed message length + ctx.compressor_id, + ) # compressor id + return request_id, header + compressed - sock_info.validate_session(self.client, self.session) - return use_find_cmd - def as_command(self, sock_info, apply_timeout=False): - """Return a find command document for this query.""" - # We use the command twice: on the wire and for command monitoring. - # Generate it once, for speed and to avoid repeating side-effects. 
- if self._as_command is not None: - return self._as_command +_pack_header = struct.Struct(" tuple[int, bytes]: + """Takes message data and adds a message header based on the operation. - ns = self.namespace() - spec = self.spec + Returns the resultant message string. + """ + rid = _randint() + message = _pack_header(16 + len(data), rid, 0, operation) + return rid, message + data - if use_cmd: - spec = self.as_command(sock_info, apply_timeout=True)[0] - request_id, msg, size, _ = _op_msg( - 0, - spec, - self.db, - read_preference, - self.codec_options, - ctx=sock_info.compression_context, - ) - return request_id, msg, size - # OP_QUERY treats ntoreturn of -1 and 1 the same, return - # one document and close the cursor. We have to use 2 for - # batch size if 1 is specified. - ntoreturn = self.batch_size == 1 and 2 or self.batch_size - if self.limit: - if ntoreturn: - ntoreturn = min(self.limit, ntoreturn) - else: - ntoreturn = self.limit +_pack_int = struct.Struct(" tuple[bytes, int, int]: + """Get a OP_MSG message. + Note: this method handles multiple documents in a type one payload but + it does not perform batch splitting and the total message size is + only checked *after* generating the entire message. + """ + # Encode the command document in payload 0 without checking keys. + encoded = _dict_to_bson(command, False, opts) + flags_type = _pack_op_msg_flags_type(flags, 0) + total_size = len(encoded) + max_doc_size = 0 + if identifier and docs is not None: + type_one = _pack_byte(1) + cstring = _make_c_string(identifier) + encoded_docs = [_dict_to_bson(doc, False, opts) for doc in docs] + size = len(cstring) + sum(len(doc) for doc in encoded_docs) + 4 + encoded_size = _pack_int(size) + total_size += size + max_doc_size = max(len(doc) for doc in encoded_docs) + data = [flags_type, encoded, type_one, encoded_size, cstring, *encoded_docs] + else: + data = [flags_type, encoded] + return b"".join(data), total_size, max_doc_size -class _GetMore(object): - """A getmore operation.""" - __slots__ = ( - "db", - "coll", - "ntoreturn", - "cursor_id", - "max_await_time_ms", - "codec_options", - "read_preference", - "session", - "client", - "sock_mgr", - "_as_command", - "exhaust", - "comment", - ) +def _op_msg_compressed( + flags: int, + command: Mapping[str, Any], + identifier: str, + docs: Optional[list[Mapping[str, Any]]], + opts: CodecOptions[Any], + ctx: Union[SnappyContext, ZlibContext, ZstdContext], +) -> tuple[int, bytes, int, int]: + """Internal OP_MSG message helper.""" + msg, total_size, max_bson_size = _op_msg_no_header(flags, command, identifier, docs, opts) + rid, msg = _compress(2013, msg, ctx) + return rid, msg, total_size, max_bson_size - name = "getMore" - def __init__( - self, - db, - coll, - ntoreturn, - cursor_id, - codec_options, - read_preference, - session, - client, - max_await_time_ms, - sock_mgr, - exhaust, - comment, - ): - self.db = db - self.coll = coll - self.ntoreturn = ntoreturn - self.cursor_id = cursor_id - self.codec_options = codec_options - self.read_preference = read_preference - self.session = session - self.client = client - self.max_await_time_ms = max_await_time_ms - self.sock_mgr = sock_mgr - self._as_command = None - self.exhaust = exhaust - self.comment = comment +def _op_msg_uncompressed( + flags: int, + command: Mapping[str, Any], + identifier: str, + docs: Optional[list[Mapping[str, Any]]], + opts: CodecOptions[Any], +) -> tuple[int, bytes, int, int]: + """Internal compressed OP_MSG message helper.""" + data, total_size, max_bson_size = 
_op_msg_no_header(flags, command, identifier, docs, opts) + request_id, op_message = __pack_message(2013, data) + return request_id, op_message, total_size, max_bson_size - def reset(self): - self._as_command = None - def namespace(self): - return "%s.%s" % (self.db, self.coll) +if _use_c: + _op_msg_uncompressed = _cmessage._op_msg - def use_command(self, sock_info): - use_cmd = False - if not self.exhaust: - use_cmd = True - elif sock_info.max_wire_version >= 8: - # OP_MSG supports exhaust on MongoDB 4.2+ - use_cmd = True - sock_info.validate_session(self.client, self.session) - return use_cmd +def _op_msg( + flags: int, + command: MutableMapping[str, Any], + dbname: str, + read_preference: Optional[_ServerMode], + opts: CodecOptions[Any], + ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, +) -> tuple[int, bytes, int, int]: + """Get a OP_MSG message.""" + command["$db"] = dbname + # getMore commands do not send $readPreference. + if read_preference is not None and "$readPreference" not in command: + # Only send $readPreference if it's not primary (the default). + if read_preference.mode: + command["$readPreference"] = read_preference.document + name = next(iter(command)) + try: + identifier = _FIELD_MAP[name] + docs = command.pop(identifier) + except KeyError: + identifier = "" + docs = None + try: + if ctx: + return _op_msg_compressed(flags, command, identifier, docs, opts, ctx) + return _op_msg_uncompressed(flags, command, identifier, docs, opts) + finally: + # Add the field back to the command. + if identifier: + command[identifier] = docs - def as_command(self, sock_info, apply_timeout=False): - """Return a getMore command document for this query.""" - # See _Query.as_command for an explanation of this caching. - if self._as_command is not None: - return self._as_command - cmd = _gen_get_more_command( - self.cursor_id, - self.coll, - self.ntoreturn, - self.max_await_time_ms, - self.comment, - sock_info, - ) - if self.session: - self.session._apply_to(cmd, False, self.read_preference, sock_info) - sock_info.add_server_api(cmd) - sock_info.send_cluster_time(cmd, self.session, self.client) - # Support auto encryption - client = self.client - if client._encrypter and not client._encrypter._bypass_auto_encryption: - cmd = client._encrypter.encrypt(self.db, cmd, self.codec_options) - # Support CSOT - if apply_timeout: - sock_info.apply_timeout(client, cmd=None) - self._as_command = cmd, self.db - return self._as_command - - def get_message(self, dummy0, sock_info, use_cmd=False): - """Get a getmore message.""" - - ns = self.namespace() - ctx = sock_info.compression_context - - if use_cmd: - spec = self.as_command(sock_info, apply_timeout=True)[0] - if self.sock_mgr: - flags = _OpMsg.EXHAUST_ALLOWED - else: - flags = 0 - request_id, msg, size, _ = _op_msg( - flags, spec, self.db, None, self.codec_options, ctx=sock_info.compression_context - ) - return request_id, msg, size - - return _get_more(ns, self.ntoreturn, self.cursor_id, ctx) - - -class _RawBatchQuery(_Query): - def use_command(self, sock_info): - # Compatibility checks. - super(_RawBatchQuery, self).use_command(sock_info) - if sock_info.max_wire_version >= 8: - # MongoDB 4.2+ supports exhaust over OP_MSG - return True - elif not self.exhaust: - return True - return False - - -class _RawBatchGetMore(_GetMore): - def use_command(self, sock_info): - # Compatibility checks. 
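# [Editor's note: sketch of the document-sequence split performed by _op_msg
# above; not part of this patch, plain dicts only.] For an insert command the
# bulk documents are popped out of the command body and shipped as an OP_MSG
# type-1 payload, leaving the rest as the type-0 payload:

command = {"insert": "coll", "documents": [{"_id": 1}, {"_id": 2}], "$db": "test"}
identifier = {"insert": "documents", "update": "updates", "delete": "deletes"}[next(iter(command))]
docs = command.pop(identifier)  # -> payload type 1, tagged "documents"
assert command == {"insert": "coll", "$db": "test"}  # -> payload type 0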
- super(_RawBatchGetMore, self).use_command(sock_info) - if sock_info.max_wire_version >= 8: - # MongoDB 4.2+ supports exhaust over OP_MSG - return True - elif not self.exhaust: - return True - return False - - -class _CursorAddress(tuple): - """The server address (host, port) of a cursor, with namespace property.""" - - __namespace: Any - - def __new__(cls, address, namespace): - self = tuple.__new__(cls, address) - self.__namespace = namespace - return self - - @property - def namespace(self): - """The namespace this cursor.""" - return self.__namespace - - def __hash__(self): - # Two _CursorAddress instances with different namespaces - # must not hash the same. - return (self + (self.__namespace,)).__hash__() - - def __eq__(self, other): - if isinstance(other, _CursorAddress): - return tuple(self) == tuple(other) and self.namespace == other.namespace - return NotImplemented - - def __ne__(self, other): - return not self == other - - -_pack_compression_header = struct.Struct(" tuple[bytes, int]: """Get an OP_QUERY message.""" encoded = _dict_to_bson(query, False, opts) if field_selector: @@ -703,7 +443,7 @@ def _query_impl(options, collection_name, num_to_skip, num_to_return, query, fie b"".join( [ _pack_int(options), - _make_c_string(collection_name), + bson._make_c_string(collection_name), _pack_int(num_to_skip), _pack_int(num_to_return), encoded, @@ -715,8 +455,15 @@ def _query_impl(options, collection_name, num_to_skip, num_to_return, query, fie def _query_compressed( - options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx=None -): + options: int, + collection_name: str, + num_to_skip: int, + num_to_return: int, + query: Mapping[str, Any], + field_selector: Optional[Mapping[str, Any]], + opts: CodecOptions[Any], + ctx: Union[SnappyContext, ZlibContext, ZstdContext], +) -> tuple[int, bytes, int]: """Internal compressed query message helper.""" op_query, max_bson_size = _query_impl( options, collection_name, num_to_skip, num_to_return, query, field_selector, opts @@ -726,8 +473,14 @@ def _query_compressed( def _query_uncompressed( - options, collection_name, num_to_skip, num_to_return, query, field_selector, opts -): + options: int, + collection_name: str, + num_to_skip: int, + num_to_return: int, + query: Mapping[str, Any], + field_selector: Optional[Mapping[str, Any]], + opts: CodecOptions[Any], +) -> tuple[int, bytes, int]: """Internal query message helper.""" op_query, max_bson_size = _query_impl( options, collection_name, num_to_skip, num_to_return, query, field_selector, opts @@ -737,12 +490,19 @@ def _query_uncompressed( if _use_c: - _query_uncompressed = _cmessage._query_message # noqa: F811 + _query_uncompressed = _cmessage._query_message def _query( - options, collection_name, num_to_skip, num_to_return, query, field_selector, opts, ctx=None -): + options: int, + collection_name: str, + num_to_skip: int, + num_to_return: int, + query: Mapping[str, Any], + field_selector: Optional[Mapping[str, Any]], + opts: CodecOptions[Any], + ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, +) -> tuple[int, bytes, int]: """Get a **query** message.""" if ctx: return _query_compressed( @@ -756,45 +516,67 @@ def _query( _pack_long_long = struct.Struct(" bytes: """Get an OP_GET_MORE message.""" return b"".join( [ _ZERO_32, - _make_c_string(collection_name), + bson._make_c_string(collection_name), _pack_int(num_to_return), _pack_long_long(cursor_id), ] ) -def _get_more_compressed(collection_name, num_to_return, cursor_id, ctx): +def 
_get_more_compressed( + collection_name: str, + num_to_return: int, + cursor_id: int, + ctx: Union[SnappyContext, ZlibContext, ZstdContext], +) -> tuple[int, bytes]: """Internal compressed getMore message helper.""" return _compress(2005, _get_more_impl(collection_name, num_to_return, cursor_id), ctx) -def _get_more_uncompressed(collection_name, num_to_return, cursor_id): +def _get_more_uncompressed( + collection_name: str, num_to_return: int, cursor_id: int +) -> tuple[int, bytes]: """Internal getMore message helper.""" return __pack_message(2005, _get_more_impl(collection_name, num_to_return, cursor_id)) if _use_c: - _get_more_uncompressed = _cmessage._get_more_message # noqa: F811 + _get_more_uncompressed = _cmessage._get_more_message -def _get_more(collection_name, num_to_return, cursor_id, ctx=None): +def _get_more( + collection_name: str, + num_to_return: int, + cursor_id: int, + ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, +) -> tuple[int, bytes]: """Get a **getMore** message.""" if ctx: return _get_more_compressed(collection_name, num_to_return, cursor_id, ctx) return _get_more_uncompressed(collection_name, num_to_return, cursor_id) -class _BulkWriteContext(object): - """A wrapper around SocketInfo for use with write splitting functions.""" +# OP_MSG ------------------------------------------------------------- + + +_OP_MSG_MAP = { + _INSERT: b"documents\x00", + _UPDATE: b"updates\x00", + _DELETE: b"deletes\x00", +} + + +class _BulkWriteContextBase: + """Private base class for wrapping around AsyncConnection to use with write splitting functions.""" __slots__ = ( "db_name", - "sock_info", + "conn", "op_id", "name", "field", @@ -808,176 +590,142 @@ class _BulkWriteContext(object): ) def __init__( - self, database_name, cmd_name, sock_info, operation_id, listeners, session, op_type, codec + self, + database_name: str, + cmd_name: str, + conn: _AgnosticConnection, + operation_id: int, + listeners: _EventListeners, + session: Optional[_AgnosticClientSession], + op_type: int, + codec: CodecOptions[Any], ): self.db_name = database_name - self.sock_info = sock_info + self.conn = conn self.op_id = operation_id self.listeners = listeners self.publish = listeners.enabled_for_commands self.name = cmd_name self.field = _FIELD_MAP[self.name] - self.start_time = datetime.datetime.now() if self.publish else None + self.start_time = datetime.datetime.now() self.session = session - self.compress = True if sock_info.compression_context else False + self.compress = bool(conn.compression_context) self.op_type = op_type self.codec = codec - def _batch_command(self, cmd, docs): - namespace = self.db_name + ".$cmd" - request_id, msg, to_send = _do_batched_op_msg( - namespace, self.op_type, cmd, docs, self.codec, self - ) - if not to_send: - raise InvalidOperation("cannot do an empty bulk write") - return request_id, msg, to_send - - def execute(self, cmd, docs, client): - request_id, msg, to_send = self._batch_command(cmd, docs) - result = self.write_command(cmd, request_id, msg, to_send) - client._process_response(result, self.session) - return result, to_send - - def execute_unack(self, cmd, docs, client): - request_id, msg, to_send = self._batch_command(cmd, docs) - # Though this isn't strictly a "legacy" write, the helper - # handles publishing commands and sending our message - # without receiving a result. Send 0 for max_doc_size - # to disable size checking. Size checking is handled while - # the documents are encoded to BSON. 
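# [Editor's note: not part of this patch.] The "16 bytes" subtracted from
# max_message_size when compressing (see the property below) is the standard
# wire header every MongoDB message carries: four little-endian int32s
# (messageLength, requestID, responseTo, opCode). A quick check:

import struct

header = struct.pack("<iiii", 1234, 42, 0, 2013)  # 2013 = OP_MSG
assert len(header) == 16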
- self.unack_write(cmd, request_id, msg, 0, to_send) - return to_send - @property - def max_bson_size(self): + def max_bson_size(self) -> int: """A proxy for SockInfo.max_bson_size.""" - return self.sock_info.max_bson_size + return self.conn.max_bson_size @property - def max_message_size(self): + def max_message_size(self) -> int: """A proxy for SockInfo.max_message_size.""" if self.compress: # Subtract 16 bytes for the message header. - return self.sock_info.max_message_size - 16 - return self.sock_info.max_message_size + return self.conn.max_message_size - 16 + return self.conn.max_message_size @property - def max_write_batch_size(self): + def max_write_batch_size(self) -> int: """A proxy for SockInfo.max_write_batch_size.""" - return self.sock_info.max_write_batch_size + return self.conn.max_write_batch_size @property - def max_split_size(self): + def max_split_size(self) -> int: """The maximum size of a BSON command before batch splitting.""" return self.max_bson_size - def unack_write(self, cmd, request_id, msg, max_doc_size, docs): - """A proxy for SocketInfo.unack_write that handles event publishing.""" - if self.publish: - assert self.start_time is not None - duration = datetime.datetime.now() - self.start_time - cmd = self._start(cmd, request_id, docs) - start = datetime.datetime.now() - try: - result = self.sock_info.unack_write(msg, max_doc_size) - if self.publish: - duration = (datetime.datetime.now() - start) + duration - if result is not None: - reply = _convert_write_result(self.name, cmd, result) - else: - # Comply with APM spec. - reply = {"ok": 1} - self._succeed(request_id, reply, duration) - except Exception as exc: - if self.publish: - assert self.start_time is not None - duration = (datetime.datetime.now() - start) + duration - if isinstance(exc, OperationFailure): - failure = _convert_write_result(self.name, cmd, exc.details) - elif isinstance(exc, NotPrimaryError): - failure = exc.details - else: - failure = _convert_exception(exc) - self._fail(request_id, failure, duration) - raise - finally: - self.start_time = datetime.datetime.now() - return result - - def write_command(self, cmd, request_id, msg, docs): - """A proxy for SocketInfo.write_command that handles event publishing.""" - if self.publish: - assert self.start_time is not None - duration = datetime.datetime.now() - self.start_time - self._start(cmd, request_id, docs) - start = datetime.datetime.now() - try: - reply = self.sock_info.write_command(request_id, msg, self.codec) - if self.publish: - duration = (datetime.datetime.now() - start) + duration - self._succeed(request_id, reply, duration) - except Exception as exc: - if self.publish: - duration = (datetime.datetime.now() - start) + duration - if isinstance(exc, (NotPrimaryError, OperationFailure)): - failure = exc.details - else: - failure = _convert_exception(exc) - self._fail(request_id, failure, duration) - raise - finally: - self.start_time = datetime.datetime.now() - return reply - - def _start(self, cmd, request_id, docs): - """Publish a CommandStartedEvent.""" - cmd[self.field] = docs - self.listeners.publish_command_start( - cmd, - self.db_name, - request_id, - self.sock_info.address, - self.op_id, - self.sock_info.service_id, - ) - return cmd - - def _succeed(self, request_id, reply, duration): + def _succeed(self, request_id: int, reply: _DocumentOut, duration: datetime.timedelta) -> None: """Publish a CommandSucceededEvent.""" self.listeners.publish_command_success( duration, reply, self.name, request_id, - self.sock_info.address, + 
self.conn.address, + self.conn.server_connection_id, self.op_id, - self.sock_info.service_id, + self.conn.service_id, + database_name=self.db_name, ) - def _fail(self, request_id, failure, duration): + def _fail(self, request_id: int, failure: _DocumentOut, duration: datetime.timedelta) -> None: """Publish a CommandFailedEvent.""" self.listeners.publish_command_failure( duration, failure, self.name, request_id, - self.sock_info.address, + self.conn.address, + self.conn.server_connection_id, self.op_id, - self.sock_info.service_id, + self.conn.service_id, + database_name=self.db_name, ) -# From the Client Side Encryption spec: -# Because automatic encryption increases the size of commands, the driver -# MUST split bulk writes at a reduced size limit before undergoing automatic -# encryption. The write payload MUST be split at 2MiB (2097152). -_MAX_SPLIT_SIZE_ENC = 2097152 +class _BulkWriteContext(_BulkWriteContextBase): + """A wrapper around AsyncConnection/Connection for use with the collection-level bulk write API.""" + + __slots__ = () + + def __init__( + self, + database_name: str, + cmd_name: str, + conn: _AgnosticConnection, + operation_id: int, + listeners: _EventListeners, + session: Optional[_AgnosticClientSession], + op_type: int, + codec: CodecOptions[Any], + ): + super().__init__( + database_name, + cmd_name, + conn, + operation_id, + listeners, + session, + op_type, + codec, + ) + + def batch_command( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] + ) -> tuple[int, Union[bytes, dict[str, Any]], list[Mapping[str, Any]]]: + namespace = self.db_name + ".$cmd" + request_id, msg, to_send = _do_batched_op_msg( + namespace, self.op_type, cmd, docs, self.codec, self + ) + if not to_send: + raise InvalidOperation("cannot do an empty bulk write") + return request_id, msg, to_send + + def _start( + self, cmd: MutableMapping[str, Any], request_id: int, docs: list[Mapping[str, Any]] + ) -> MutableMapping[str, Any]: + """Publish a CommandStartedEvent.""" + cmd[self.field] = docs + self.listeners.publish_command_start( + cmd, + self.db_name, + request_id, + self.conn.address, + self.conn.server_connection_id, + self.op_id, + self.conn.service_id, + ) + return cmd class _EncryptedBulkWriteContext(_BulkWriteContext): __slots__ = () - def _batch_command(self, cmd, docs): + def batch_command( + self, cmd: MutableMapping[str, Any], docs: list[Mapping[str, Any]] + ) -> tuple[int, dict[str, Any], list[Mapping[str, Any]]]: namespace = self.db_name + ".$cmd" msg, to_send = _encode_batched_write_command( namespace, self.op_type, cmd, docs, self.codec, self @@ -987,29 +735,11 @@ def _batch_command(self, cmd, docs): # Chop off the OP_QUERY header to get a properly batched write command. 
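# [Editor's note, expanding the comment above; not part of this patch.]
# The encoded message begins with the OP_QUERY body: flags (4 bytes), the
# namespace as a NUL-terminated cstring, then numberToSkip and numberToReturn
# (4 bytes each). index(b"\x00", 4) finds the cstring terminator after the
# flags; +9 skips that NUL plus the two int32s (1 + 4 + 4), landing on the
# first byte of the embedded command document.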
cmd_start = msg.index(b"\x00", 4) + 9 - cmd = _inflate_bson(memoryview(msg)[cmd_start:], DEFAULT_RAW_BSON_OPTIONS) - return cmd, to_send - - def execute(self, cmd, docs, client): - batched_cmd, to_send = self._batch_command(cmd, docs) - result = self.sock_info.command( - self.db_name, batched_cmd, codec_options=self.codec, session=self.session, client=client - ) - return result, to_send - - def execute_unack(self, cmd, docs, client): - batched_cmd, to_send = self._batch_command(cmd, docs) - self.sock_info.command( - self.db_name, - batched_cmd, - write_concern=WriteConcern(w=0), - session=self.session, - client=client, - ) - return to_send + outgoing = _inflate_bson(memoryview(msg)[cmd_start:], DEFAULT_RAW_BSON_OPTIONS) + return -1, outgoing, to_send @property - def max_split_size(self): + def max_split_size(self) -> int: """Reduce the batch splitting size.""" return _MAX_SPLIT_SIZE_ENC @@ -1026,20 +756,25 @@ def _raise_document_too_large(operation: str, doc_size: int, max_size: int) -> N else: # There's nothing intelligent we can say # about size for update and delete - raise DocumentTooLarge("%r command document too large" % (operation,)) - - -# OP_MSG ------------------------------------------------------------- + raise DocumentTooLarge(f"{operation!r} command document too large") -_OP_MSG_MAP = { - _INSERT: b"documents\x00", - _UPDATE: b"updates\x00", - _DELETE: b"deletes\x00", -} +# From the Client Side Encryption spec: +# Because automatic encryption increases the size of commands, the driver +# MUST split bulk writes at a reduced size limit before undergoing automatic +# encryption. The write payload MUST be split at 2MiB (2097152). +_MAX_SPLIT_SIZE_ENC = 2097152 -def _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf): +def _batched_op_msg_impl( + operation: int, + command: Mapping[str, Any], + docs: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions[Any], + ctx: _BulkWriteContext, + buf: _BytesIO, +) -> tuple[list[Mapping[str, Any]], int]: """Create a batched OP_MSG write.""" max_bson_size = ctx.max_bson_size max_write_batch_size = ctx.max_write_batch_size @@ -1061,7 +796,7 @@ def _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf): try: buf.write(_OP_MSG_MAP[operation]) except KeyError: - raise InvalidOperation("Unknown command") + raise InvalidOperation("Unknown command") from None to_send = [] idx = 0 @@ -1072,7 +807,7 @@ def _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf): new_message_size = buf.tell() + doc_length # Does first document exceed max_message_size? doc_too_large = idx == 0 and (new_message_size > max_message_size) - # When OP_MSG is used unacknowleged we have to check + # When OP_MSG is used unacknowledged we have to check # document size client side or applications won't be notified. # Otherwise we let the server deal with documents that are too large # since ordered=False causes those documents to be skipped instead of @@ -1099,7 +834,14 @@ def _batched_op_msg_impl(operation, command, docs, ack, opts, ctx, buf): return to_send, length -def _encode_batched_op_msg(operation, command, docs, ack, opts, ctx): +def _encode_batched_op_msg( + operation: int, + command: Mapping[str, Any], + docs: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions[Any], + ctx: _BulkWriteContext, +) -> tuple[bytes, list[Mapping[str, Any]]]: """Encode the next batched insert, update, or delete operation as OP_MSG. 
""" @@ -1110,20 +852,35 @@ def _encode_batched_op_msg(operation, command, docs, ack, opts, ctx): if _use_c: - _encode_batched_op_msg = _cmessage._encode_batched_op_msg # noqa: F811 + _encode_batched_op_msg = _cmessage._encode_batched_op_msg -def _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx): +def _batched_op_msg_compressed( + operation: int, + command: Mapping[str, Any], + docs: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions[Any], + ctx: _BulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]]]: """Create the next batched insert, update, or delete operation with OP_MSG, compressed. """ data, to_send = _encode_batched_op_msg(operation, command, docs, ack, opts, ctx) - request_id, msg = _compress(2013, data, ctx.sock_info.compression_context) + assert ctx.conn.compression_context is not None + request_id, msg = _compress(2013, data, ctx.conn.compression_context) return request_id, msg, to_send -def _batched_op_msg(operation, command, docs, ack, opts, ctx): +def _batched_op_msg( + operation: int, + command: Mapping[str, Any], + docs: list[Mapping[str, Any]], + ack: bool, + opts: CodecOptions[Any], + ctx: _BulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]]]: """OP_MSG implementation entry point.""" buf = _BytesIO() @@ -1145,10 +902,17 @@ def _batched_op_msg(operation, command, docs, ack, opts, ctx): if _use_c: - _batched_op_msg = _cmessage._batched_op_msg # noqa: F811 + _batched_op_msg = _cmessage._batched_op_msg -def _do_batched_op_msg(namespace, operation, command, docs, opts, ctx): +def _do_batched_op_msg( + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions[Any], + ctx: _BulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]]]: """Create the next batched insert, update, or delete operation using OP_MSG. 
""" @@ -1157,27 +921,361 @@ def _do_batched_op_msg(namespace, operation, command, docs, opts, ctx): ack = bool(command["writeConcern"].get("w", 1)) else: ack = True - if ctx.sock_info.compression_context: + if ctx.conn.compression_context: return _batched_op_msg_compressed(operation, command, docs, ack, opts, ctx) return _batched_op_msg(operation, command, docs, ack, opts, ctx) -# End OP_MSG ----------------------------------------------------- - - -def _encode_batched_write_command(namespace, operation, command, docs, opts, ctx): - """Encode the next batched insert, update, or delete command.""" - buf = _BytesIO() +class _ClientBulkWriteContext(_BulkWriteContextBase): + """A wrapper around AsyncConnection/Connection for use with the client-level bulk write API.""" - to_send, _ = _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf) - return buf.getvalue(), to_send + __slots__ = () + def __init__( + self, + database_name: str, + cmd_name: str, + conn: _AgnosticConnection, + operation_id: int, + listeners: _EventListeners, + session: Optional[_AgnosticClientSession], + codec: CodecOptions[Any], + ): + super().__init__( + database_name, + cmd_name, + conn, + operation_id, + listeners, + session, + 0, + codec, + ) -if _use_c: - _encode_batched_write_command = _cmessage._encode_batched_write_command # noqa: F811 + def batch_command( + self, + cmd: MutableMapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[int, Union[bytes, dict[str, Any]], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + request_id, msg, to_send_ops, to_send_ns = _client_do_batched_op_msg( + cmd, operations, namespaces, self.codec, self + ) + if not to_send_ops: + raise InvalidOperation("cannot do an empty bulk write") + return request_id, msg, to_send_ops, to_send_ns + def _start( + self, + cmd: MutableMapping[str, Any], + request_id: int, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + ) -> MutableMapping[str, Any]: + """Publish a CommandStartedEvent.""" + cmd["ops"] = op_docs + cmd["nsInfo"] = ns_docs + self.listeners.publish_command_start( + cmd, + self.db_name, + request_id, + self.conn.address, + self.conn.server_connection_id, + self.op_id, + self.conn.service_id, + ) + return cmd + + +_OP_MSG_OVERHEAD = 1000 + + +def _client_construct_op_msg( + command_encoded: bytes, + to_send_ops_encoded: list[bytes], + to_send_ns_encoded: list[bytes], + ack: bool, + buf: _BytesIO, +) -> int: + # Write flags + flags = b"\x00\x00\x00\x00" if ack else b"\x02\x00\x00\x00" + buf.write(flags) + + # Type 0 Section + buf.write(b"\x00") + buf.write(command_encoded) + + # Type 1 Section for ops + buf.write(b"\x01") + size_location = buf.tell() + # Save space for size + buf.write(b"\x00\x00\x00\x00") + buf.write(b"ops\x00") + # Write all the ops documents + for op_encoded in to_send_ops_encoded: + buf.write(op_encoded) + resume_location = buf.tell() + # Write type 1 section size + length = buf.tell() + buf.seek(size_location) + buf.write(_pack_int(length - size_location)) + buf.seek(resume_location) + + # Type 1 Section for nsInfo + buf.write(b"\x01") + size_location = buf.tell() + # Save space for size + buf.write(b"\x00\x00\x00\x00") + buf.write(b"nsInfo\x00") + # Write all the nsInfo documents + for ns_encoded in to_send_ns_encoded: + buf.write(ns_encoded) + # Write type 1 section size + length = buf.tell() + buf.seek(size_location) + buf.write(_pack_int(length - size_location)) + + return length + + +def 
_client_batched_op_msg_impl( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ack: bool, + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, + buf: _BytesIO, +) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]], int]: + """Create a batched OP_MSG write for client-level bulk write.""" + + def _check_doc_size_limits( + op_type: str, + doc_size: int, + limit: int, + ) -> None: + if doc_size > limit: + _raise_document_too_large(op_type, doc_size, limit) + + max_bson_size = ctx.max_bson_size + max_write_batch_size = ctx.max_write_batch_size + max_message_size = ctx.max_message_size + + command_encoded = _dict_to_bson(command, False, opts) + # When OP_MSG is used unacknowledged we have to check command + # document size client-side or applications won't be notified. + if not ack: + _check_doc_size_limits("bulkWrite", len(command_encoded), max_bson_size + _COMMAND_OVERHEAD) + + # Don't include bulkWrite-command-agnostic fields in batch-splitting calculations. + abridged_keys = ["bulkWrite", "errorsOnly", "ordered"] + if command.get("bypassDocumentValidation"): + abridged_keys.append("bypassDocumentValidation") + if command.get("comment"): + abridged_keys.append("comment") + if command.get("let"): + abridged_keys.append("let") + command_abridged = {key: command[key] for key in abridged_keys} + command_len_abridged = len(_dict_to_bson(command_abridged, False, opts)) + + # Maximum combined size of the ops and nsInfo document sequences. + max_doc_sequences_bytes = max_message_size - (_OP_MSG_OVERHEAD + command_len_abridged) + + ns_info = {} + to_send_ops: list[Mapping[str, Any]] = [] + to_send_ns: list[Mapping[str, str]] = [] + to_send_ops_encoded: list[bytes] = [] + to_send_ns_encoded: list[bytes] = [] + total_ops_length = 0 + total_ns_length = 0 + idx = 0 -def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf): + for (real_op_type, op_doc), namespace in zip(operations, namespaces): + op_type = real_op_type + # Check insert/replace document size if unacknowledged. + if real_op_type == "insert": + if not ack: + doc_size = len(_dict_to_bson(op_doc["document"], False, opts)) + _check_doc_size_limits(real_op_type, doc_size, max_bson_size) + if real_op_type == "replace": + op_type = "update" + if not ack: + doc_size = len(_dict_to_bson(op_doc["updateMods"], False, opts)) + _check_doc_size_limits(real_op_type, doc_size, max_bson_size) + + ns_doc = None + ns_length = 0 + + if namespace not in ns_info: + ns_doc = {"ns": namespace} + new_ns_index = len(to_send_ns) + ns_info[namespace] = new_ns_index + + # First entry in the operation doc has the operation type as its + # key and the index of its namespace within ns_info as its value. + op_doc[op_type] = ns_info[namespace] # type: ignore[index] + + # Encode current operation doc and, if newly added, namespace doc. + op_doc_encoded = _dict_to_bson(op_doc, False, opts) + op_length = len(op_doc_encoded) + if ns_doc: + ns_doc_encoded = _dict_to_bson(ns_doc, False, opts) + ns_length = len(ns_doc_encoded) + + # Check operation document size if unacknowledged. + if not ack: + _check_doc_size_limits(op_type, op_length, max_bson_size + _COMMAND_OVERHEAD) + + new_message_size = total_ops_length + total_ns_length + op_length + ns_length + # We have enough data, return this batch. 
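# [Editor's note, on the check below; not part of this patch.] The budget is
# max_message_size minus the fixed _OP_MSG_OVERHEAD (1000 bytes) and the
# encoded size of the batch-agnostic command fields: e.g. with the typical
# 48 MB max_message_size and a ~60-byte abridged command, roughly
# 48_000_000 - 1000 - 60 bytes remain for the ops and nsInfo sequences.
# If even the first operation overflows that budget the driver raises
# DocumentTooLarge; otherwise the batch simply ends at the previous op.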
+ if new_message_size > max_doc_sequences_bytes: + if idx == 0: + _raise_document_too_large(op_type, op_length, max_bson_size + _COMMAND_OVERHEAD) + break + + # Add op and ns documents to this batch. + to_send_ops.append(op_doc) + to_send_ops_encoded.append(op_doc_encoded) + total_ops_length += op_length + if ns_doc: + to_send_ns.append(ns_doc) + to_send_ns_encoded.append(ns_doc_encoded) + total_ns_length += ns_length + + idx += 1 + + # We have enough documents, return this batch. + if idx == max_write_batch_size: + break + + # Construct the entire OP_MSG. + length = _client_construct_op_msg( + command_encoded, to_send_ops_encoded, to_send_ns_encoded, ack, buf + ) + + return to_send_ops, to_send_ns, length + + +def _client_encode_batched_op_msg( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ack: bool, + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, +) -> tuple[bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Encode the next batched client-level bulkWrite + operation as OP_MSG. + """ + buf = _BytesIO() + + to_send_ops, to_send_ns, _ = _client_batched_op_msg_impl( + command, operations, namespaces, ack, opts, ctx, buf + ) + return buf.getvalue(), to_send_ops, to_send_ns + + +def _client_batched_op_msg_compressed( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ack: bool, + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Create the next batched client-level bulkWrite operation + with OP_MSG, compressed. + """ + data, to_send_ops, to_send_ns = _client_encode_batched_op_msg( + command, operations, namespaces, ack, opts, ctx + ) + + assert ctx.conn.compression_context is not None + request_id, msg = _compress(2013, data, ctx.conn.compression_context) + return request_id, msg, to_send_ops, to_send_ns + + +def _client_batched_op_msg( + command: Mapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ack: bool, + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """OP_MSG implementation entry point for client-level bulkWrite.""" + buf = _BytesIO() + + # Save space for message length and request id + buf.write(_ZERO_64) + # responseTo, opCode + buf.write(b"\x00\x00\x00\x00\xdd\x07\x00\x00") + + to_send_ops, to_send_ns, length = _client_batched_op_msg_impl( + command, operations, namespaces, ack, opts, ctx, buf + ) + + # Header - request id and message length + buf.seek(4) + request_id = _randint() + buf.write(_pack_int(request_id)) + buf.seek(0) + buf.write(_pack_int(length)) + + return request_id, buf.getvalue(), to_send_ops, to_send_ns + + +def _client_do_batched_op_msg( + command: MutableMapping[str, Any], + operations: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + opts: CodecOptions[Any], + ctx: _ClientBulkWriteContext, +) -> tuple[int, bytes, list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Create the next batched client-level bulkWrite + operation using OP_MSG. 
+ """ + command["$db"] = "admin" + if "writeConcern" in command: + ack = bool(command["writeConcern"].get("w", 1)) + else: + ack = True + if ctx.conn.compression_context: + return _client_batched_op_msg_compressed(command, operations, namespaces, ack, opts, ctx) + return _client_batched_op_msg(command, operations, namespaces, ack, opts, ctx) + + +# End OP_MSG ----------------------------------------------------- + + +def _encode_batched_write_command( + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions[Any], + ctx: _BulkWriteContext, +) -> tuple[bytes, list[Mapping[str, Any]]]: + """Encode the next batched insert, update, or delete command.""" + buf = _BytesIO() + + to_send, _ = _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, buf) + return buf.getvalue(), to_send + + +if _use_c: + _encode_batched_write_command = _cmessage._encode_batched_write_command + + +def _batched_write_command_impl( + namespace: str, + operation: int, + command: MutableMapping[str, Any], + docs: list[Mapping[str, Any]], + opts: CodecOptions[Any], + ctx: _BulkWriteContext, + buf: _BytesIO, +) -> tuple[list[Mapping[str, Any]], int]: """Create a batched OP_QUERY write command.""" max_bson_size = ctx.max_bson_size max_write_batch_size = ctx.max_write_batch_size @@ -1196,7 +1294,7 @@ def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, # Where to write command document length command_start = buf.tell() - buf.write(encode(command)) + buf.write(bson.encode(command)) # Start of payload buf.seek(-1, 2) @@ -1205,7 +1303,7 @@ def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, try: buf.write(_OP_MAP[operation]) except KeyError: - raise InvalidOperation("Unknown command") + raise InvalidOperation("Unknown command") from None # Where to write list document length list_start = buf.tell() - 4 @@ -1246,7 +1344,7 @@ def _batched_write_command_impl(namespace, operation, command, docs, opts, ctx, return to_send, length -class _OpReply(object): +class _OpReply: """A MongoDB OP_REPLY response message.""" __slots__ = ("flags", "cursor_id", "number_returned", "documents") @@ -1254,13 +1352,17 @@ class _OpReply(object): UNPACK_FROM = struct.Struct(" list[bytes | memoryview]: """Check the response header from the database, without decoding BSON. Check the response for errors and unpack. @@ -1268,8 +1370,7 @@ def raw_response(self, cursor_id=None, user_fields=None): Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. - :Parameters: - - `cursor_id` (optional): cursor_id we sent to get this response - + :param cursor_id: cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response. """ @@ -1284,7 +1385,7 @@ def raw_response(self, cursor_id=None, user_fields=None): errobj = {"ok": 0, "errmsg": msg, "code": 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: - error_object: dict = bson.BSON(self.documents).decode() + error_object: dict[str, Any] = bson.BSON(self.documents).decode() # Fake the ok field if it doesn't exist. 
error_object.setdefault("ok", 0) if error_object["$err"].startswith(HelloCompat.LEGACY_ERROR): @@ -1305,11 +1406,11 @@ def raw_response(self, cursor_id=None, user_fields=None): def unpack_response( self, - cursor_id=None, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - user_fields=None, - legacy_response=False, - ): + cursor_id: Optional[int] = None, + codec_options: CodecOptions[Any] = _UNICODE_REPLACE_CODEC_OPTIONS, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[dict[str, Any]]: """Unpack a response from the database and decode the BSON document(s). Check the response for errors and unpack, returning a dictionary @@ -1318,36 +1419,38 @@ def unpack_response( Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. - :Parameters: - - `cursor_id` (optional): cursor_id we sent to get this response - + :param cursor_id: cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response - - `codec_options` (optional): an instance of + :param codec_options: an instance of :class:`~bson.codec_options.CodecOptions` + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. """ self.raw_response(cursor_id) if legacy_response: return bson.decode_all(self.documents, codec_options) return bson._decode_all_selective(self.documents, codec_options, user_fields) - def command_response(self, codec_options): + def command_response(self, codec_options: CodecOptions[Any]) -> dict[str, Any]: """Unpack a command response.""" docs = self.unpack_response(codec_options=codec_options) assert self.number_returned == 1 return docs[0] - def raw_command_response(self): + def raw_command_response(self) -> NoReturn: """Return the bytes of the command response.""" # This should never be called on _OpReply. raise NotImplementedError @property - def more_to_come(self): + def more_to_come(self) -> bool: """Is the moreToCome bit set on this response?""" return False @classmethod - def unpack(cls, msg): + def unpack(cls, msg: bytes | memoryview) -> _OpReply: """Construct an _OpReply from raw bytes.""" # PYTHON-945: ignore starting_from field. flags, cursor_id, _, number_returned = cls.UNPACK_FROM(msg) @@ -1356,7 +1459,7 @@ def unpack(cls, msg): return cls(flags, cursor_id, number_returned, documents) -class _OpMsg(object): +class _OpMsg: """A MongoDB OP_MSG response message.""" __slots__ = ("flags", "cursor_id", "number_returned", "payload_document") @@ -1369,63 +1472,69 @@ class _OpMsg(object): MORE_TO_COME = 1 << 1 EXHAUST_ALLOWED = 1 << 16 # Only present on requests. 
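+ # unpack() below accepts only a flags value of 0 or MORE_TO_COME (an exhaust-cursor reply with more batches to follow); checksumPresent replies are rejected.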
- def __init__(self, flags, payload_document): + def __init__(self, flags: int, payload_document: bytes | memoryview): self.flags = flags self.payload_document = payload_document - def raw_response(self, cursor_id=None, user_fields={}): # noqa: B006 + def raw_response( + self, + cursor_id: Optional[int] = None, + user_fields: Optional[Mapping[str, Any]] = {}, + ) -> list[Mapping[str, Any]]: """ cursor_id is ignored. user_fields is used to determine which fields must not be decoded. """ - inflated_response = _decode_selective( - RawBSONDocument(self.payload_document), user_fields, DEFAULT_RAW_BSON_OPTIONS + inflated_response = bson._decode_selective( + RawBSONDocument(self.payload_document), user_fields, _RAW_ARRAY_BSON_OPTIONS ) return [inflated_response] def unpack_response( self, - cursor_id=None, - codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, - user_fields=None, - legacy_response=False, - ): + cursor_id: Optional[int] = None, + codec_options: CodecOptions[Any] = _UNICODE_REPLACE_CODEC_OPTIONS, + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[dict[str, Any]]: """Unpack an OP_MSG command response. - :Parameters: - - `cursor_id` (optional): Ignored, for compatibility with _OpReply. - - `codec_options` (optional): an instance of + :param cursor_id: Ignored, for compatibility with _OpReply. + :param codec_options: an instance of :class:`~bson.codec_options.CodecOptions` + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. """ # If _OpMsg is in-use, this cannot be a legacy response. assert not legacy_response return bson._decode_all_selective(self.payload_document, codec_options, user_fields) - def command_response(self, codec_options): + def command_response(self, codec_options: CodecOptions[Any]) -> dict[str, Any]: """Unpack a command response.""" return self.unpack_response(codec_options=codec_options)[0] - def raw_command_response(self): + def raw_command_response(self) -> bytes | memoryview: """Return the bytes of the command response.""" return self.payload_document @property - def more_to_come(self): + def more_to_come(self) -> bool: """Is the moreToCome bit set on this response?""" - return self.flags & self.MORE_TO_COME + return bool(self.flags & self.MORE_TO_COME) @classmethod - def unpack(cls, msg): + def unpack(cls, msg: bytes | memoryview) -> _OpMsg: """Construct an _OpMsg from raw bytes.""" flags, first_payload_type, first_payload_size = cls.UNPACK_FROM(msg) if flags != 0: if flags & cls.CHECKSUM_PRESENT: - raise ProtocolError("Unsupported OP_MSG flag checksumPresent: 0x%x" % (flags,)) + raise ProtocolError(f"Unsupported OP_MSG flag checksumPresent: 0x{flags:x}") if flags ^ cls.MORE_TO_COME: - raise ProtocolError("Unsupported OP_MSG flags: 0x%x" % (flags,)) + raise ProtocolError(f"Unsupported OP_MSG flags: 0x{flags:x}") if first_payload_type != 0: - raise ProtocolError("Unsupported OP_MSG payload type: 0x%x" % (first_payload_type,)) + raise ProtocolError(f"Unsupported OP_MSG payload type: 0x{first_payload_type:x}") if len(msg) != first_payload_size + 5: raise ProtocolError("Unsupported OP_MSG reply: >1 section") @@ -1434,7 +1543,360 @@ def unpack(cls, msg): return cls(flags, payload_document) -_UNPACK_REPLY = { +_UNPACK_REPLY: dict[int, Callable[[bytes | memoryview], Union[_OpReply, _OpMsg]]] = { _OpReply.OP_CODE: _OpReply.unpack, _OpMsg.OP_CODE: _OpMsg.unpack, } + + +class _Query: + """A query operation.""" + + __slots__ = ( + "flags", +
"db", + "coll", + "ntoskip", + "spec", + "fields", + "codec_options", + "read_preference", + "limit", + "batch_size", + "name", + "read_concern", + "collation", + "session", + "client", + "allow_disk_use", + "_as_command", + "exhaust", + ) + + # For compatibility with the _GetMore class. + conn_mgr = None + cursor_id = None + + def __init__( + self, + flags: int, + db: str, + coll: str, + ntoskip: int, + spec: Mapping[str, Any], + fields: Optional[Mapping[str, Any]], + codec_options: CodecOptions[Any], + read_preference: _ServerMode, + limit: int, + batch_size: int, + read_concern: ReadConcern, + collation: Optional[Mapping[str, Any]], + session: Optional[_AgnosticClientSession], + client: _AgnosticMongoClient, + allow_disk_use: Optional[bool], + exhaust: bool, + ): + self.flags = flags + self.db = db + self.coll = coll + self.ntoskip = ntoskip + self.spec = spec + self.fields = fields + self.codec_options = codec_options + self.read_preference = read_preference + self.read_concern = read_concern + self.limit = limit + self.batch_size = batch_size + self.collation = collation + self.session = session + self.client = client + self.allow_disk_use = allow_disk_use + self.name = "find" + self._as_command: Optional[tuple[dict[str, Any], str]] = None + self.exhaust = exhaust + + def reset(self) -> None: + self._as_command = None + + def namespace(self) -> str: + return f"{self.db}.{self.coll}" + + def use_command(self, conn: _AgnosticConnection) -> bool: + use_find_cmd = False + if not self.exhaust: + use_find_cmd = True + elif conn.max_wire_version >= 8: + # OP_MSG supports exhaust on MongoDB 4.2+ + use_find_cmd = True + elif not self.read_concern.ok_for_legacy: + raise ConfigurationError( + "read concern level of %s is not valid " + "with a max wire version of %d." % (self.read_concern.level, conn.max_wire_version) + ) + + conn.validate_session(self.client, self.session) # type: ignore[arg-type] + return use_find_cmd + + def update_command(self, cmd: dict[str, Any]) -> None: + self._as_command = cmd, self.db + + def as_command( + self, conn: _AgnosticConnection, apply_timeout: bool = False + ) -> tuple[dict[str, Any], str]: + """Return a find command document for this query.""" + # We use the command twice: on the wire and for command monitoring. + # Generate it once, for speed and to avoid repeating side-effects. + if self._as_command is not None: + return self._as_command + + explain = "$explain" in self.spec + cmd: dict[str, Any] = _gen_find_command( + self.coll, + self.spec, + self.fields, + self.ntoskip, + self.limit, + self.batch_size, + self.flags, + self.read_concern, + self.collation, + self.session, + self.allow_disk_use, + ) + if explain: + self.name = "explain" + cmd = {"explain": cmd} + conn.add_server_api(cmd) + if self.session: + self.session._apply_to(cmd, False, self.read_preference, conn) # type: ignore[arg-type] + # Explain does not support readConcern. 
+ if not explain and not self.session.in_transaction: + self.session._update_read_concern(cmd, conn) # type: ignore[arg-type] + conn.send_cluster_time(cmd, self.session, self.client) # type: ignore[arg-type] + # Support CSOT + if apply_timeout: + conn.apply_timeout(self.client, cmd=cmd) # type: ignore[arg-type] + self._as_command = cmd, self.db + return self._as_command + + def get_message( + self, read_preference: _ServerMode, conn: _AgnosticConnection, use_cmd: bool = False + ) -> tuple[int, bytes, int]: + """Get a query message, possibly setting the secondaryOk bit.""" + # Use the read_preference decided by _socket_from_server. + self.read_preference = read_preference + if read_preference.mode: + # Set the secondaryOk bit. + flags = self.flags | 4 + else: + flags = self.flags + + ns = self.namespace() + spec = self.spec + + if use_cmd: + spec = self.as_command(conn)[0] + request_id, msg, size, _ = _op_msg( + 0, + spec, + self.db, + read_preference, + self.codec_options, + ctx=conn.compression_context, + ) + return request_id, msg, size + + # OP_QUERY treats ntoreturn of -1 and 1 the same, return + # one document and close the cursor. We have to use 2 for + # batch size if 1 is specified. + ntoreturn = self.batch_size == 1 and 2 or self.batch_size + if self.limit: + if ntoreturn: + ntoreturn = min(self.limit, ntoreturn) + else: + ntoreturn = self.limit + + if conn.is_mongos: + assert isinstance(spec, MutableMapping) + spec = _maybe_add_read_preference(spec, read_preference) + + return _query( + flags, + ns, + self.ntoskip, + ntoreturn, + spec, + None if use_cmd else self.fields, + self.codec_options, + ctx=conn.compression_context, + ) + + +class _GetMore: + """A getmore operation.""" + + __slots__ = ( + "db", + "coll", + "ntoreturn", + "cursor_id", + "max_await_time_ms", + "codec_options", + "read_preference", + "session", + "client", + "conn_mgr", + "_as_command", + "exhaust", + "comment", + ) + + name = "getMore" + + def __init__( + self, + db: str, + coll: str, + ntoreturn: int, + cursor_id: int, + codec_options: CodecOptions[Any], + read_preference: _ServerMode, + session: Optional[_AgnosticClientSession], + client: _AgnosticMongoClient, + max_await_time_ms: Optional[int], + conn_mgr: Any, + exhaust: bool, + comment: Any, + ): + self.db = db + self.coll = coll + self.ntoreturn = ntoreturn + self.cursor_id = cursor_id + self.codec_options = codec_options + self.read_preference = read_preference + self.session = session + self.client = client + self.max_await_time_ms = max_await_time_ms + self.conn_mgr = conn_mgr + self._as_command: Optional[tuple[dict[str, Any], str]] = None + self.exhaust = exhaust + self.comment = comment + + def reset(self) -> None: + self._as_command = None + + def namespace(self) -> str: + return f"{self.db}.{self.coll}" + + def use_command(self, conn: _AgnosticConnection) -> bool: + use_cmd = False + if not self.exhaust: + use_cmd = True + elif conn.max_wire_version >= 8: + # OP_MSG supports exhaust on MongoDB 4.2+ + use_cmd = True + + conn.validate_session(self.client, self.session) # type: ignore[arg-type] + return use_cmd + + def update_command(self, cmd: dict[str, Any]) -> None: + self._as_command = cmd, self.db + + def as_command( + self, conn: _AgnosticConnection, apply_timeout: bool = False + ) -> tuple[dict[str, Any], str]: + """Return a getMore command document for this query.""" + # See _Query.as_command for an explanation of this caching. 
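+ # As with _Query, generate the command once so the wire message and command monitoring see the same document.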
+ if self._as_command is not None: + return self._as_command + + cmd: dict[str, Any] = _gen_get_more_command( + self.cursor_id, + self.coll, + self.ntoreturn, + self.max_await_time_ms, + self.comment, + conn, + ) + if self.session: + self.session._apply_to(cmd, False, self.read_preference, conn) # type: ignore[arg-type] + conn.add_server_api(cmd) + conn.send_cluster_time(cmd, self.session, self.client) # type: ignore[arg-type] + # Support CSOT + if apply_timeout: + conn.apply_timeout(self.client, cmd=None) # type: ignore[arg-type] + self._as_command = cmd, self.db + return self._as_command + + def get_message( + self, dummy0: Any, conn: _AgnosticConnection, use_cmd: bool = False + ) -> Union[tuple[int, bytes, int], tuple[int, bytes]]: + """Get a getmore message.""" + ns = self.namespace() + ctx = conn.compression_context + + if use_cmd: + spec = self.as_command(conn)[0] + if self.conn_mgr and self.exhaust: + flags = _OpMsg.EXHAUST_ALLOWED + else: + flags = 0 + request_id, msg, size, _ = _op_msg( + flags, spec, self.db, None, self.codec_options, ctx=conn.compression_context + ) + return request_id, msg, size + + return _get_more(ns, self.ntoreturn, self.cursor_id, ctx) + + +class _RawBatchQuery(_Query): + def use_command(self, conn: _AgnosticConnection) -> bool: + # Compatibility checks. + super().use_command(conn) + if conn.max_wire_version >= 8: + # MongoDB 4.2+ supports exhaust over OP_MSG + return True + elif not self.exhaust: + return True + return False + + +class _RawBatchGetMore(_GetMore): + def use_command(self, conn: _AgnosticConnection) -> bool: + # Compatibility checks. + super().use_command(conn) + if conn.max_wire_version >= 8: + # MongoDB 4.2+ supports exhaust over OP_MSG + return True + elif not self.exhaust: + return True + return False + + +class _CursorAddress(tuple[Any, ...]): + """The server address (host, port) of a cursor, with namespace property.""" + + __namespace: Any + + def __new__(cls, address: _Address, namespace: str) -> _CursorAddress: + self = tuple.__new__(cls, address) + self.__namespace = namespace + return self + + @property + def namespace(self) -> str: + """The namespace of this cursor.""" + return self.__namespace + + def __hash__(self) -> int: + # Two _CursorAddress instances with different namespaces + # must not hash the same. + return ((*self, self.__namespace)).__hash__() + + def __eq__(self, other: object) -> bool: + if isinstance(other, _CursorAddress): + return tuple(self) == tuple(other) and self.namespace == other.namespace + return NotImplemented + + def __ne__(self, other: object) -> bool: + return not self == other diff --git a/pymongo/mongo_client.py b/pymongo/mongo_client.py index 080ae8757c..778abe27ef 100644 --- a/pymongo/mongo_client.py +++ b/pymongo/mongo_client.py @@ -1,2144 +1,22 @@ -# Copyright 2009-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied.
See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -"""Tools for connecting to MongoDB. +"""Re-import of synchronous MongoClient API for compatibility.""" +from __future__ import annotations -.. seealso:: :doc:`/examples/high_availability` for examples of connecting - to replica sets or sets of mongos servers. +from pymongo.synchronous.mongo_client import * # noqa: F403 +from pymongo.synchronous.mongo_client import __doc__ as original_doc -To get a :class:`~pymongo.database.Database` instance from a -:class:`MongoClient` use either dictionary-style or attribute-style -access: - -.. doctest:: - - >>> from pymongo import MongoClient - >>> c = MongoClient() - >>> c.test_database - Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') - >>> c['test-database'] - Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') -""" - -import contextlib -import threading -import weakref -from collections import defaultdict -from typing import ( - TYPE_CHECKING, - Any, - Dict, - FrozenSet, - Generic, - List, - Mapping, - NoReturn, - Optional, - Sequence, - Set, - Tuple, - Type, - Union, - cast, -) - -from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry -from bson.son import SON -from bson.timestamp import Timestamp -from pymongo import ( - _csot, - client_session, - common, - database, - helpers, - message, - periodic_executor, - uri_parser, -) -from pymongo.change_stream import ChangeStream, ClusterChangeStream -from pymongo.client_options import ClientOptions -from pymongo.client_session import _EmptyServerSession -from pymongo.command_cursor import CommandCursor -from pymongo.errors import ( - AutoReconnect, - BulkWriteError, - ConfigurationError, - ConnectionFailure, - InvalidOperation, - NotPrimaryError, - OperationFailure, - PyMongoError, - ServerSelectionTimeoutError, - WaitQueueTimeoutError, -) -from pymongo.pool import ConnectionClosedReason -from pymongo.read_preferences import ReadPreference, _ServerMode -from pymongo.server_selectors import writable_server_selector -from pymongo.server_type import SERVER_TYPE -from pymongo.settings import TopologySettings -from pymongo.topology import Topology, _ErrorContext -from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription -from pymongo.typings import _Address, _CollationIn, _DocumentType, _Pipeline -from pymongo.uri_parser import ( - _check_options, - _handle_option_deprecations, - _handle_security_options, - _normalize_options, -) -from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern - -if TYPE_CHECKING: - import sys - - from pymongo.read_concern import ReadConcern - - if sys.version_info[:2] >= (3, 9): - from collections.abc import Generator - else: - # Deprecated since version 3.9: collections.abc.Generator now supports []. - from typing import Generator - - -class MongoClient(common.BaseObject, Generic[_DocumentType]): - """ - A client-side representation of a MongoDB cluster. - - Instances can represent either a standalone MongoDB server, a replica - set, or a sharded cluster. 
Instances of this class are responsible for - maintaining up-to-date state of the cluster, and possibly cache - resources related to this, including background threads for monitoring, - and connection pools. - """ - - HOST = "localhost" - PORT = 27017 - # Define order to retrieve options from ClientOptions for __repr__. - # No host/port; these are retrieved from TopologySettings. - _constructor_args = ("document_class", "tz_aware", "connect") - - def __init__( - self, - host: Optional[Union[str, Sequence[str]]] = None, - port: Optional[int] = None, - document_class: Optional[Type[_DocumentType]] = None, - tz_aware: Optional[bool] = None, - connect: Optional[bool] = None, - type_registry: Optional[TypeRegistry] = None, - **kwargs: Any, - ) -> None: - """Client for a MongoDB instance, a replica set, or a set of mongoses. - - .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of - False instead of None. - For more details, see the relevant section of the PyMongo 4.x migration guide: - :ref:`pymongo4-migration-direct-connection`. - - The client object is thread-safe and has connection-pooling built in. - If an operation fails because of a network error, - :class:`~pymongo.errors.ConnectionFailure` is raised and the client - reconnects in the background. Application code should handle this - exception (recognizing that the operation failed) and then continue to - execute. - - The `host` parameter can be a full `mongodb URI - <http://dochub.mongodb.org/core/connections>`_, in addition to - a simple hostname. It can also be a list of hostnames but no more - than one URI. Any port specified in the host string(s) will override - the `port` parameter. For usernames and - passwords, reserved characters like ':', '/', '+' and '@' must be - percent encoded following RFC 2396:: - - from urllib.parse import quote_plus - - uri = "mongodb://%s:%s@%s" % ( - quote_plus(user), quote_plus(password), host) - client = MongoClient(uri) - - Unix domain sockets are also supported. The socket path must be percent - encoded in the URI:: - - uri = "mongodb://%s:%s@%s" % ( - quote_plus(user), quote_plus(password), quote_plus(socket_path)) - client = MongoClient(uri) - - But not when passed as a simple hostname:: - - client = MongoClient('/tmp/mongodb-27017.sock') - - Starting with version 3.6, PyMongo supports mongodb+srv:// URIs. The - URI must include one, and only one, hostname. The hostname will be - resolved to one or more DNS `SRV records - <https://en.wikipedia.org/wiki/SRV_record>`_ which will be used - as the seed list for connecting to the MongoDB deployment. When using - SRV URIs, the `authSource` and `replicaSet` configuration options can - be specified using `TXT records - <https://en.wikipedia.org/wiki/TXT_record>`_. See the - `Initial DNS Seedlist Discovery spec - <https://github.com/mongodb/specifications/blob/master/source/initial-dns-seedlist-discovery/initial-dns-seedlist-discovery.rst>`_ - for more details. Note that the use of SRV URIs implicitly enables - TLS support. Pass tls=false in the URI to override. - - .. note:: MongoClient creation will block waiting for answers from - DNS when mongodb+srv:// URIs are used. - - .. note:: Starting with version 3.0 the :class:`MongoClient` - constructor no longer blocks while connecting to the server or - servers, and it no longer raises - :class:`~pymongo.errors.ConnectionFailure` if they are - unavailable, nor :class:`~pymongo.errors.ConfigurationError` - if the user's credentials are wrong. Instead, the constructor - returns immediately and launches the connection process on - background threads.
You can check if the server is available - like this:: - - from pymongo.errors import ConnectionFailure - client = MongoClient() - try: - # The ping command is cheap and does not require auth. - client.admin.command('ping') - except ConnectionFailure: - print("Server not available") - - .. warning:: When using PyMongo in a multiprocessing context, please - read :ref:`multiprocessing` first. - - .. note:: Many of the following options can be passed using a MongoDB - URI or keyword parameters. If the same option is passed in a URI and - as a keyword parameter the keyword parameter takes precedence. - - :Parameters: - - `host` (optional): hostname or IP address or Unix domain socket - path of a single mongod or mongos instance to connect to, or a - mongodb URI, or a list of hostnames (but no more than one mongodb - URI). If `host` is an IPv6 literal it must be enclosed in '[' - and ']' characters - following the RFC2732 URL syntax (e.g. '[::1]' for localhost). - Multihomed and round robin DNS addresses are **not** supported. - - `port` (optional): port number on which to connect - - `document_class` (optional): default class to use for - documents returned from queries on this client - - `tz_aware` (optional): if ``True``, - :class:`~datetime.datetime` instances returned as values - in a document by this :class:`MongoClient` will be timezone - aware (otherwise they will be naive) - - `connect` (optional): if ``True`` (the default), immediately - begin connecting to MongoDB in the background. Otherwise connect - on the first operation. - - `type_registry` (optional): instance of - :class:`~bson.codec_options.TypeRegistry` to enable encoding - and decoding of custom types. - - | **Other optional parameters can be passed as keyword arguments:** - - - `directConnection` (optional): if ``True``, forces this client to - connect directly to the specified MongoDB host as a standalone. - If ``false``, the client connects to the entire replica set of - which the given MongoDB host(s) is a part. If this is ``True`` - and a mongodb+srv:// URI or a URI containing multiple seeds is - provided, an exception will be raised. - - `maxPoolSize` (optional): The maximum allowable number of - concurrent connections to each connected server. Requests to a - server will block if there are `maxPoolSize` outstanding - connections to the requested server. Defaults to 100. Can be - either 0 or None, in which case there is no limit on the number - of concurrent connections. - - `minPoolSize` (optional): The minimum required number of concurrent - connections that the pool will maintain to each connected server. - Default is 0. - - `maxIdleTimeMS` (optional): The maximum number of milliseconds that - a connection can remain idle in the pool before being removed and - replaced. Defaults to `None` (no limit). - - `maxConnecting` (optional): The maximum number of connections that - each pool can establish concurrently. Defaults to `2`. - - `timeoutMS`: (integer or None) Controls how long (in - milliseconds) the driver will wait when executing an operation - (including retry attempts) before raising a timeout error. - ``0`` or ``None`` means no timeout. - - `socketTimeoutMS`: (integer or None) Controls how long (in - milliseconds) the driver will wait for a response after sending an - ordinary (non-monitoring) database operation before concluding that - a network error has occurred. ``0`` or ``None`` means no timeout. - Defaults to ``None`` (no timeout). 
- - `connectTimeoutMS`: (integer or None) Controls how long (in - milliseconds) the driver will wait during server monitoring when - connecting a new socket to a server before concluding the server - is unavailable. ``0`` or ``None`` means no timeout. - Defaults to ``20000`` (20 seconds). - - `server_selector`: (callable or None) Optional, user-provided - function that augments server selection rules. The function should - accept as an argument a list of - :class:`~pymongo.server_description.ServerDescription` objects and - return a list of server descriptions that should be considered - suitable for the desired operation. - - `serverSelectionTimeoutMS`: (integer) Controls how long (in - milliseconds) the driver will wait to find an available, - appropriate server to carry out a database operation; while it is - waiting, multiple server monitoring operations may be carried out, - each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30 - seconds). - - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) - a thread will wait for a socket from the pool if the pool has no - free sockets. Defaults to ``None`` (no timeout). - - `heartbeatFrequencyMS`: (optional) The number of milliseconds - between periodic server checks, or None to accept the default - frequency of 10 seconds. - - `appname`: (string or None) The name of the application that - created this MongoClient instance. The server will log this value - upon establishing each connection. It is also recorded in the slow - query log and profile collections. - - `driver`: (pair or None) A driver implemented on top of PyMongo can - pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, - version, and platform to the message printed in the server log when - establishing a connection. - - `event_listeners`: a list or tuple of event listeners. See - :mod:`~pymongo.monitoring` for details. - - `retryWrites`: (boolean) Whether supported write operations - executed within this MongoClient will be retried once after a - network error. Defaults to ``True``. - The supported write operations are: - - - :meth:`~pymongo.collection.Collection.bulk_write`, as long as - :class:`~pymongo.operations.UpdateMany` or - :class:`~pymongo.operations.DeleteMany` are not included. - - :meth:`~pymongo.collection.Collection.delete_one` - - :meth:`~pymongo.collection.Collection.insert_one` - - :meth:`~pymongo.collection.Collection.insert_many` - - :meth:`~pymongo.collection.Collection.replace_one` - - :meth:`~pymongo.collection.Collection.update_one` - - :meth:`~pymongo.collection.Collection.find_one_and_delete` - - :meth:`~pymongo.collection.Collection.find_one_and_replace` - - :meth:`~pymongo.collection.Collection.find_one_and_update` - - Unsupported write operations include, but are not limited to, - :meth:`~pymongo.collection.Collection.aggregate` using the ``$out`` - pipeline operator and any operation with an unacknowledged write - concern (e.g. {w: 0})). See - https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst - - `retryReads`: (boolean) Whether supported read operations - executed within this MongoClient will be retried once after a - network error. Defaults to ``True``. 
- The supported read operations are: - :meth:`~pymongo.collection.Collection.find`, - :meth:`~pymongo.collection.Collection.find_one`, - :meth:`~pymongo.collection.Collection.aggregate` without ``$out``, - :meth:`~pymongo.collection.Collection.distinct`, - :meth:`~pymongo.collection.Collection.count`, - :meth:`~pymongo.collection.Collection.estimated_document_count`, - :meth:`~pymongo.collection.Collection.count_documents`, - :meth:`pymongo.collection.Collection.watch`, - :meth:`~pymongo.collection.Collection.list_indexes`, - :meth:`pymongo.database.Database.watch`, - :meth:`~pymongo.database.Database.list_collections`, - :meth:`pymongo.mongo_client.MongoClient.watch`, - and :meth:`~pymongo.mongo_client.MongoClient.list_databases`. - - Unsupported read operations include, but are not limited to, - :meth:`~pymongo.database.Database.command` and any getMore - operation on a cursor. - - Enabling retryable reads makes applications more resilient to - transient errors such as network failures, database upgrades, and - replica set failovers. For an exact definition of which errors - trigger a retry, see the `retryable reads specification - <https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.rst>`_. - - - `compressors`: Comma separated list of compressors for wire - protocol compression. The list is used to negotiate a compressor - with the server. Currently supported options are "snappy", "zlib" - and "zstd". Support for snappy requires the - `python-snappy <https://pypi.org/project/python-snappy/>`_ package. - zlib support requires the Python standard library zlib module. zstd - requires the `zstandard <https://pypi.org/project/zstandard/>`_ - package. By default no compression is used. Compression support - must also be enabled on the server. MongoDB 3.6+ supports snappy - and zlib compression. MongoDB 4.2+ adds support for zstd. - - `zlibCompressionLevel`: (int) The zlib compression level to use - when zlib is used as the wire protocol compressor. Supported values - are -1 through 9. -1 tells the zlib library to use its default - compression level (usually 6). 0 means no compression. 1 is best - speed. 9 is best compression. Defaults to -1. - - `uuidRepresentation`: The BSON representation to use when encoding - from and decoding to instances of :class:`~uuid.UUID`. Valid - values are the strings: "standard", "pythonLegacy", "javaLegacy", - "csharpLegacy", and "unspecified" (the default). New applications - should consider setting this to "standard" for cross language - compatibility. See :ref:`handling-uuid-data-example` for details. - - `unicode_decode_error_handler`: The error handler to apply when - a Unicode-related error occurs during BSON decoding that would - otherwise raise :exc:`UnicodeDecodeError`. Valid options include - 'strict', 'replace', 'backslashreplace', 'surrogateescape', and - 'ignore'. Defaults to 'strict'. - - `srvServiceName`: (string) The SRV service name to use for - "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: - - MongoClient("mongodb+srv://example.com/?srvServiceName=customname") - - - | **Write Concern options:** - | (Only set if passed. No default values.) - - - `w`: (integer or string) If this is a replica set, write operations - will block until they have been replicated to the specified number - or tagged set of servers. `w=<int>` always includes the replica set - primary (e.g. w=3 means write to the primary and wait until - replicated to **two** secondaries). Passing w=0 **disables write - acknowledgement** and all other write concern options. - - `wTimeoutMS`: (integer) Used in conjunction with `w`.
Specify a value - in milliseconds to control how long to wait for write propagation - to complete. If replication does not complete in the given - timeframe, a timeout exception is raised. Passing wTimeoutMS=0 - will cause **write operations to wait indefinitely**. - - `journal`: If ``True``, block until write operations have been - committed to the journal. Cannot be used in combination with - `fsync`. Write operations will fail with an exception if this - option is used when the server is running without journaling. - - `fsync`: If ``True`` and the server is running without journaling, - blocks until the server has synced all data files to disk. If the - server is running with journaling, this acts the same as the `j` - option, blocking until write operations have been committed to the - journal. Cannot be used in combination with `j`. - - | **Replica set keyword arguments for connecting with a replica set - - either directly or via a mongos:** - - - `replicaSet`: (string or None) The name of the replica set to - connect to. The driver will verify that all servers it connects to - match this name. Implies that the hosts specified are a seed list - and the driver should attempt to find all members of the set. - Defaults to ``None``. - - | **Read Preference:** - - - `readPreference`: The replica set read preference for this client. - One of ``primary``, ``primaryPreferred``, ``secondary``, - ``secondaryPreferred``, or ``nearest``. Defaults to ``primary``. - - `readPreferenceTags`: Specifies a tag set as a comma-separated list - of colon-separated key-value pairs. For example ``dc:ny,rack:1``. - Defaults to ``None``. - - `maxStalenessSeconds`: (integer) The maximum estimated - length of time a replica set secondary can fall behind the primary - in replication before it will no longer be selected for operations. - Defaults to ``-1``, meaning no maximum. If maxStalenessSeconds - is set, it must be a positive integer greater than or equal to - 90 seconds. - - .. seealso:: :doc:`/examples/server_selection` - - | **Authentication:** - - - `username`: A string. - - `password`: A string. - - Although username and password must be percent-escaped in a MongoDB - URI, they must not be percent-escaped when passed as parameters. In - this example, both the space and slash special characters are passed - as-is:: - - MongoClient(username="user name", password="pass/word") - - - `authSource`: The database to authenticate on. Defaults to the - database specified in the URI, if provided, or to "admin". - - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options. - If no mechanism is specified, PyMongo automatically uses SCRAM-SHA-1 - when connected to MongoDB 3.6 and negotiates the mechanism to use - (SCRAM-SHA-1 or SCRAM-SHA-256) when connected to MongoDB 4.0+. - - `authMechanismProperties`: Used to specify authentication mechanism - specific options. To specify the service name for GSSAPI - authentication pass authMechanismProperties='SERVICE_NAME:<service name>'. - To specify the session token for MONGODB-AWS authentication pass - ``authMechanismProperties='AWS_SESSION_TOKEN:<session token>'``. - - .. seealso:: :doc:`/examples/authentication` - - | **TLS/SSL configuration:** - - - `tls`: (boolean) If ``True``, create the connection to the server - using transport layer security. Defaults to ``False``. - - `tlsInsecure`: (boolean) Specify whether TLS constraints should be - relaxed as much as possible. Setting ``tlsInsecure=True`` implies - ``tlsAllowInvalidCertificates=True`` and - ``tlsAllowInvalidHostnames=True``.
Defaults to ``False``. Think - very carefully before setting this to ``True`` as it dramatically - reduces the security of TLS. - - `tlsAllowInvalidCertificates`: (boolean) If ``True``, continues - the TLS handshake regardless of the outcome of the certificate - verification process. If this is ``False``, and a value is not - provided for ``tlsCAFile``, PyMongo will attempt to load system - provided CA certificates. If the python version in use does not - support loading system CA certificates then the ``tlsCAFile`` - parameter must point to a file of CA certificates. - ``tlsAllowInvalidCertificates=False`` implies ``tls=True``. - Defaults to ``False``. Think very carefully before setting this - to ``True`` as that could make your application vulnerable to - on-path attackers. - - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS - hostname verification. ``tlsAllowInvalidHostnames=False`` implies - ``tls=True``. Defaults to ``False``. Think very carefully before - setting this to ``True`` as that could make your application - vulnerable to on-path attackers. - - `tlsCAFile`: A file containing a single or a bundle of - "certification authority" certificates, which are used to validate - certificates passed from the other end of the connection. - Implies ``tls=True``. Defaults to ``None``. - - `tlsCertificateKeyFile`: A file containing the client certificate - and private key. Implies ``tls=True``. Defaults to ``None``. - - `tlsCRLFile`: A file containing a PEM or DER formatted - certificate revocation list. Only supported by python 2.7.9+ - (pypy 2.5.1+). Implies ``tls=True``. Defaults to ``None``. - - `tlsCertificateKeyFilePassword`: The password or passphrase for - decrypting the private key in ``tlsCertificateKeyFile``. Only - necessary if the private key is encrypted. Only supported by - python 2.7.9+ (pypy 2.5.1+) and 3.3+. Defaults to ``None``. - - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables - certificate revocation status checking via the OCSP responder - specified on the server certificate. - ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``. - Defaults to ``False``. - - `ssl`: (boolean) Alias for ``tls``. - - | **Read Concern options:** - | (If not set explicitly, this will use the server default) - - - `readConcernLevel`: (string) The read concern level specifies the - level of isolation for read operations. For example, a read - operation using a read concern level of ``majority`` will only - return data that has been written to a majority of nodes. If the - level is left unspecified, the server default will be used. - - | **Client side encryption options:** - | (If not set explicitly, client side encryption will not be enabled.) - - - `auto_encryption_opts`: A - :class:`~pymongo.encryption_options.AutoEncryptionOpts` which - configures this client to automatically encrypt collection commands - and automatically decrypt results. See - :ref:`automatic-client-side-encryption` for an example. - If a :class:`MongoClient` is configured with - ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a - separate internal ``MongoClient`` is created if any of the - following are true: - - - A ``key_vault_client`` is not passed to - :class:`~pymongo.encryption_options.AutoEncryptionOpts` - - ``bypass_auto_encryption=False`` is passed to - :class:`~pymongo.encryption_options.AutoEncryptionOpts` - - | **Stable API options:** - | (If not set explicitly, Stable API will not be enabled.)
- - - `server_api`: A - :class:`~pymongo.server_api.ServerApi` which configures this - client to use Stable API. See :ref:`versioned-api-ref` for - details. - - .. seealso:: The MongoDB documentation on `connections <https://dochub.mongodb.org/core/connections>`_. - - .. versionchanged:: 4.2 - Added the ``timeoutMS`` keyword argument. - - .. versionchanged:: 4.0 - - - Removed the fsync, unlock, is_locked, database_names, and - close_cursor methods. - See the :ref:`pymongo4-migration-guide`. - - Removed the ``waitQueueMultiple`` and ``socketKeepAlive`` - keyword arguments. - - The default for `uuidRepresentation` was changed from - ``pythonLegacy`` to ``unspecified``. - - Added the ``srvServiceName`` and ``maxConnecting`` URI and - keyword arguments. - - .. versionchanged:: 3.12 - Added the ``server_api`` keyword argument. - The following keyword arguments were deprecated: - - - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor - of ``tlsCertificateKeyFile``. - - .. versionchanged:: 3.11 - Added the following keyword arguments and URI options: - - - ``tlsDisableOCSPEndpointCheck`` - - ``directConnection`` - - .. versionchanged:: 3.9 - Added the ``retryReads`` keyword argument and URI option. - Added the ``tlsInsecure`` keyword argument and URI option. - The following keyword arguments and URI options were deprecated: - - - ``wTimeout`` was deprecated in favor of ``wTimeoutMS``. - - ``j`` was deprecated in favor of ``journal``. - - ``ssl_cert_reqs`` was deprecated in favor of - ``tlsAllowInvalidCertificates``. - - ``ssl_match_hostname`` was deprecated in favor of - ``tlsAllowInvalidHostnames``. - - ``ssl_ca_certs`` was deprecated in favor of ``tlsCAFile``. - - ``ssl_certfile`` was deprecated in favor of - ``tlsCertificateKeyFile``. - - ``ssl_crlfile`` was deprecated in favor of ``tlsCRLFile``. - - ``ssl_pem_passphrase`` was deprecated in favor of - ``tlsCertificateKeyFilePassword``. - - .. versionchanged:: 3.9 - ``retryWrites`` now defaults to ``True``. - - .. versionchanged:: 3.8 - Added the ``server_selector`` keyword argument. - Added the ``type_registry`` keyword argument. - - .. versionchanged:: 3.7 - Added the ``driver`` keyword argument. - - .. versionchanged:: 3.6 - Added support for mongodb+srv:// URIs. - Added the ``retryWrites`` keyword argument and URI option. - - .. versionchanged:: 3.5 - Add ``username`` and ``password`` options. Document the - ``authSource``, ``authMechanism``, and ``authMechanismProperties`` - options. - Deprecated the ``socketKeepAlive`` keyword argument and URI option. - ``socketKeepAlive`` now defaults to ``True``. - - .. versionchanged:: 3.0 - :class:`~pymongo.mongo_client.MongoClient` is now the one and only - client class for a standalone server, mongos, or replica set. - It includes the functionality that had been split into - :class:`~pymongo.mongo_client.MongoReplicaSetClient`: it can connect - to a replica set, discover all its members, and monitor the set for - stepdowns, elections, and reconfigs. - - The :class:`~pymongo.mongo_client.MongoClient` constructor no - longer blocks while connecting to the server or servers, and it no - longer raises :class:`~pymongo.errors.ConnectionFailure` if they - are unavailable, nor :class:`~pymongo.errors.ConfigurationError` - if the user's credentials are wrong. Instead, the constructor - returns immediately and launches the connection process on - background threads.
- - Therefore the ``alive`` method is removed since it no longer - provides meaningful information; even if the client is disconnected, - it may discover a server in time to fulfill the next operation. - - In PyMongo 2.x, :class:`~pymongo.MongoClient` accepted a list of - standalone MongoDB servers and used the first it could connect to:: - - MongoClient(['host1.com:27017', 'host2.com:27017']) - - A list of multiple standalones is no longer supported; if multiple - servers are listed they must be members of the same replica set, or - mongoses in the same sharded cluster. - - The behavior for a list of mongoses is changed from "high - availability" to "load balancing". Before, the client connected to - the lowest-latency mongos in the list, and used it until a network - error prompted it to re-evaluate all mongoses' latencies and - reconnect to one of them. In PyMongo 3, the client monitors its - network latency to all the mongoses continuously, and distributes - operations evenly among those with the lowest latency. See - :ref:`mongos-load-balancing` for more information. - - The ``connect`` option is added. - - The ``start_request``, ``in_request``, and ``end_request`` methods - are removed, as well as the ``auto_start_request`` option. - - The ``copy_database`` method is removed, see the - :doc:`copy_database examples ` for alternatives. - - The :meth:`MongoClient.disconnect` method is removed; it was a - synonym for :meth:`~pymongo.MongoClient.close`. - - :class:`~pymongo.mongo_client.MongoClient` no longer returns an - instance of :class:`~pymongo.database.Database` for attribute names - with leading underscores. You must use dict-style lookups instead:: - - client['__my_database__'] - - Not:: - - client.__my_database__ - """ - doc_class = document_class or dict - self.__init_kwargs: Dict[str, Any] = { - "host": host, - "port": port, - "document_class": doc_class, - "tz_aware": tz_aware, - "connect": connect, - "type_registry": type_registry, - **kwargs, - } - - if host is None: - host = self.HOST - if isinstance(host, str): - host = [host] - if port is None: - port = self.PORT - if not isinstance(port, int): - raise TypeError("port must be an instance of int") - - # _pool_class, _monitor_class, and _condition_class are for deep - # customization of PyMongo, e.g. Motor. - pool_class = kwargs.pop("_pool_class", None) - monitor_class = kwargs.pop("_monitor_class", None) - condition_class = kwargs.pop("_condition_class", None) - - # Parse options passed as kwargs. - keyword_opts = common._CaseInsensitiveDictionary(kwargs) - keyword_opts["document_class"] = doc_class - - seeds = set() - username = None - password = None - dbase = None - opts = common._CaseInsensitiveDictionary() - fqdn = None - srv_service_name = keyword_opts.get("srvservicename") - srv_max_hosts = keyword_opts.get("srvmaxhosts") - if len([h for h in host if "/" in h]) > 1: - raise ConfigurationError("host must not contain multiple MongoDB URIs") - for entity in host: - # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' - # it must be a URI, - # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names - if "/" in entity: - # Determine connection timeout from kwargs. 
- timeout = keyword_opts.get("connecttimeoutms") - if timeout is not None: - timeout = common.validate_timeout_or_none_or_zero( - keyword_opts.cased_key("connecttimeoutms"), timeout - ) - res = uri_parser.parse_uri( - entity, - port, - validate=True, - warn=True, - normalize=False, - connect_timeout=timeout, - srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts, - ) - seeds.update(res["nodelist"]) - username = res["username"] or username - password = res["password"] or password - dbase = res["database"] or dbase - opts = res["options"] - fqdn = res["fqdn"] - else: - seeds.update(uri_parser.split_hosts(entity, port)) - if not seeds: - raise ConfigurationError("need to specify at least one host") - - # Add options with named keyword arguments to the parsed kwarg options. - if type_registry is not None: - keyword_opts["type_registry"] = type_registry - if tz_aware is None: - tz_aware = opts.get("tz_aware", False) - if connect is None: - connect = opts.get("connect", True) - keyword_opts["tz_aware"] = tz_aware - keyword_opts["connect"] = connect - - # Handle deprecated options in kwarg options. - keyword_opts = _handle_option_deprecations(keyword_opts) - # Validate kwarg options. - keyword_opts = common._CaseInsensitiveDictionary( - dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) - ) - - # Override connection string options with kwarg options. - opts.update(keyword_opts) - - if srv_service_name is None: - srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) - - srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") - # Handle security-option conflicts in combined options. - opts = _handle_security_options(opts) - # Normalize combined options. - opts = _normalize_options(opts) - _check_options(seeds, opts) - - # Username and password passed as kwargs override user info in URI. - username = opts.get("username", username) - password = opts.get("password", password) - self.__options = options = ClientOptions(username, password, dbase, opts) - - self.__default_database_name = dbase - self.__lock = threading.Lock() - self.__kill_cursors_queue: List = [] - - self._event_listeners = options.pool_options._event_listeners - super(MongoClient, self).__init__( - options.codec_options, - options.read_preference, - options.write_concern, - options.read_concern, - ) - - self._topology_settings = TopologySettings( - seeds=seeds, - replica_set_name=options.replica_set_name, - pool_class=pool_class, - pool_options=options.pool_options, - monitor_class=monitor_class, - condition_class=condition_class, - local_threshold_ms=options.local_threshold_ms, - server_selection_timeout=options.server_selection_timeout, - server_selector=options.server_selector, - heartbeat_frequency=options.heartbeat_frequency, - fqdn=fqdn, - direct_connection=options.direct_connection, - load_balanced=options.load_balanced, - srv_service_name=srv_service_name, - srv_max_hosts=srv_max_hosts, - ) - - self._topology = Topology(self._topology_settings) - - def target(): - client = self_ref() - if client is None: - return False # Stop the executor. - MongoClient._process_periodic_tasks(client) - return True - - executor = periodic_executor.PeriodicExecutor( - interval=common.KILL_CURSOR_FREQUENCY, - min_interval=common.MIN_HEARTBEAT_INTERVAL, - target=target, - name="pymongo_kill_cursors_thread", - ) - - # We strongly reference the executor and it weakly references us via - # this closure. When the client is freed, stop the executor soon. 
- self_ref: Any = weakref.ref(self, executor.close) - self._kill_cursors_executor = executor - - if connect: - self._get_topology() - - self._encrypter = None - if self.__options.auto_encryption_opts: - from pymongo.encryption import _Encrypter - - self._encrypter = _Encrypter(self, self.__options.auto_encryption_opts) - self._timeout = options.timeout - - def _duplicate(self, **kwargs): - args = self.__init_kwargs.copy() - args.update(kwargs) - return MongoClient(**args) - - def _server_property(self, attr_name): - """An attribute of the current server's description. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - - Not threadsafe if used multiple times in a single method, since - the server may change. In such cases, store a local reference to a - ServerDescription first, then use its properties. - """ - server = self._topology.select_server(writable_server_selector) - - return getattr(server.description, attr_name) - - def watch( - self, - pipeline: Optional[_Pipeline] = None, - full_document: Optional[str] = None, - resume_after: Optional[Mapping[str, Any]] = None, - max_await_time_ms: Optional[int] = None, - batch_size: Optional[int] = None, - collation: Optional[_CollationIn] = None, - start_at_operation_time: Optional[Timestamp] = None, - session: Optional[client_session.ClientSession] = None, - start_after: Optional[Mapping[str, Any]] = None, - comment: Optional[Any] = None, - full_document_before_change: Optional[str] = None, - ) -> ChangeStream[_DocumentType]: - """Watch changes on this cluster. - - Performs an aggregation with an implicit initial ``$changeStream`` - stage and returns a - :class:`~pymongo.change_stream.ClusterChangeStream` cursor which - iterates over changes on all databases on this cluster. - - Introduced in MongoDB 4.0. - - .. code-block:: python - - with client.watch() as stream: - for change in stream: - print(change) - - The :class:`~pymongo.change_stream.ClusterChangeStream` iterable - blocks until the next change document is returned or an error is - raised. If the - :meth:`~pymongo.change_stream.ClusterChangeStream.next` method - encounters a network error when retrieving a batch from the server, - it will automatically attempt to recreate the cursor such that no - change events are missed. Any error encountered during the resume - attempt indicates there may be an outage and will be raised. - - .. code-block:: python - - try: - with client.watch( - [{'$match': {'operationType': 'insert'}}]) as stream: - for insert_change in stream: - print(insert_change) - except pymongo.errors.PyMongoError: - # The ChangeStream encountered an unrecoverable error or the - # resume attempt failed to recreate the cursor. - logging.error('...') - - For a precise description of the resume process see the - `change streams specification`_. - - :Parameters: - - `pipeline` (optional): A list of aggregation pipeline stages to - append to an initial ``$changeStream`` stage. Not all - pipeline stages are valid after a ``$changeStream`` stage, see the - MongoDB documentation on change streams for the supported stages. - - `full_document` (optional): The fullDocument to pass as an option - to the ``$changeStream`` stage. Allowed values: 'updateLookup', - 'whenAvailable', 'required'. 
When set to 'updateLookup', the - change notification for partial updates will include both a delta - describing the changes to the document, as well as a copy of the - entire document that was changed from some time after the change - occurred. - - `full_document_before_change`: Allowed values: 'whenAvailable' - and 'required'. Change events may now result in a - 'fullDocumentBeforeChange' response field. - - `resume_after` (optional): A resume token. If provided, the - change stream will start returning changes that occur directly - after the operation specified in the resume token. A resume token - is the _id value of a change document. - - `max_await_time_ms` (optional): The maximum time in milliseconds - for the server to wait for changes before responding to a getMore - operation. - - `batch_size` (optional): The maximum number of documents to return - per batch. - - `collation` (optional): The :class:`~pymongo.collation.Collation` - to use for the aggregation. - - `start_at_operation_time` (optional): If provided, the resulting - change stream will only return changes that occurred at or after - the specified :class:`~bson.timestamp.Timestamp`. Requires - MongoDB >= 4.0. - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `start_after` (optional): The same as `resume_after` except that - `start_after` can resume notifications after an invalidate event. - This option and `resume_after` are mutually exclusive. - - `comment` (optional): A user-provided comment to attach to this - command. - - :Returns: - A :class:`~pymongo.change_stream.ClusterChangeStream` cursor. - - .. versionchanged:: 4.2 - Added ``full_document_before_change`` parameter. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.9 - Added the ``start_after`` parameter. - - .. versionadded:: 3.7 - - .. seealso:: The MongoDB documentation on `changeStreams <https://dochub.mongodb.org/core/changeStreams>`_. - - .. _change streams specification: - https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst - """ - return ClusterChangeStream( - self.admin, - pipeline, - full_document, - resume_after, - max_await_time_ms, - batch_size, - collation, - start_at_operation_time, - session, - start_after, - comment, - full_document_before_change, - ) - - @property - def topology_description(self) -> TopologyDescription: - """The description of the connected MongoDB deployment. - - >>> client.topology_description - <TopologyDescription id: ..., topology_type: ReplicaSetWithPrimary, servers: [<ServerDescription ('localhost', 27017) server_type: RSPrimary, rtt: ...>, <ServerDescription ('localhost', 27018) server_type: RSSecondary, rtt: ...>, <ServerDescription ('localhost', 27019) server_type: RSSecondary, rtt: ...>]> - >>> client.topology_description.topology_type_name - 'ReplicaSetWithPrimary' - - Note that the description is periodically updated in the background - but the returned object itself is immutable. Access this property again - to get a more recent - :class:`~pymongo.topology_description.TopologyDescription`. - - :Returns: - An instance of - :class:`~pymongo.topology_description.TopologyDescription`. - - .. versionadded:: 4.0 - """ - return self._topology.description - - @property - def address(self) -> Optional[Tuple[str, int]]: - """(host, port) of the current standalone, primary, or mongos, or None. - - Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if - the client is load-balancing among mongoses, since there is no single - address. Use :attr:`nodes` instead. - - If the client is not connected, this will block until a connection is - established or raise ServerSelectionTimeoutError if no server is - available. - - ..
versionadded:: 3.0 - """ - topology_type = self._topology._description.topology_type - if ( - topology_type == TOPOLOGY_TYPE.Sharded - and len(self.topology_description.server_descriptions()) > 1 - ): - raise InvalidOperation( - 'Cannot use "address" property when load balancing among' - ' mongoses, use "nodes" instead.' - ) - if topology_type not in ( - TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.Single, - TOPOLOGY_TYPE.LoadBalanced, - TOPOLOGY_TYPE.Sharded, - ): - return None - return self._server_property("address") - - @property - def primary(self) -> Optional[Tuple[str, int]]: - """The (host, port) of the current primary of the replica set. - - Returns ``None`` if this client is not connected to a replica set, - there is no primary, or this client was created without the - `replicaSet` option. - - .. versionadded:: 3.0 - MongoClient gained this property in version 3.0. - """ - return self._topology.get_primary() - - @property - def secondaries(self) -> Set[Tuple[str, int]]: - """The secondary members known to this client. - - A sequence of (host, port) pairs. Empty if this client is not - connected to a replica set, there are no visible secondaries, or this - client was created without the `replicaSet` option. - - .. versionadded:: 3.0 - MongoClient gained this property in version 3.0. - """ - return self._topology.get_secondaries() - - @property - def arbiters(self) -> Set[Tuple[str, int]]: - """Arbiters in the replica set. - - A sequence of (host, port) pairs. Empty if this client is not - connected to a replica set, there are no arbiters, or this client was - created without the `replicaSet` option. - """ - return self._topology.get_arbiters() - - @property - def is_primary(self) -> bool: - """If this client is connected to a server that can accept writes. - - True if the current server is a standalone, mongos, or the primary of - a replica set. If the client is not connected, this will block until a - connection is established or raise ServerSelectionTimeoutError if no - server is available. - """ - return self._server_property("is_writable") - - @property - def is_mongos(self) -> bool: - """If this client is connected to mongos. If the client is not - connected, this will block until a connection is established or raise - ServerSelectionTimeoutError if no server is available. - """ - return self._server_property("server_type") == SERVER_TYPE.Mongos - - @property - def nodes(self) -> FrozenSet[_Address]: - """Set of all currently connected servers. - - .. warning:: When connected to a replica set the value of :attr:`nodes` - can change over time as :class:`MongoClient`'s view of the replica - set changes. :attr:`nodes` can also be an empty set when - :class:`MongoClient` is first instantiated and hasn't yet connected - to any servers, or a network partition causes it to lose connection - to all servers. - """ - description = self._topology.description - return frozenset(s.address for s in description.known_servers) - - @property - def options(self) -> ClientOptions: - """The configuration options for this client. - - :Returns: - An instance of :class:`~pymongo.client_options.ClientOptions`. - - .. versionadded:: 4.0 - """ - return self.__options - - def _end_sessions(self, session_ids): - """Send endSessions command(s) with the given session ids.""" - try: - # Use SocketInfo.command directly to avoid implicitly creating - # another session. 
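# Hedged illustration (not code from this module): each batch handled by
# the loop below becomes one command of roughly the form
#     {"endSessions": [{"id": <session UUID>}, ...]}
# run against the "admin" database, with common._MAX_END_SESSIONS capping
# the number of session ids sent per command.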
- with self._socket_for_reads(ReadPreference.PRIMARY_PREFERRED, None) as ( - sock_info, - read_pref, - ): - if not sock_info.supports_sessions: - return - - for i in range(0, len(session_ids), common._MAX_END_SESSIONS): - spec = SON([("endSessions", session_ids[i : i + common._MAX_END_SESSIONS])]) - sock_info.command("admin", spec, read_preference=read_pref, client=self) - except PyMongoError: - # Drivers MUST ignore any errors returned by the endSessions - # command. - pass - - def close(self) -> None: - """Clean up client resources and disconnect from MongoDB. - - End all server sessions created by this client by sending one or more - endSessions commands. - - Close all sockets in the connection pools and stop the monitor threads. - - .. versionchanged:: 4.0 - Once closed, the client cannot be used again and any attempt will - raise :exc:`~pymongo.errors.InvalidOperation`. - - .. versionchanged:: 3.6 - End all server sessions created by this client. - """ - session_ids = self._topology.pop_all_sessions() - if session_ids: - self._end_sessions(session_ids) - # Stop the periodic task thread and then send pending killCursor - # requests before closing the topology. - self._kill_cursors_executor.close() - self._process_kill_cursors() - self._topology.close() - if self._encrypter: - # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. - self._encrypter.close() - - def _get_topology(self): - """Get the internal :class:`~pymongo.topology.Topology` object. - - If this client was created with "connect=False", calling _get_topology - launches the connection process in the background. - """ - self._topology.open() - with self.__lock: - self._kill_cursors_executor.open() - return self._topology - - @contextlib.contextmanager - def _get_socket(self, server, session): - in_txn = session and session.in_transaction - with _MongoClientErrorHandler(self, server, session) as err_handler: - # Reuse the pinned connection, if it exists. - if in_txn and session._pinned_connection: - err_handler.contribute_socket(session._pinned_connection) - yield session._pinned_connection - return - with server.get_socket(handler=err_handler) as sock_info: - # Pin this session to the selected server or connection. - if in_txn and server.description.server_type in ( - SERVER_TYPE.Mongos, - SERVER_TYPE.LoadBalancer, - ): - session._pin(server, sock_info) - err_handler.contribute_socket(sock_info) - if ( - self._encrypter - and not self._encrypter._bypass_auto_encryption - and sock_info.max_wire_version < 8 - ): - raise ConfigurationError( - "Auto-encryption requires a minimum MongoDB version of 4.2" - ) - yield sock_info - - def _select_server(self, server_selector, session, address=None): - """Select a server to run an operation on this client. - - :Parameters: - - `server_selector`: The server selector to use if the session is - not pinned and no address is given. - - `session`: The ClientSession for the next operation, or None. May - be pinned to a mongos server address. - - `address` (optional): Address when sending a message - to a specific server, used for getMore. - """ - try: - topology = self._get_topology() - if session and not session.in_transaction: - session._transaction.reset() - address = address or (session and session._pinned_address) - if address: - # We're running a getMore or this session is pinned to a mongos.
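# Both cases require one specific server: a getMore must go back to the
# server that owns the cursor, and a transaction pinned to a mongos must
# keep using that same mongos, so selection by address bypasses the
# server selector below.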
- server = topology.select_server_by_address(address) - if not server: - raise AutoReconnect("server %s:%d no longer available" % address) - else: - server = topology.select_server(server_selector) - return server - except PyMongoError as exc: - # Server selection errors in a transaction are transient. - if session and session.in_transaction: - exc._add_error_label("TransientTransactionError") - session._unpin() - raise - - def _socket_for_writes(self, session): - server = self._select_server(writable_server_selector, session) - return self._get_socket(server, session) - - @contextlib.contextmanager - def _socket_from_server(self, read_preference, server, session): - assert read_preference is not None, "read_preference must not be None" - # Get a socket for a server matching the read preference, and yield - # sock_info with the effective read preference. The Server Selection - # Spec says not to send any $readPreference to standalones and to - # always send primaryPreferred when directly connected to a repl set - # member. - # Thread safe: if the type is single it cannot change. - topology = self._get_topology() - single = topology.description.topology_type == TOPOLOGY_TYPE.Single - - with self._get_socket(server, session) as sock_info: - if single: - if sock_info.is_repl and not (session and session.in_transaction): - # Use primary preferred to ensure any repl set member - # can handle the request. - read_preference = ReadPreference.PRIMARY_PREFERRED - elif sock_info.is_standalone: - # Don't send read preference to standalones. - read_preference = ReadPreference.PRIMARY - yield sock_info, read_preference - - def _socket_for_reads(self, read_preference, session): - assert read_preference is not None, "read_preference must not be None" - _ = self._get_topology() - server = self._select_server(read_preference, session) - return self._socket_from_server(read_preference, server, session) - - def _should_pin_cursor(self, session): - return self.__options.load_balanced and not (session and session.in_transaction) - - @_csot.apply - def _run_operation(self, operation, unpack_res, address=None): - """Run a _Query/_GetMore operation and return a Response. - - :Parameters: - - `operation`: a _Query or _GetMore object. - - `unpack_res`: A callable that decodes the wire protocol response. - - `address` (optional): Address when sending a message - to a specific server, used for getMore. - """ - if operation.sock_mgr: - server = self._select_server( - operation.read_preference, operation.session, address=address - ) - - with operation.sock_mgr.lock: - with _MongoClientErrorHandler(self, server, operation.session) as err_handler: - err_handler.contribute_socket(operation.sock_mgr.sock) - return server.run_operation( - operation.sock_mgr.sock, operation, True, self._event_listeners, unpack_res - ) - - def _cmd(session, server, sock_info, read_preference): - operation.reset() # Reset op in case of retry. - return server.run_operation( - sock_info, operation, read_preference, self._event_listeners, unpack_res - ) - - return self._retryable_read( - _cmd, - operation.read_preference, - operation.session, - address=address, - retryable=isinstance(operation, message._Query), - ) - - def _retry_with_session(self, retryable, func, session, bulk): - """Execute an operation with at most one consecutive retry. - - Returns func()'s return value on success. On error retries the same - command once. - - Re-raises any exception thrown by func().
- """ - retryable = ( - retryable and self.options.retry_writes and session and not session.in_transaction - ) - return self._retry_internal(retryable, func, session, bulk) - - @_csot.apply - def _retry_internal(self, retryable, func, session, bulk): - """Internal retryable write helper.""" - max_wire_version = 0 - last_error: Optional[Exception] = None - retrying = False - multiple_retries = _csot.get_timeout() is not None - - def is_retrying(): - return bulk.retrying if bulk else retrying - - # Increment the transaction id up front to ensure any retry attempt - # will use the proper txnNumber, even if server or socket selection - # fails before the command can be sent. - if retryable and session and not session.in_transaction: - session._start_retryable_write() - if bulk: - bulk.started_retryable_write = True - - while True: - if is_retrying(): - remaining = _csot.remaining() - if remaining is not None and remaining <= 0: - assert last_error is not None - raise last_error - try: - server = self._select_server(writable_server_selector, session) - supports_session = ( - session is not None and server.description.retryable_writes_supported - ) - with self._get_socket(server, session) as sock_info: - max_wire_version = sock_info.max_wire_version - if retryable and not supports_session: - if is_retrying(): - # A retry is not possible because this server does - # not support sessions raise the last error. - assert last_error is not None - raise last_error - retryable = False - return func(session, sock_info, retryable) - except ServerSelectionTimeoutError: - if is_retrying(): - # The application may think the write was never attempted - # if we raise ServerSelectionTimeoutError on the retry - # attempt. Raise the original exception instead. - assert last_error is not None - raise last_error - # A ServerSelectionTimeoutError error indicates that there may - # be a persistent outage. Attempting to retry in this case will - # most likely be a waste of time. - raise - except PyMongoError as exc: - if not retryable: - raise - # Add the RetryableWriteError label, if applicable. - _add_retryable_write_error(exc, max_wire_version) - retryable_error = exc.has_error_label("RetryableWriteError") - if retryable_error: - session._unpin() - if not retryable_error or (is_retrying() and not multiple_retries): - raise - if bulk: - bulk.retrying = True - else: - retrying = True - last_error = exc - - @_csot.apply - def _retryable_read(self, func, read_pref, session, address=None, retryable=True): - """Execute an operation with at most one consecutive retries - - Returns func()'s return value on success. On error retries the same - command once. - - Re-raises any exception thrown by func(). - """ - retryable = ( - retryable and self.options.retry_reads and not (session and session.in_transaction) - ) - last_error: Optional[Exception] = None - retrying = False - multiple_retries = _csot.get_timeout() is not None - - while True: - if retrying: - remaining = _csot.remaining() - if remaining is not None and remaining <= 0: - assert last_error is not None - raise last_error - try: - server = self._select_server(read_pref, session, address=address) - with self._socket_from_server(read_pref, server, session) as (sock_info, read_pref): - if retrying and not retryable: - # A retry is not possible because this server does - # not support retryable reads, raise the last error. 
- assert last_error is not None - raise last_error - return func(session, server, sock_info, read_pref) - except ServerSelectionTimeoutError: - if retrying: - # The application may think the operation was never attempted - # if we raise ServerSelectionTimeoutError on the retry - # attempt. Raise the original exception instead. - assert last_error is not None - raise last_error - # A ServerSelectionTimeoutError indicates that there may - # be a persistent outage. Attempting to retry in this case will - # most likely be a waste of time. - raise - except ConnectionFailure as exc: - if not retryable or (retrying and not multiple_retries): - raise - retrying = True - last_error = exc - except OperationFailure as exc: - if not retryable or (retrying and not multiple_retries): - raise - if exc.code not in helpers._RETRYABLE_ERROR_CODES: - raise - retrying = True - last_error = exc - - def _retryable_write(self, retryable, func, session): - """Internal retryable write helper.""" - with self._tmp_session(session) as s: - return self._retry_with_session(retryable, func, s, None) - - def __eq__(self, other: Any) -> bool: - if isinstance(other, self.__class__): - return self._topology == other._topology - return NotImplemented - - def __ne__(self, other: Any) -> bool: - return not self == other - - def __hash__(self) -> int: - return hash(self._topology) - - def _repr_helper(self): - def option_repr(option, value): - """Fix options whose __repr__ isn't usable in a constructor.""" - if option == "document_class": - if value is dict: - return "document_class=dict" - else: - return "document_class=%s.%s" % (value.__module__, value.__name__) - if option in common.TIMEOUT_OPTIONS and value is not None: - return "%s=%s" % (option, int(value * 1000)) - - return "%s=%r" % (option, value) - - # Host first... - options = [ - "host=%r" - % [ - "%s:%d" % (host, port) if port is not None else host - for host, port in self._topology_settings.seeds - ] - ] - # ... then everything in self._constructor_args... - options.extend( - option_repr(key, self.__options._options[key]) for key in self._constructor_args - ) - # ... then everything else. - options.extend( - option_repr(key, self.__options._options[key]) - for key in self.__options._options - if key not in set(self._constructor_args) and key != "username" and key != "password" - ) - return ", ".join(options) - - def __repr__(self): - return "MongoClient(%s)" % (self._repr_helper(),) - - def __getattr__(self, name: str) -> database.Database[_DocumentType]: - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :Parameters: - - `name`: the name of the database to get - """ - if name.startswith("_"): - raise AttributeError( - "MongoClient has no attribute %r. To access the %s" - " database, use client[%r]." % (name, name, name) - ) - return self.__getitem__(name) - - def __getitem__(self, name: str) -> database.Database[_DocumentType]: - """Get a database by name. - - Raises :class:`~pymongo.errors.InvalidName` if an invalid - database name is used. - - :Parameters: - - `name`: the name of the database to get - """ - return database.Database(self, name) - - def _cleanup_cursor( - self, locks_allowed, cursor_id, address, sock_mgr, session, explicit_session - ): - """Clean up a cursor from cursor.close() or __del__. - - This method handles cleanup for Cursors/CommandCursors including any - pinned connection or implicit session attached at the time the cursor - was closed or garbage collected.
- - :Parameters: - - `locks_allowed`: True if we are allowed to acquire locks. - - `cursor_id`: The cursor id which may be 0. - - `address`: The _CursorAddress. - - `sock_mgr`: The _SocketManager for the pinned connection or None. - - `session`: The cursor's session. - - `explicit_session`: True if the session was passed explicitly. - """ - if locks_allowed: - if cursor_id: - if sock_mgr and sock_mgr.more_to_come: - # If this is an exhaust cursor and we haven't completely - # exhausted the result set we *must* close the socket - # to stop the server from sending more data. - sock_mgr.sock.close_socket(ConnectionClosedReason.ERROR) - else: - self._close_cursor_now(cursor_id, address, session=session, sock_mgr=sock_mgr) - if sock_mgr: - sock_mgr.close() - else: - # The cursor will be closed later in a different session. - if cursor_id or sock_mgr: - self._close_cursor_soon(cursor_id, address, sock_mgr) - if session and not explicit_session: - session._end_session(lock=locks_allowed) - - def _close_cursor_soon(self, cursor_id, address, sock_mgr=None): - """Request that a cursor and/or connection be cleaned up soon.""" - self.__kill_cursors_queue.append((address, cursor_id, sock_mgr)) - - def _close_cursor_now(self, cursor_id, address=None, session=None, sock_mgr=None): - """Send a kill cursors message with the given id. - - The cursor is closed synchronously on the current thread. - """ - if not isinstance(cursor_id, int): - raise TypeError("cursor_id must be an instance of int") - - try: - if sock_mgr: - with sock_mgr.lock: - # Cursor is pinned to LB outside of a transaction. - self._kill_cursor_impl([cursor_id], address, session, sock_mgr.sock) - else: - self._kill_cursors([cursor_id], address, self._get_topology(), session) - except PyMongoError: - # Make another attempt to kill the cursor later. - self._close_cursor_soon(cursor_id, address) - - def _kill_cursors(self, cursor_ids, address, topology, session): - """Send a kill cursors message with the given ids.""" - if address: - # address could be a tuple or _CursorAddress, but - # select_server_by_address needs (host, port). - server = topology.select_server_by_address(tuple(address)) - else: - # Application called close_cursor() with no address. - server = topology.select_server(writable_server_selector) - - with self._get_socket(server, session) as sock_info: - self._kill_cursor_impl(cursor_ids, address, session, sock_info) - - def _kill_cursor_impl(self, cursor_ids, address, session, sock_info): - namespace = address.namespace - db, coll = namespace.split(".", 1) - spec = SON([("killCursors", coll), ("cursors", cursor_ids)]) - sock_info.command(db, spec, session=session, client=self) - - def _process_kill_cursors(self): - """Process any pending kill cursors requests.""" - address_to_cursor_ids = defaultdict(list) - pinned_cursors = [] - - # Other threads or the GC may append to the queue concurrently. 
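# Popping entries one at a time keeps the loop below safe against those
# concurrent appends: list.pop() is atomic in CPython, and items appended
# mid-loop are either popped in this pass or handled on the next run.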
- while True: - try: - address, cursor_id, sock_mgr = self.__kill_cursors_queue.pop() - except IndexError: - break - - if sock_mgr: - pinned_cursors.append((address, cursor_id, sock_mgr)) - else: - address_to_cursor_ids[address].append(cursor_id) - - for address, cursor_id, sock_mgr in pinned_cursors: - try: - self._cleanup_cursor(True, cursor_id, address, sock_mgr, None, False) - except Exception as exc: - if isinstance(exc, InvalidOperation) and self._topology._closed: - # Raise the exception when the client is closed so that it - # can be caught in _process_periodic_tasks. - raise - else: - helpers._handle_exception() - - # Don't re-open topology if it's closed and there are no pending cursors. - if address_to_cursor_ids: - topology = self._get_topology() - for address, cursor_ids in address_to_cursor_ids.items(): - try: - self._kill_cursors(cursor_ids, address, topology, session=None) - except Exception as exc: - if isinstance(exc, InvalidOperation) and self._topology._closed: - raise - else: - helpers._handle_exception() - - # This method is run periodically by a background thread. - def _process_periodic_tasks(self): - """Process any pending kill cursors requests and - maintain connection pool parameters.""" - try: - self._process_kill_cursors() - self._topology.update_pool() - except Exception as exc: - if isinstance(exc, InvalidOperation) and self._topology._closed: - return - else: - helpers._handle_exception() - - def __start_session(self, implicit, **kwargs): - # Raises ConfigurationError if sessions are not supported. - if implicit: - self._topology._check_implicit_session_support() - server_session = _EmptyServerSession() - else: - server_session = self._get_server_session() - opts = client_session.SessionOptions(**kwargs) - return client_session.ClientSession(self, server_session, opts, implicit) - - def start_session( - self, - causal_consistency: Optional[bool] = None, - default_transaction_options: Optional[client_session.TransactionOptions] = None, - snapshot: Optional[bool] = False, - ) -> client_session.ClientSession: - """Start a logical session. - - This method takes the same parameters as - :class:`~pymongo.client_session.SessionOptions`. See the - :mod:`~pymongo.client_session` module for details and examples. - - A :class:`~pymongo.client_session.ClientSession` may only be used with - the MongoClient that started it. :class:`ClientSession` instances are - **not thread-safe or fork-safe**. They can only be used by one thread - or process at a time. A single :class:`ClientSession` cannot be used - to run multiple operations concurrently. - - :Returns: - An instance of :class:`~pymongo.client_session.ClientSession`. - - .. versionadded:: 3.6 - """ - return self.__start_session( - False, - causal_consistency=causal_consistency, - default_transaction_options=default_transaction_options, - snapshot=snapshot, - ) - - def _get_server_session(self): - """Internal: start or resume a _ServerSession.""" - return self._topology.get_server_session() - - def _return_server_session(self, server_session, lock): - """Internal: return a _ServerSession to the pool.""" - if isinstance(server_session, _EmptyServerSession): - return - return self._topology.return_server_session(server_session, lock) - - def _ensure_session(self, session=None): - """If provided session is None, lend a temporary session.""" - if session: - return session - - try: - # Don't make implicit sessions causally consistent. Applications - # should always opt in.
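# An application that does want causal consistency opts in explicitly,
# e.g. (using the public start_session API defined above):
#     with client.start_session(causal_consistency=True) as s:
#         client.db.coll.find_one({}, session=s)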
- return self.__start_session(True, causal_consistency=False) - except (ConfigurationError, InvalidOperation): - # Sessions not supported. - return None - - @contextlib.contextmanager - def _tmp_session( - self, session: Optional[client_session.ClientSession], close: bool = True - ) -> "Generator[Optional[client_session.ClientSession], None, None]": - """If provided session is None, lend a temporary session.""" - if session is not None: - if not isinstance(session, client_session.ClientSession): - raise ValueError("'session' argument must be a ClientSession or None.") - # Don't call end_session. - yield session - return - - s = self._ensure_session(session) - if s: - try: - yield s - except Exception as exc: - if isinstance(exc, ConnectionFailure): - s._server_session.mark_dirty() - - # Always call end_session on error. - s.end_session() - raise - finally: - # Call end_session when we exit this scope. - if close: - s.end_session() - else: - yield None - - def _send_cluster_time(self, command, session): - topology_time = self._topology.max_cluster_time() - session_time = session.cluster_time if session else None - if topology_time and session_time: - if topology_time["clusterTime"] > session_time["clusterTime"]: - cluster_time = topology_time - else: - cluster_time = session_time - else: - cluster_time = topology_time or session_time - if cluster_time: - command["$clusterTime"] = cluster_time - - def _process_response(self, reply, session): - self._topology.receive_cluster_time(reply.get("$clusterTime")) - if session is not None: - session._process_response(reply) - - def server_info(self, session: Optional[client_session.ClientSession] = None) -> Dict[str, Any]: - """Get information about the MongoDB server we're connected to. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - """ - return cast( - dict, - self.admin.command( - "buildinfo", read_preference=ReadPreference.PRIMARY, session=session - ), - ) - - def list_databases( - self, - session: Optional[client_session.ClientSession] = None, - comment: Optional[Any] = None, - **kwargs: Any, - ) -> CommandCursor[Dict[str, Any]]: - """Get a cursor over the databases of the connected server. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - `**kwargs` (optional): Optional parameters of the - `listDatabases command - `_ - can be passed as keyword arguments to this method. The supported - options differ by server version. - - - :Returns: - An instance of :class:`~pymongo.command_cursor.CommandCursor`. - - .. versionadded:: 3.6 - """ - cmd = SON([("listDatabases", 1)]) - cmd.update(kwargs) - if comment is not None: - cmd["comment"] = comment - admin = self._database_default_options("admin") - res = admin._retryable_read_command(cmd, session=session) - # listDatabases doesn't return a cursor (yet). Fake one. - cursor = { - "id": 0, - "firstBatch": res["databases"], - "ns": "admin.$cmd", - } - return CommandCursor(admin["$cmd"], cursor, None, comment=comment) - - def list_database_names( - self, - session: Optional[client_session.ClientSession] = None, - comment: Optional[Any] = None, - ) -> List[str]: - """Get a list of the names of all databases on the connected server. - - :Parameters: - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. 
- - `comment` (optional): A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionadded:: 3.6 - """ - return [doc["name"] for doc in self.list_databases(session, nameOnly=True, comment=comment)] - - @_csot.apply - def drop_database( - self, - name_or_database: Union[str, database.Database], - session: Optional[client_session.ClientSession] = None, - comment: Optional[Any] = None, - ) -> None: - """Drop a database. - - Raises :class:`TypeError` if `name_or_database` is not an instance of - :class:`str` or :class:`~pymongo.database.Database`. - - :Parameters: - - `name_or_database`: the name of a database to drop, or a - :class:`~pymongo.database.Database` instance representing the - database to drop - - `session` (optional): a - :class:`~pymongo.client_session.ClientSession`. - - `comment` (optional): A user-provided comment to attach to this - command. - - .. versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.6 - Added ``session`` parameter. - - .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of - this client is automatically applied to this operation. - - .. versionchanged:: 3.4 - Apply this client's write concern automatically to this operation - when connected to MongoDB >= 3.4. - - """ - name = name_or_database - if isinstance(name, database.Database): - name = name.name - - if not isinstance(name, str): - raise TypeError("name_or_database must be an instance of str or a Database") - - with self._socket_for_writes(session) as sock_info: - self[name]._command( - sock_info, - {"dropDatabase": 1, "comment": comment}, - read_preference=ReadPreference.PRIMARY, - write_concern=self._write_concern_for(session), - parse_write_concern_error=True, - session=session, - ) - - def get_default_database( - self, - default: Optional[str] = None, - codec_options: Optional[CodecOptions] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, - ) -> database.Database[_DocumentType]: - """Get the database named in the MongoDB connection URI. - - >>> uri = 'mongodb://host/my_database' - >>> client = MongoClient(uri) - >>> db = client.get_default_database() - >>> assert db.name == 'my_database' - >>> db = client.get_database() - >>> assert db.name == 'my_database' - - Useful in scripts where you want to choose which database to use - based only on the URI in a configuration file. - - :Parameters: - - `default` (optional): the database name to use if no database name - was provided in the URI. - - `codec_options` (optional): An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`MongoClient` is - used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` - for options. - - `write_concern` (optional): An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`MongoClient` is - used. - - `read_concern` (optional): An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`MongoClient` is - used. - - `comment` (optional): A user-provided comment to attach to this - command. - - ..
versionchanged:: 4.1 - Added ``comment`` parameter. - - .. versionchanged:: 3.8 - Undeprecated. Added the ``default``, ``codec_options``, - ``read_preference``, ``write_concern`` and ``read_concern`` - parameters. - - .. versionchanged:: 3.5 - Deprecated, use :meth:`get_database` instead. - """ - if self.__default_database_name is None and default is None: - raise ConfigurationError("No default database name defined or provided.") - - name = cast(str, self.__default_database_name or default) - return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern - ) - - def get_database( - self, - name: Optional[str] = None, - codec_options: Optional[CodecOptions] = None, - read_preference: Optional[_ServerMode] = None, - write_concern: Optional[WriteConcern] = None, - read_concern: Optional["ReadConcern"] = None, - ) -> database.Database[_DocumentType]: - """Get a :class:`~pymongo.database.Database` with the given name and - options. - - Useful for creating a :class:`~pymongo.database.Database` with - different codec options, read preference, and/or write concern from - this :class:`MongoClient`. - - >>> client.read_preference - Primary() - >>> db1 = client.test - >>> db1.read_preference - Primary() - >>> from pymongo import ReadPreference - >>> db2 = client.get_database( - ... 'test', read_preference=ReadPreference.SECONDARY) - >>> db2.read_preference - Secondary(tag_sets=None) - - :Parameters: - - `name` (optional): The name of the database - a string. If ``None`` - (the default) the database named in the MongoDB connection URI is - returned. - - `codec_options` (optional): An instance of - :class:`~bson.codec_options.CodecOptions`. If ``None`` (the - default) the :attr:`codec_options` of this :class:`MongoClient` is - used. - - `read_preference` (optional): The read preference to use. If - ``None`` (the default) the :attr:`read_preference` of this - :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` - for options. - - `write_concern` (optional): An instance of - :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the - default) the :attr:`write_concern` of this :class:`MongoClient` is - used. - - `read_concern` (optional): An instance of - :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the - default) the :attr:`read_concern` of this :class:`MongoClient` is - used. - - .. versionchanged:: 3.5 - The `name` parameter is now optional, defaulting to the database - named in the MongoDB connection URI. - """ - if name is None: - if self.__default_database_name is None: - raise ConfigurationError("No default database defined") - name = self.__default_database_name - - return database.Database( - self, name, codec_options, read_preference, write_concern, read_concern - ) - - def _database_default_options(self, name): - """Get a Database instance with the default settings.""" - return self.get_database( - name, - codec_options=DEFAULT_CODEC_OPTIONS, - read_preference=ReadPreference.PRIMARY, - write_concern=DEFAULT_WRITE_CONCERN, - ) - - def __enter__(self) -> "MongoClient[_DocumentType]": - return self - - def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: - self.close() - - # See PYTHON-3084. 
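# Assigning __iter__ = None below makes MongoClient non-iterable: a bug
# like "for doc in client" fails fast with a TypeError instead of falling
# back to the legacy __getitem__-based iteration protocol.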
- __iter__ = None - - def __next__(self) -> NoReturn: - raise TypeError("'MongoClient' object is not iterable") - - next = __next__ - - -def _retryable_error_doc(exc): - """Return the server response from a PyMongo exception or None.""" - if isinstance(exc, BulkWriteError): - # Check the last writeConcernError to determine if this - # BulkWriteError is retryable. - wces = exc.details["writeConcernErrors"] - wce = wces[-1] if wces else None - return wce - if isinstance(exc, (NotPrimaryError, OperationFailure)): - return exc.details - return None - - -def _add_retryable_write_error(exc, max_wire_version): - doc = _retryable_error_doc(exc) - if doc: - code = doc.get("code", 0) - # retryWrites on MMAPv1 should raise an actionable error. - if code == 20 and str(exc).startswith("Transaction numbers"): - errmsg = ( - "This MongoDB deployment does not support " - "retryable writes. Please add retryWrites=false " - "to your connection string." - ) - raise OperationFailure(errmsg, code, exc.details) - if max_wire_version >= 9: - # In MongoDB 4.4+, the server reports the error labels. - for label in doc.get("errorLabels", []): - exc._add_error_label(label) - else: - if code in helpers._RETRYABLE_ERROR_CODES: - exc._add_error_label("RetryableWriteError") - - # Connection errors are always retryable except NotPrimaryError and WaitQueueTimeoutError, which are - # handled above. - if isinstance(exc, ConnectionFailure) and not isinstance( - exc, (NotPrimaryError, WaitQueueTimeoutError) - ): - exc._add_error_label("RetryableWriteError") - - -class _MongoClientErrorHandler(object): - """Handle errors raised when executing an operation.""" - - __slots__ = ( - "client", - "server_address", - "session", - "max_wire_version", - "sock_generation", - "completed_handshake", - "service_id", - "handled", - ) - - def __init__(self, client, server, session): - self.client = client - self.server_address = server.description.address - self.session = session - self.max_wire_version = common.MIN_WIRE_VERSION - # XXX: When get_socket fails, this generation could be out of date: - # "Note that when a network error occurs before the handshake - # completes then the error's generation number is the generation - # of the pool at the time the connection attempt was started."
- self.sock_generation = server.pool.gen.get_overall() - self.completed_handshake = False - self.service_id = None - self.handled = False - - def contribute_socket(self, sock_info, completed_handshake=True): - """Provide socket information to the error handler.""" - self.max_wire_version = sock_info.max_wire_version - self.sock_generation = sock_info.generation - self.service_id = sock_info.service_id - self.completed_handshake = completed_handshake - - def handle(self, exc_type, exc_val): - if self.handled or exc_type is None: - return - self.handled = True - if self.session: - if issubclass(exc_type, ConnectionFailure): - if self.session.in_transaction: - exc_val._add_error_label("TransientTransactionError") - self.session._server_session.mark_dirty() - - if issubclass(exc_type, PyMongoError): - if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label( - "RetryableWriteError" - ): - self.session._unpin() - - err_ctx = _ErrorContext( - exc_val, - self.max_wire_version, - self.sock_generation, - self.completed_handshake, - self.service_id, - ) - self.client._topology.handle_error(self.server_address, err_ctx) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - return self.handle(exc_type, exc_val) +__doc__ = original_doc +__all__ = ["MongoClient"] # noqa: F405 diff --git a/pymongo/monitor.py b/pymongo/monitor.py deleted file mode 100644 index 844ad02262..0000000000 --- a/pymongo/monitor.py +++ /dev/null @@ -1,440 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Class to monitor a MongoDB server on a background thread.""" - -import atexit -import threading -import time -import weakref -from typing import Any, Mapping, cast - -from pymongo import common, periodic_executor -from pymongo.errors import NotPrimaryError, OperationFailure, _OperationCancelled -from pymongo.hello import Hello -from pymongo.periodic_executor import _shutdown_executors -from pymongo.read_preferences import MovingAverage -from pymongo.server_description import ServerDescription -from pymongo.srv_resolver import _SrvResolver - - -def _sanitize(error): - """PYTHON-2433 Clear error traceback info.""" - error.__traceback__ = None - error.__context__ = None - error.__cause__ = None - - -class MonitorBase(object): - def __init__(self, topology, name, interval, min_interval): - """Base class to do periodic work on a background thread. - - The background thread is signaled to stop when the Topology or - this instance is freed. - """ - # We strongly reference the executor and it weakly references us via - # this closure. When the monitor is freed, stop the executor soon. - def target(): - monitor = self_ref() - if monitor is None: - return False # Stop the executor.
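# target() holds a strong reference to the monitor only while one
# iteration runs; once the monitor is garbage collected, self_ref()
# returns None and the executor thread exits via the False above.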
- monitor._run() # type:ignore[attr-defined] - return True - - executor = periodic_executor.PeriodicExecutor( - interval=interval, min_interval=min_interval, target=target, name=name - ) - - self._executor = executor - - def _on_topology_gc(dummy=None): - # This prevents GC from waiting 10 seconds for hello to complete - # See test_cleanup_executors_on_client_del. - monitor = self_ref() - if monitor: - monitor.gc_safe_close() - - # Avoid cycles. When self or topology is freed, stop executor soon. - self_ref = weakref.ref(self, executor.close) - self._topology = weakref.proxy(topology, _on_topology_gc) - _register(self) - - def open(self): - """Start monitoring, or restart after a fork. - - Multiple calls have no effect. - """ - self._executor.open() - - def gc_safe_close(self): - """GC safe close.""" - self._executor.close() - - def close(self): - """Close and stop monitoring. - - open() restarts the monitor after closing. - """ - self.gc_safe_close() - - def join(self, timeout=None): - """Wait for the monitor to stop.""" - self._executor.join(timeout) - - def request_check(self): - """If the monitor is sleeping, wake it soon.""" - self._executor.wake() - - -class Monitor(MonitorBase): - def __init__(self, server_description, topology, pool, topology_settings): - """Class to monitor a MongoDB server on a background thread. - - Pass an initial ServerDescription, a Topology, a Pool, and - TopologySettings. - - The Topology is weakly referenced. The Pool must be exclusive to this - Monitor. - """ - super(Monitor, self).__init__( - topology, - "pymongo_server_monitor_thread", - topology_settings.heartbeat_frequency, - common.MIN_HEARTBEAT_INTERVAL, - ) - self._server_description = server_description - self._pool = pool - self._settings = topology_settings - self._listeners = self._settings._pool_options._event_listeners - pub = self._listeners is not None - self._publish = pub and self._listeners.enabled_for_server_heartbeat - self._cancel_context = None - self._rtt_monitor = _RttMonitor( - topology, - topology_settings, - topology._create_pool_for_monitor(server_description.address), - ) - self.heartbeater = None - - def cancel_check(self): - """Cancel any concurrent hello check. - - Note: this is called from a weakref.proxy callback and MUST NOT take - any locks. - """ - context = self._cancel_context - if context: - # Note: we cannot close the socket because doing so may cause - # concurrent reads/writes to hang until a timeout occurs - # (depending on the platform). - context.cancel() - - def _start_rtt_monitor(self): - """Start an _RttMonitor that periodically runs ping.""" - # If this monitor is closed directly before (or during) this open() - # call, the _RttMonitor will not be closed. Checking if this monitor - # was closed directly after resolves the race. - self._rtt_monitor.open() - if self._executor._stopped: - self._rtt_monitor.close() - - def gc_safe_close(self): - self._executor.close() - self._rtt_monitor.gc_safe_close() - self.cancel_check() - - def close(self): - self.gc_safe_close() - self._rtt_monitor.close() - # Increment the generation and maybe close the socket. If the executor - # thread has the socket checked out, it will be closed when checked in. - self._reset_connection() - - def _reset_connection(self): - # Clear our pooled connection. 
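# Pool.reset() below increments the pool generation; a connection the
# monitor thread still has checked out is closed on check-in instead of
# being reused.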
- self._pool.reset() - - def _run(self): - try: - prev_sd = self._server_description - try: - self._server_description = self._check_server() - except _OperationCancelled as exc: - _sanitize(exc) - # Already closed the connection, wait for the next check. - self._server_description = ServerDescription( - self._server_description.address, error=exc - ) - if prev_sd.is_server_type_known: - # Immediately retry since we've already waited 500ms to - # discover that we've been cancelled. - self._executor.skip_sleep() - return - - # Update the Topology and clear the server pool on error. - self._topology.on_change( - self._server_description, reset_pool=self._server_description.error - ) - - if ( - self._server_description.is_server_type_known - and self._server_description.topology_version - ): - self._start_rtt_monitor() - # Immediately check for the next streaming response. - self._executor.skip_sleep() - - if self._server_description.error and prev_sd.is_server_type_known: - # Immediately retry on network errors. - self._executor.skip_sleep() - except ReferenceError: - # Topology was garbage-collected. - self.close() - - def _check_server(self): - """Call hello or read the next streaming response. - - Returns a ServerDescription. - """ - start = time.monotonic() - try: - try: - return self._check_once() - except (OperationFailure, NotPrimaryError) as exc: - # Update max cluster time even when hello fails. - details = cast(Mapping[str, Any], exc.details) - self._topology.receive_cluster_time(details.get("$clusterTime")) - raise - except ReferenceError: - raise - except Exception as error: - _sanitize(error) - sd = self._server_description - address = sd.address - duration = time.monotonic() - start - if self._publish: - awaited = sd.is_server_type_known and sd.topology_version - self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) - self._reset_connection() - if isinstance(error, _OperationCancelled): - raise - self._rtt_monitor.reset() - # Server type defaults to Unknown. - return ServerDescription(address, error=error) - - def _check_once(self): - """A single attempt to call hello. - - Returns a ServerDescription, or raises an exception. - """ - address = self._server_description.address - if self._publish: - self._listeners.publish_server_heartbeat_started(address) - - if self._cancel_context and self._cancel_context.cancelled: - self._reset_connection() - with self._pool.get_socket() as sock_info: - self._cancel_context = sock_info.cancel_context - response, round_trip_time = self._check_with_socket(sock_info) - if not response.awaitable: - self._rtt_monitor.add_sample(round_trip_time) - - sd = ServerDescription(address, response, self._rtt_monitor.average()) - if self._publish: - self._listeners.publish_server_heartbeat_succeeded( - address, round_trip_time, response, response.awaitable - ) - return sd - - def _check_with_socket(self, conn): - """Return (Hello, round_trip_time). - - Can raise ConnectionFailure or OperationFailure. - """ - cluster_time = self._topology.max_cluster_time() - start = time.monotonic() - if conn.more_to_come: - # Read the next streaming hello (MongoDB 4.4+). - response = Hello(conn._next_reply(), awaitable=True) - elif conn.performed_handshake and self._server_description.topology_version: - # Initiate streaming hello (MongoDB 4.4+). - response = conn._hello( - cluster_time, - self._server_description.topology_version, - self._settings.heartbeat_frequency, - ) - else: - # New connection handshake or polling hello (MongoDB <4.4). 
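# (Of the three branches: read the next streaming reply when one is
# already in flight; initiate a streaming hello when the server
# advertised a topologyVersion; otherwise fall back to the plain hello
# below, which also covers the initial handshake and pre-4.4 polling.)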
- response = conn._hello(cluster_time, None, None) - return response, time.monotonic() - start - - -class SrvMonitor(MonitorBase): - def __init__(self, topology, topology_settings): - """Class to poll SRV records on a background thread. - - Pass a Topology and a TopologySettings. - - The Topology is weakly referenced. - """ - super(SrvMonitor, self).__init__( - topology, - "pymongo_srv_polling_thread", - common.MIN_SRV_RESCAN_INTERVAL, - topology_settings.heartbeat_frequency, - ) - self._settings = topology_settings - self._seedlist = self._settings._seeds - self._fqdn = self._settings.fqdn - - def _run(self): - seedlist = self._get_seedlist() - if seedlist: - self._seedlist = seedlist - try: - self._topology.on_srv_update(self._seedlist) - except ReferenceError: - # Topology was garbage-collected. - self.close() - - def _get_seedlist(self): - """Poll SRV records for a seedlist. - - Returns a list of ServerDescriptions. - """ - try: - resolver = _SrvResolver( - self._fqdn, - self._settings.pool_options.connect_timeout, - self._settings.srv_service_name, - ) - seedlist, ttl = resolver.get_hosts_and_min_ttl() - if len(seedlist) == 0: - # As per the spec: this should be treated as a failure. - raise Exception - except Exception: - # As per the spec, upon encountering an error: - # - An error must not be raised - # - SRV records must be rescanned every heartbeatFrequencyMS - # - Topology must be left unchanged - self.request_check() - return None - else: - self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) - return seedlist - - -class _RttMonitor(MonitorBase): - def __init__(self, topology, topology_settings, pool): - """Maintain round trip times for a server. - - The Topology is weakly referenced. - """ - super(_RttMonitor, self).__init__( - topology, - "pymongo_server_rtt_thread", - topology_settings.heartbeat_frequency, - common.MIN_HEARTBEAT_INTERVAL, - ) - - self._pool = pool - self._moving_average = MovingAverage() - self._lock = threading.Lock() - - def close(self): - self.gc_safe_close() - # Increment the generation and maybe close the socket. If the executor - # thread has the socket checked out, it will be closed when checked in. - self._pool.reset() - - def add_sample(self, sample): - """Add an RTT sample.""" - with self._lock: - self._moving_average.add_sample(sample) - - def average(self): - """Get the calculated average, or None if no samples yet.""" - with self._lock: - return self._moving_average.get() - - def reset(self): - """Reset the average RTT.""" - with self._lock: - return self._moving_average.reset() - - def _run(self): - try: - # NOTE: This thread is only run when using the streaming - # heartbeat protocol (MongoDB 4.4+). - # XXX: Skip check if the server is unknown? - rtt = self._ping() - self.add_sample(rtt) - except ReferenceError: - # Topology was garbage-collected. - self.close() - except Exception: - self._pool.reset() - - def _ping(self): - """Run a "hello" command and return the RTT.""" - with self._pool.get_socket() as sock_info: - if self._executor._stopped: - raise Exception("_RttMonitor closed") - start = time.monotonic() - sock_info.hello() - return time.monotonic() - start - - -# Close monitors to cancel any in-progress streaming checks before joining -# executor threads. For an explanation of how this works see the comment -# about _EXECUTORS in periodic_executor.py.
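# _MONITORS below stores weak references, so registration alone never
# keeps a monitor alive; the weakref callback (_unregister) removes the
# entry once a monitor is garbage collected.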
-_MONITORS = set() - - -def _register(monitor): - ref = weakref.ref(monitor, _unregister) - _MONITORS.add(ref) - - -def _unregister(monitor_ref): - _MONITORS.remove(monitor_ref) - - -def _shutdown_monitors(): - if _MONITORS is None: - return - - # Copy the set. Closing monitors removes them. - monitors = list(_MONITORS) - - # Close all monitors. - for ref in monitors: - monitor = ref() - if monitor: - monitor.gc_safe_close() - - monitor = None - - -def _shutdown_resources(): - # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown. - shutdown = _shutdown_monitors - if shutdown: - shutdown() - shutdown = _shutdown_executors - if shutdown: - shutdown() - - -atexit.register(_shutdown_resources) diff --git a/pymongo/monitoring.py b/pymongo/monitoring.py index f3f773fbbd..46a78aea0b 100644 --- a/pymongo/monitoring.py +++ b/pymongo/monitoring.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -20,6 +20,9 @@ are included in the PyMongo distribution under the :mod:`~pymongo.event_loggers` submodule. +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. + + Use :func:`register` to register global listeners for specific events. Listeners must inherit from one of the abstract classes below and implement the correct functions for that class. @@ -125,6 +128,9 @@ class ConnectionPoolLogger(ConnectionPoolListener): def pool_created(self, event): logging.info("[pool {0.address}] pool created".format(event)) + def pool_ready(self, event): + logging.info("[pool {0.address}] pool is ready".format(event)) + def pool_cleared(self, event): logging.info("[pool {0.address}] pool cleared".format(event)) @@ -132,15 +138,15 @@ def pool_closed(self, event): logging.info("[pool {0.address}] pool closed".format(event)) def connection_created(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection created".format(event)) def connection_ready(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection setup succeeded".format(event)) def connection_closed(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection closed, reason: " "{0.reason}".format(event)) @@ -153,11 +159,11 @@ def connection_check_out_failed(self, event): "failed, reason: {0.reason}".format(event)) def connection_checked_out(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection checked out of pool".format(event)) def connection_checked_in(self, event): - logging.info("[pool {0.address}][conn #{0.connection_id}] " + logging.info("[pool {0.address}][connection #{0.connection_id}] " "connection checked into pool".format(event)) @@ -180,16 +186,20 @@ def connection_checked_in(self, event): handler first. 
""" +from __future__ import annotations + import datetime from collections import abc, namedtuple -from typing import TYPE_CHECKING, Any, Dict, Optional +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence from bson.objectid import ObjectId from pymongo.hello import Hello, HelloCompat -from pymongo.helpers import _handle_exception +from pymongo.helpers_shared import _SENSITIVE_COMMANDS, _handle_exception from pymongo.typings import _Address, _DocumentOut if TYPE_CHECKING: + from datetime import timedelta + from pymongo.server_description import ServerDescription from pymongo.topology_description import TopologyDescription @@ -208,7 +218,7 @@ def connection_checked_in(self, event): _LISTENERS = _Listeners([], [], [], [], []) -class _EventListener(object): +class _EventListener: """Abstract base class for all event listeners.""" @@ -219,27 +229,24 @@ class CommandListener(_EventListener): and `CommandFailedEvent`. """ - def started(self, event: "CommandStartedEvent") -> None: + def started(self, event: CommandStartedEvent) -> None: """Abstract method to handle a `CommandStartedEvent`. - :Parameters: - - `event`: An instance of :class:`CommandStartedEvent`. + :param event: An instance of :class:`CommandStartedEvent`. """ raise NotImplementedError - def succeeded(self, event: "CommandSucceededEvent") -> None: + def succeeded(self, event: CommandSucceededEvent) -> None: """Abstract method to handle a `CommandSucceededEvent`. - :Parameters: - - `event`: An instance of :class:`CommandSucceededEvent`. + :param event: An instance of :class:`CommandSucceededEvent`. """ raise NotImplementedError - def failed(self, event: "CommandFailedEvent") -> None: + def failed(self, event: CommandFailedEvent) -> None: """Abstract method to handle a `CommandFailedEvent`. - :Parameters: - - `event`: An instance of :class:`CommandFailedEvent`. + :param event: An instance of :class:`CommandFailedEvent`. """ raise NotImplementedError @@ -260,117 +267,106 @@ class ConnectionPoolListener(_EventListener): .. versionadded:: 3.9 """ - def pool_created(self, event: "PoolCreatedEvent") -> None: + def pool_created(self, event: PoolCreatedEvent) -> None: """Abstract method to handle a :class:`PoolCreatedEvent`. - Emitted when a Connection Pool is created. + Emitted when a connection Pool is created. - :Parameters: - - `event`: An instance of :class:`PoolCreatedEvent`. + :param event: An instance of :class:`PoolCreatedEvent`. """ raise NotImplementedError - def pool_ready(self, event: "PoolReadyEvent") -> None: + def pool_ready(self, event: PoolReadyEvent) -> None: """Abstract method to handle a :class:`PoolReadyEvent`. - Emitted when a Connection Pool is marked ready. + Emitted when a connection Pool is marked ready. - :Parameters: - - `event`: An instance of :class:`PoolReadyEvent`. + :param event: An instance of :class:`PoolReadyEvent`. .. versionadded:: 4.0 """ raise NotImplementedError - def pool_cleared(self, event: "PoolClearedEvent") -> None: + def pool_cleared(self, event: PoolClearedEvent) -> None: """Abstract method to handle a `PoolClearedEvent`. - Emitted when a Connection Pool is cleared. + Emitted when a connection Pool is cleared. - :Parameters: - - `event`: An instance of :class:`PoolClearedEvent`. + :param event: An instance of :class:`PoolClearedEvent`. """ raise NotImplementedError - def pool_closed(self, event: "PoolClosedEvent") -> None: + def pool_closed(self, event: PoolClosedEvent) -> None: """Abstract method to handle a `PoolClosedEvent`. - Emitted when a Connection Pool is closed. 
+ Emitted when a connection Pool is closed. - :Parameters: - - `event`: An instance of :class:`PoolClosedEvent`. + :param event: An instance of :class:`PoolClosedEvent`. """ raise NotImplementedError - def connection_created(self, event: "ConnectionCreatedEvent") -> None: + def connection_created(self, event: ConnectionCreatedEvent) -> None: """Abstract method to handle a :class:`ConnectionCreatedEvent`. - Emitted when a Connection Pool creates a Connection object. + Emitted when a connection Pool creates a Connection object. - :Parameters: - - `event`: An instance of :class:`ConnectionCreatedEvent`. + :param event: An instance of :class:`ConnectionCreatedEvent`. """ raise NotImplementedError - def connection_ready(self, event: "ConnectionReadyEvent") -> None: + def connection_ready(self, event: ConnectionReadyEvent) -> None: """Abstract method to handle a :class:`ConnectionReadyEvent`. - Emitted when a Connection has finished its setup, and is now ready to + Emitted when a connection has finished its setup, and is now ready to use. - :Parameters: - - `event`: An instance of :class:`ConnectionReadyEvent`. + :param event: An instance of :class:`ConnectionReadyEvent`. """ raise NotImplementedError - def connection_closed(self, event: "ConnectionClosedEvent") -> None: + def connection_closed(self, event: ConnectionClosedEvent) -> None: """Abstract method to handle a :class:`ConnectionClosedEvent`. - Emitted when a Connection Pool closes a Connection. + Emitted when a connection Pool closes a connection. - :Parameters: - - `event`: An instance of :class:`ConnectionClosedEvent`. + :param event: An instance of :class:`ConnectionClosedEvent`. """ raise NotImplementedError - def connection_check_out_started(self, event: "ConnectionCheckOutStartedEvent") -> None: + def connection_check_out_started(self, event: ConnectionCheckOutStartedEvent) -> None: """Abstract method to handle a :class:`ConnectionCheckOutStartedEvent`. Emitted when the driver starts attempting to check out a connection. - :Parameters: - - `event`: An instance of :class:`ConnectionCheckOutStartedEvent`. + :param event: An instance of :class:`ConnectionCheckOutStartedEvent`. """ raise NotImplementedError - def connection_check_out_failed(self, event: "ConnectionCheckOutFailedEvent") -> None: + def connection_check_out_failed(self, event: ConnectionCheckOutFailedEvent) -> None: """Abstract method to handle a :class:`ConnectionCheckOutFailedEvent`. Emitted when the driver's attempt to check out a connection fails. - :Parameters: - - `event`: An instance of :class:`ConnectionCheckOutFailedEvent`. + :param event: An instance of :class:`ConnectionCheckOutFailedEvent`. """ raise NotImplementedError - def connection_checked_out(self, event: "ConnectionCheckedOutEvent") -> None: + def connection_checked_out(self, event: ConnectionCheckedOutEvent) -> None: """Abstract method to handle a :class:`ConnectionCheckedOutEvent`. - Emitted when the driver successfully checks out a Connection. + Emitted when the driver successfully checks out a connection. - :Parameters: - - `event`: An instance of :class:`ConnectionCheckedOutEvent`. + :param event: An instance of :class:`ConnectionCheckedOutEvent`. """ raise NotImplementedError - def connection_checked_in(self, event: "ConnectionCheckedInEvent") -> None: + def connection_checked_in(self, event: ConnectionCheckedInEvent) -> None: """Abstract method to handle a :class:`ConnectionCheckedInEvent`. 
- Emitted when the driver checks in a Connection back to the Connection + Emitted when the driver checks in a connection back to the connection Pool. - :Parameters: - - `event`: An instance of :class:`ConnectionCheckedInEvent`. + :param event: An instance of :class:`ConnectionCheckedInEvent`. """ raise NotImplementedError @@ -384,27 +380,24 @@ class ServerHeartbeatListener(_EventListener): .. versionadded:: 3.3 """ - def started(self, event: "ServerHeartbeatStartedEvent") -> None: + def started(self, event: ServerHeartbeatStartedEvent) -> None: """Abstract method to handle a `ServerHeartbeatStartedEvent`. - :Parameters: - - `event`: An instance of :class:`ServerHeartbeatStartedEvent`. + :param event: An instance of :class:`ServerHeartbeatStartedEvent`. """ raise NotImplementedError - def succeeded(self, event: "ServerHeartbeatSucceededEvent") -> None: + def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: """Abstract method to handle a `ServerHeartbeatSucceededEvent`. - :Parameters: - - `event`: An instance of :class:`ServerHeartbeatSucceededEvent`. + :param event: An instance of :class:`ServerHeartbeatSucceededEvent`. """ raise NotImplementedError - def failed(self, event: "ServerHeartbeatFailedEvent") -> None: + def failed(self, event: ServerHeartbeatFailedEvent) -> None: """Abstract method to handle a `ServerHeartbeatFailedEvent`. - :Parameters: - - `event`: An instance of :class:`ServerHeartbeatFailedEvent`. + :param event: An instance of :class:`ServerHeartbeatFailedEvent`. """ raise NotImplementedError @@ -417,27 +410,24 @@ class TopologyListener(_EventListener): .. versionadded:: 3.3 """ - def opened(self, event: "TopologyOpenedEvent") -> None: + def opened(self, event: TopologyOpenedEvent) -> None: """Abstract method to handle a `TopologyOpenedEvent`. - :Parameters: - - `event`: An instance of :class:`TopologyOpenedEvent`. + :param event: An instance of :class:`TopologyOpenedEvent`. """ raise NotImplementedError - def description_changed(self, event: "TopologyDescriptionChangedEvent") -> None: + def description_changed(self, event: TopologyDescriptionChangedEvent) -> None: """Abstract method to handle a `TopologyDescriptionChangedEvent`. - :Parameters: - - `event`: An instance of :class:`TopologyDescriptionChangedEvent`. + :param event: An instance of :class:`TopologyDescriptionChangedEvent`. """ raise NotImplementedError - def closed(self, event: "TopologyClosedEvent") -> None: + def closed(self, event: TopologyClosedEvent) -> None: """Abstract method to handle a `TopologyClosedEvent`. - :Parameters: - - `event`: An instance of :class:`TopologyClosedEvent`. + :param event: An instance of :class:`TopologyClosedEvent`. """ raise NotImplementedError @@ -450,47 +440,47 @@ class ServerListener(_EventListener): .. versionadded:: 3.3 """ - def opened(self, event: "ServerOpeningEvent") -> None: + def opened(self, event: ServerOpeningEvent) -> None: """Abstract method to handle a `ServerOpeningEvent`. - :Parameters: - - `event`: An instance of :class:`ServerOpeningEvent`. + :param event: An instance of :class:`ServerOpeningEvent`. """ raise NotImplementedError - def description_changed(self, event: "ServerDescriptionChangedEvent") -> None: + def description_changed(self, event: ServerDescriptionChangedEvent) -> None: """Abstract method to handle a `ServerDescriptionChangedEvent`. - :Parameters: - - `event`: An instance of :class:`ServerDescriptionChangedEvent`. + :param event: An instance of :class:`ServerDescriptionChangedEvent`. 
""" raise NotImplementedError - def closed(self, event: "ServerClosedEvent") -> None: + def closed(self, event: ServerClosedEvent) -> None: """Abstract method to handle a `ServerClosedEvent`. - :Parameters: - - `event`: An instance of :class:`ServerClosedEvent`. + :param event: An instance of :class:`ServerClosedEvent`. """ raise NotImplementedError -def _to_micros(dur): +def _to_micros(dur: timedelta) -> int: """Convert duration 'dur' to microseconds.""" return int(dur.total_seconds() * 10e5) -def _validate_event_listeners(option, listeners): +def _validate_event_listeners( + option: str, listeners: Sequence[_EventListeners] +) -> Sequence[_EventListeners]: """Validate event listeners""" if not isinstance(listeners, abc.Sequence): - raise TypeError("%s must be a list or tuple" % (option,)) + raise TypeError(f"{option} must be a list or tuple, not {type(listeners)}") for listener in listeners: if not isinstance(listener, _EventListener): raise TypeError( - "Listeners for %s must be either a " + f"Listeners for {option} must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (option,) + "ConnectionPoolListener," + f"not {type(listener)}" ) return listeners @@ -498,17 +488,17 @@ def _validate_event_listeners(option, listeners): def register(listener: _EventListener) -> None: """Register a global event listener. - :Parameters: - - `listener`: A subclasses of :class:`CommandListener`, + :param listener: A subclasses of :class:`CommandListener`, :class:`ServerHeartbeatListener`, :class:`ServerListener`, :class:`TopologyListener`, or :class:`ConnectionPoolListener`. """ if not isinstance(listener, _EventListener): raise TypeError( - "Listeners for %s must be either a " + f"Listeners for {listener} must be either a " "CommandListener, ServerHeartbeatListener, " "ServerListener, TopologyListener, or " - "ConnectionPoolListener." % (listener,) + "ConnectionPoolListener," + f"not {type(listener)}" ) if isinstance(listener, CommandListener): _LISTENERS.command_listeners.append(listener) @@ -522,27 +512,9 @@ def register(listener: _EventListener) -> None: _LISTENERS.cmap_listeners.append(listener) -# Note - to avoid bugs from forgetting which if these is all lowercase and -# which are camelCase, and at the same time avoid having to add a test for -# every command, use all lowercase here and test against command_name.lower(). -_SENSITIVE_COMMANDS = set( - [ - "authenticate", - "saslstart", - "saslcontinue", - "getnonce", - "createuser", - "updateuser", - "copydbgetnonce", - "copydbsaslstart", - "copydb", - ] -) - - # The "hello" command is also deemed sensitive when attempting speculative # authentication. 
-def _is_speculative_authenticate(command_name, doc): +def _is_speculative_authenticate(command_name: str, doc: Mapping[str, Any]) -> bool: if ( command_name.lower() in ("hello", HelloCompat.LEGACY_CMD) and "speculativeAuthenticate" in doc @@ -551,10 +523,18 @@ def _is_speculative_authenticate(command_name, doc): return False -class _CommandEvent(object): +class _CommandEvent: """Base class for command events.""" - __slots__ = ("__cmd_name", "__rqst_id", "__conn_id", "__op_id", "__service_id") + __slots__ = ( + "__cmd_name", + "__rqst_id", + "__conn_id", + "__op_id", + "__service_id", + "__db", + "__server_conn_id", + ) def __init__( self, @@ -563,12 +543,16 @@ def __init__( connection_id: _Address, operation_id: Optional[int], service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, ) -> None: self.__cmd_name = command_name self.__rqst_id = request_id self.__conn_id = connection_id self.__op_id = operation_id self.__service_id = service_id + self.__db = database_name + self.__server_conn_id = server_connection_id @property def command_name(self) -> str: @@ -598,21 +582,36 @@ def operation_id(self) -> Optional[int]: """An id for this series of events or None.""" return self.__op_id + @property + def database_name(self) -> str: + """The database_name this command was sent to, or ``""``. + + .. versionadded:: 4.6 + """ + return self.__db + + @property + def server_connection_id(self) -> Optional[int]: + """The server-side connection id for the connection this command was sent on, or ``None``. + + .. versionadded:: 4.7 + """ + return self.__server_conn_id + class CommandStartedEvent(_CommandEvent): """Event published when a command starts. - :Parameters: - - `command`: The command document. - - `database_name`: The name of the database this command was run against. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this command + :param command: The command document. + :param database_name: The name of the database this command was run against. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `operation_id`: An optional identifier for a series of related events. - - `service_id`: The service_id this command was sent to, or ``None``. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. """ - __slots__ = ("__cmd", "__db") + __slots__ = ("__cmd",) def __init__( self, @@ -622,20 +621,26 @@ def __init__( connection_id: _Address, operation_id: Optional[int], service_id: Optional[ObjectId] = None, + server_connection_id: Optional[int] = None, ) -> None: if not command: - raise ValueError("%r is not a valid command" % (command,)) + raise ValueError(f"{command!r} is not a valid command") # Command name must be first key. 
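# (Command documents are ordered mappings: the MongoDB wire protocol requires
# the command name to be the first key, which is why the next(iter(command))
# call below is sufficient to recover it.)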
command_name = next(iter(command)) - super(CommandStartedEvent, self).__init__( - command_name, request_id, connection_id, operation_id, service_id=service_id + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, ) cmd_name = command_name.lower() if cmd_name in _SENSITIVE_COMMANDS or _is_speculative_authenticate(cmd_name, command): self.__cmd: _DocumentOut = {} else: self.__cmd = command - self.__db = database_name @property def command(self) -> _DocumentOut: @@ -645,31 +650,34 @@ def command(self) -> _DocumentOut: @property def database_name(self) -> str: """The name of the database this command was run against.""" - return self.__db + return super().database_name - def __repr__(self): - return ("<%s %s db: %r, command: %r, operation_id: %s, service_id: %s>") % ( + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, service_id: {}, server_connection_id: {}>" + ).format( self.__class__.__name__, self.connection_id, self.database_name, self.command_name, self.operation_id, self.service_id, + self.server_connection_id, ) class CommandSucceededEvent(_CommandEvent): """Event published when a command succeeds. - :Parameters: - - `duration`: The command duration as a datetime.timedelta. - - `reply`: The server reply document. - - `command_name`: The command name. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this command + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `operation_id`: An optional identifier for a series of related events. - - `service_id`: The service_id this command was sent to, or ``None``. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. 
""" __slots__ = ("__duration_micros", "__reply") @@ -683,9 +691,17 @@ def __init__( connection_id: _Address, operation_id: Optional[int], service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, ) -> None: - super(CommandSucceededEvent, self).__init__( - command_name, request_id, connection_id, operation_id, service_id=service_id + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, ) self.__duration_micros = _to_micros(duration) cmd_name = command_name.lower() @@ -704,29 +720,33 @@ def reply(self) -> _DocumentOut: """The server failure document for this operation.""" return self.__reply - def __repr__(self): - return ("<%s %s command: %r, operation_id: %s, duration_micros: %s, service_id: %s>") % ( + def __repr__(self) -> str: + return ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, service_id: {}, server_connection_id: {}>" + ).format( self.__class__.__name__, self.connection_id, + self.database_name, self.command_name, self.operation_id, self.duration_micros, self.service_id, + self.server_connection_id, ) class CommandFailedEvent(_CommandEvent): """Event published when a command fails. - :Parameters: - - `duration`: The command duration as a datetime.timedelta. - - `failure`: The server reply document. - - `command_name`: The command name. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this command + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `operation_id`: An optional identifier for a series of related events. - - `service_id`: The service_id this command was sent to, or ``None``. + :param operation_id: An optional identifier for a series of related events. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. 
""" __slots__ = ("__duration_micros", "__failure") @@ -740,9 +760,17 @@ def __init__( connection_id: _Address, operation_id: Optional[int], service_id: Optional[ObjectId] = None, + database_name: str = "", + server_connection_id: Optional[int] = None, ) -> None: - super(CommandFailedEvent, self).__init__( - command_name, request_id, connection_id, operation_id, service_id=service_id + super().__init__( + command_name, + request_id, + connection_id, + operation_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, ) self.__duration_micros = _to_micros(duration) self.__failure = failure @@ -757,22 +785,24 @@ def failure(self) -> _DocumentOut: """The server failure document for this operation.""" return self.__failure - def __repr__(self): + def __repr__(self) -> str: return ( - "<%s %s command: %r, operation_id: %s, duration_micros: %s, " - "failure: %r, service_id: %s>" - ) % ( + "<{} {} db: {!r}, command: {!r}, operation_id: {}, duration_micros: {}, " + "failure: {!r}, service_id: {}, server_connection_id: {}>" + ).format( self.__class__.__name__, self.connection_id, + self.database_name, self.command_name, self.operation_id, self.duration_micros, self.failure, self.service_id, + self.server_connection_id, ) -class _PoolEvent(object): +class _PoolEvent: """Base class for pool events.""" __slots__ = ("__address",) @@ -787,15 +817,14 @@ def address(self) -> _Address: """ return self.__address - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.__address) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" class PoolCreatedEvent(_PoolEvent): """Published when a Connection Pool is created. - :Parameters: - - `address`: The address (host, port) pair of the server this Pool is + :param address: The address (host, port) pair of the server this Pool is attempting to connect to. .. versionadded:: 3.9 @@ -803,24 +832,23 @@ class PoolCreatedEvent(_PoolEvent): __slots__ = ("__options",) - def __init__(self, address: _Address, options: Dict[str, Any]) -> None: - super(PoolCreatedEvent, self).__init__(address) + def __init__(self, address: _Address, options: dict[str, Any]) -> None: + super().__init__(address) self.__options = options @property - def options(self) -> Dict[str, Any]: + def options(self) -> dict[str, Any]: """Any non-default pool options that were set on this Connection Pool.""" return self.__options - def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__options) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__options!r})" class PoolReadyEvent(_PoolEvent): """Published when a Connection Pool is marked ready. - :Parameters: - - `address`: The address (host, port) pair of the server this Pool is + :param address: The address (host, port) pair of the server this Pool is attempting to connect to. .. versionadded:: 4.0 @@ -832,19 +860,25 @@ class PoolReadyEvent(_PoolEvent): class PoolClearedEvent(_PoolEvent): """Published when a Connection Pool is cleared. - :Parameters: - - `address`: The address (host, port) pair of the server this Pool is + :param address: The address (host, port) pair of the server this Pool is attempting to connect to. - - `service_id`: The service_id this command was sent to, or ``None``. + :param service_id: The service_id this command was sent to, or ``None``. + :param interrupt_connections: True if all active connections were interrupted by the Pool during clearing. .. 
versionadded:: 3.9 """ - __slots__ = ("__service_id",) + __slots__ = ("__service_id", "__interrupt_connections") - def __init__(self, address: _Address, service_id: Optional[ObjectId] = None) -> None: - super(PoolClearedEvent, self).__init__(address) + def __init__( + self, + address: _Address, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + super().__init__(address) self.__service_id = service_id + self.__interrupt_connections = interrupt_connections @property def service_id(self) -> Optional[ObjectId]: @@ -856,15 +890,22 @@ def service_id(self) -> Optional[ObjectId]: """ return self.__service_id - def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.address, self.__service_id) + @property + def interrupt_connections(self) -> bool: + """If True, active connections are interrupted during clearing. + + .. versionadded:: 4.7 + """ + return self.__interrupt_connections + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__service_id!r}, {self.__interrupt_connections!r})" class PoolClosedEvent(_PoolEvent): """Published when a Connection Pool is closed. - :Parameters: - - `address`: The address (host, port) pair of the server this Pool is + :param address: The address (host, port) pair of the server this Pool is attempting to connect to. .. versionadded:: 3.9 @@ -873,7 +914,7 @@ class PoolClosedEvent(_PoolEvent): __slots__ = () -class ConnectionClosedReason(object): +class ConnectionClosedReason: """An enum that defines values for `reason` on a :class:`ConnectionClosedEvent`. @@ -894,7 +935,7 @@ class ConnectionClosedReason(object): """The pool was closed, making the connection no longer valid.""" -class ConnectionCheckOutFailedReason(object): +class ConnectionCheckOutFailedReason: """An enum that defines values for `reason` on a :class:`ConnectionCheckOutFailedEvent`. 
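To illustrate how the pool-event API above is consumed, here is a minimal, hypothetical sketch (the PoolClearLogger name and its logging choices are illustrative, not part of this patch) of a ConnectionPoolListener that observes the new interrupt_connections flag on PoolClearedEvent:

import logging

from pymongo import monitoring


class PoolClearLogger(monitoring.ConnectionPoolListener):
    """Log pool clears, noting whether in-use connections were interrupted."""

    def pool_cleared(self, event):
        logging.warning(
            "Pool for %s cleared (service_id=%s, interrupt_connections=%s)",
            event.address,
            event.service_id,
            event.interrupt_connections,
        )

    # The remaining hooks must also be overridden; the base class methods
    # raise NotImplementedError. No-ops suffice for this sketch.
    def pool_created(self, event):
        pass

    def pool_ready(self, event):
        pass

    def pool_closed(self, event):
        pass

    def connection_created(self, event):
        pass

    def connection_ready(self, event):
        pass

    def connection_closed(self, event):
        pass

    def connection_check_out_started(self, event):
        pass

    def connection_check_out_failed(self, event):
        pass

    def connection_checked_out(self, event):
        pass

    def connection_checked_in(self, event):
        pass


monitoring.register(PoolClearLogger())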
@@ -913,14 +954,13 @@ class ConnectionCheckOutFailedReason(object): """ -class _ConnectionEvent(object): - """Private base class for some connection events.""" +class _ConnectionEvent: + """Private base class for connection events.""" - __slots__ = ("__address", "__connection_id") + __slots__ = ("__address",) - def __init__(self, address: _Address, connection_id: int) -> None: + def __init__(self, address: _Address) -> None: self.__address = address - self.__connection_id = connection_id @property def address(self) -> _Address: @@ -929,25 +969,58 @@ def address(self) -> _Address: """ return self.__address + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__address!r})" + + +class _ConnectionIdEvent(_ConnectionEvent): + """Private base class for connection events with an id.""" + + __slots__ = ("__connection_id",) + + def __init__(self, address: _Address, connection_id: int) -> None: + super().__init__(address) + self.__connection_id = connection_id + @property def connection_id(self) -> int: - """The ID of the Connection.""" + """The ID of the connection.""" return self.__connection_id - def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__connection_id) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__connection_id!r})" + + +class _ConnectionDurationEvent(_ConnectionIdEvent): + """Private base class for connection events with a duration.""" + + __slots__ = ("__duration",) + + def __init__(self, address: _Address, connection_id: int, duration: Optional[float]) -> None: + super().__init__(address, connection_id) + self.__duration = duration + + @property + def duration(self) -> Optional[float]: + """The duration of the connection event. + + .. versionadded:: 4.7 + """ + return self.__duration + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.connection_id!r}, {self.__duration!r})" -class ConnectionCreatedEvent(_ConnectionEvent): + +class ConnectionCreatedEvent(_ConnectionIdEvent): """Published when a Connection Pool creates a Connection object. NOTE: This connection is not ready for use until the :class:`ConnectionReadyEvent` is published. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. + :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9 """ @@ -955,13 +1028,12 @@ class ConnectionCreatedEvent(_ConnectionEvent): __slots__ = () -class ConnectionReadyEvent(_ConnectionEvent): +class ConnectionReadyEvent(_ConnectionDurationEvent): """Published when a Connection has finished its setup, and is ready to use. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. + :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9 """ @@ -969,26 +1041,25 @@ class ConnectionReadyEvent(_ConnectionEvent): __slots__ = () -class ConnectionClosedEvent(_ConnectionEvent): +class ConnectionClosedEvent(_ConnectionIdEvent): """Published when a Connection is closed. 
- :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. - - `reason`: A reason explaining why this connection was closed. + :param connection_id: The integer ID of the Connection in this Pool. + :param reason: A reason explaining why this connection was closed. .. versionadded:: 3.9 """ __slots__ = ("__reason",) - def __init__(self, address, connection_id, reason): - super(ConnectionClosedEvent, self).__init__(address, connection_id) + def __init__(self, address: _Address, connection_id: int, reason: str): + super().__init__(address, connection_id) self.__reason = reason @property - def reason(self): + def reason(self) -> str: """A reason explaining why this connection was closed. The reason must be one of the strings from the @@ -996,8 +1067,8 @@ def reason(self): """ return self.__reason - def __repr__(self): - return "%s(%r, %r, %r)" % ( + def __repr__(self) -> str: + return "{}({!r}, {!r}, {!r})".format( self.__class__.__name__, self.address, self.connection_id, @@ -1005,56 +1076,34 @@ def __repr__(self): ) -class ConnectionCheckOutStartedEvent(object): +class ConnectionCheckOutStartedEvent(_ConnectionEvent): """Published when the driver starts attempting to check out a connection. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. .. versionadded:: 3.9 """ - __slots__ = ("__address",) - - def __init__(self, address): - self.__address = address - - @property - def address(self): - """The address (host, port) pair of the server this connection is - attempting to connect to. - """ - return self.__address - - def __repr__(self): - return "%s(%r)" % (self.__class__.__name__, self.__address) + __slots__ = () -class ConnectionCheckOutFailedEvent(object): +class ConnectionCheckOutFailedEvent(_ConnectionDurationEvent): """Published when the driver's attempt to check out a connection fails. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `reason`: A reason explaining why connection check out failed. + :param reason: A reason explaining why connection check out failed. .. versionadded:: 3.9 """ - __slots__ = ("__address", "__reason") + __slots__ = ("__reason",) - def __init__(self, address: _Address, reason: str) -> None: - self.__address = address + def __init__(self, address: _Address, reason: str, duration: Optional[float]) -> None: + super().__init__(address=address, connection_id=0, duration=duration) self.__reason = reason - @property - def address(self) -> _Address: - """The address (host, port) pair of the server this connection is - attempting to connect to. - """ - return self.__address - @property def reason(self) -> str: """A reason explaining why connection check out failed. @@ -1064,17 +1113,16 @@ def reason(self) -> str: """ return self.__reason - def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, self.__address, self.__reason) + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.address!r}, {self.__reason!r}, {self.duration!r})" -class ConnectionCheckedOutEvent(_ConnectionEvent): - """Published when the driver successfully checks out a Connection. 
+class ConnectionCheckedOutEvent(_ConnectionDurationEvent): + """Published when the driver successfully checks out a connection. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. + :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9 """ @@ -1082,13 +1130,12 @@ class ConnectionCheckedOutEvent(_ConnectionEvent): __slots__ = () -class ConnectionCheckedInEvent(_ConnectionEvent): +class ConnectionCheckedInEvent(_ConnectionIdEvent): """Published when the driver checks in a Connection into the Pool. - :Parameters: - - `address`: The address (host, port) pair of the server this + :param address: The address (host, port) pair of the server this Connection is attempting to connect to. - - `connection_id`: The integer ID of the Connection in this Pool. + :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9 """ @@ -1096,7 +1143,7 @@ class ConnectionCheckedInEvent(_ConnectionEvent): __slots__ = () -class _ServerEvent(object): +class _ServerEvent: """Base class for server events.""" __slots__ = ("__server_address", "__topology_id") @@ -1115,12 +1162,8 @@ def topology_id(self) -> ObjectId: """A unique identifier for the topology this server is a part of.""" return self.__topology_id - def __repr__(self): - return "<%s %s topology_id: %s>" % ( - self.__class__.__name__, - self.server_address, - self.topology_id, - ) + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.server_address} topology_id: {self.topology_id}>" class ServerDescriptionChangedEvent(_ServerEvent): @@ -1133,28 +1176,30 @@ class ServerDescriptionChangedEvent(_ServerEvent): def __init__( self, - previous_description: "ServerDescription", - new_description: "ServerDescription", - *args: Any + previous_description: ServerDescription, + new_description: ServerDescription, + *args: Any, ) -> None: - super(ServerDescriptionChangedEvent, self).__init__(*args) + super().__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property - def previous_description(self) -> "ServerDescription": + def previous_description(self) -> ServerDescription: """The previous - :class:`~pymongo.server_description.ServerDescription`.""" + :class:`~pymongo.server_description.ServerDescription`. + """ return self.__previous_description @property - def new_description(self) -> "ServerDescription": + def new_description(self) -> ServerDescription: """The new - :class:`~pymongo.server_description.ServerDescription`.""" + :class:`~pymongo.server_description.ServerDescription`. 
+ """ return self.__new_description - def __repr__(self): - return "<%s %s changed from: %s, to: %s>" % ( + def __repr__(self) -> str: + return "<{} {} changed from: {}, to: {}>".format( self.__class__.__name__, self.server_address, self.previous_description, @@ -1180,10 +1225,10 @@ class ServerClosedEvent(_ServerEvent): __slots__ = () -class TopologyEvent(object): +class TopologyEvent: """Base class for topology description events.""" - __slots__ = "__topology_id" + __slots__ = ("__topology_id",) def __init__(self, topology_id: ObjectId) -> None: self.__topology_id = topology_id @@ -1193,8 +1238,8 @@ def topology_id(self) -> ObjectId: """A unique identifier for the topology this server is a part of.""" return self.__topology_id - def __repr__(self): - return "<%s topology_id: %s>" % (self.__class__.__name__, self.topology_id) + def __repr__(self) -> str: + return f"<{self.__class__.__name__} topology_id: {self.topology_id}>" class TopologyDescriptionChangedEvent(TopologyEvent): @@ -1207,28 +1252,30 @@ class TopologyDescriptionChangedEvent(TopologyEvent): def __init__( self, - previous_description: "TopologyDescription", - new_description: "TopologyDescription", - *args: Any + previous_description: TopologyDescription, + new_description: TopologyDescription, + *args: Any, ) -> None: - super(TopologyDescriptionChangedEvent, self).__init__(*args) + super().__init__(*args) self.__previous_description = previous_description self.__new_description = new_description @property - def previous_description(self) -> "TopologyDescription": + def previous_description(self) -> TopologyDescription: """The previous - :class:`~pymongo.topology_description.TopologyDescription`.""" + :class:`~pymongo.topology_description.TopologyDescription`. + """ return self.__previous_description @property - def new_description(self) -> "TopologyDescription": + def new_description(self) -> TopologyDescription: """The new - :class:`~pymongo.topology_description.TopologyDescription`.""" + :class:`~pymongo.topology_description.TopologyDescription`. + """ return self.__new_description - def __repr__(self): - return "<%s topology_id: %s changed from: %s, to: %s>" % ( + def __repr__(self) -> str: + return "<{} topology_id: {} changed from: {}, to: {}>".format( self.__class__.__name__, self.topology_id, self.previous_description, @@ -1254,22 +1301,32 @@ class TopologyClosedEvent(TopologyEvent): __slots__ = () -class _ServerHeartbeatEvent(object): +class _ServerHeartbeatEvent: """Base class for server heartbeat events.""" - __slots__ = "__connection_id" + __slots__ = ("__connection_id", "__awaited") - def __init__(self, connection_id: _Address) -> None: + def __init__(self, connection_id: _Address, awaited: bool = False) -> None: self.__connection_id = connection_id + self.__awaited = awaited @property def connection_id(self) -> _Address: """The address (host, port) of the server this heartbeat was sent - to.""" + to. + """ return self.__connection_id - def __repr__(self): - return "<%s %s>" % (self.__class__.__name__, self.connection_id) + @property + def awaited(self) -> bool: + """Whether the heartbeat was issued as an awaitable hello command. + + .. versionadded:: 4.6 + """ + return self.__awaited + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.connection_id} awaited: {self.awaited}>" class ServerHeartbeatStartedEvent(_ServerHeartbeatEvent): @@ -1287,15 +1344,18 @@ class ServerHeartbeatSucceededEvent(_ServerHeartbeatEvent): .. 
versionadded:: 3.3 """ - __slots__ = ("__duration", "__reply", "__awaited") + __slots__ = ("__duration", "__reply") def __init__( - self, duration: float, reply: Hello, connection_id: _Address, awaited: bool = False + self, + duration: float, + reply: Hello[dict[str, Any]], + connection_id: _Address, + awaited: bool = False, ) -> None: - super(ServerHeartbeatSucceededEvent, self).__init__(connection_id) + super().__init__(connection_id, awaited) self.__duration = duration self.__reply = reply - self.__awaited = awaited @property def duration(self) -> float: @@ -1303,7 +1363,7 @@ def duration(self) -> float: return self.__duration @property - def reply(self) -> Hello: + def reply(self) -> Hello[dict[str, Any]]: """An instance of :class:`~pymongo.hello.Hello`.""" return self.__reply @@ -1314,11 +1374,13 @@ def awaited(self) -> bool: If true, then :meth:`duration` reflects the sum of the round trip time to the server and the time that the server waited before sending a response. + + .. versionadded:: 3.11 """ - return self.__awaited + return super().awaited - def __repr__(self): - return "<%s %s duration: %s, awaited: %s, reply: %s>" % ( + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {}>".format( self.__class__.__name__, self.connection_id, self.duration, @@ -1334,15 +1396,14 @@ class ServerHeartbeatFailedEvent(_ServerHeartbeatEvent): .. versionadded:: 3.3 """ - __slots__ = ("__duration", "__reply", "__awaited") + __slots__ = ("__duration", "__reply") def __init__( self, duration: float, reply: Exception, connection_id: _Address, awaited: bool = False ) -> None: - super(ServerHeartbeatFailedEvent, self).__init__(connection_id) + super().__init__(connection_id, awaited) self.__duration = duration self.__reply = reply - self.__awaited = awaited @property def duration(self) -> float: @@ -1361,11 +1422,13 @@ def awaited(self) -> bool: If true, then :meth:`duration` reflects the sum of the round trip time to the server and the time that the server waited before sending a response. + + .. versionadded:: 3.11 """ - return self.__awaited + return super().awaited - def __repr__(self): - return "<%s %s duration: %s, awaited: %s, reply: %r>" % ( + def __repr__(self) -> str: + return "<{} {} duration: {}, awaited: {}, reply: {!r}>".format( self.__class__.__name__, self.connection_id, self.duration, @@ -1374,16 +1437,15 @@ def __repr__(self): ) -class _EventListeners(object): +class _EventListeners: """Configure event listeners for a client instance. Any event listeners registered globally are included by default. - :Parameters: - - `listeners`: A list of event listeners. + :param listeners: A list of event listeners. 
""" - def __init__(self, listeners): + def __init__(self, listeners: Optional[Sequence[_EventListener]]): self.__command_listeners = _LISTENERS.command_listeners[:] self.__server_listeners = _LISTENERS.server_listeners[:] lst = _LISTENERS.server_heartbeat_listeners @@ -1409,31 +1471,31 @@ def __init__(self, listeners): self.__enabled_for_cmap = bool(self.__cmap_listeners) @property - def enabled_for_commands(self): + def enabled_for_commands(self) -> bool: """Are any CommandListener instances registered?""" return self.__enabled_for_commands @property - def enabled_for_server(self): + def enabled_for_server(self) -> bool: """Are any ServerListener instances registered?""" return self.__enabled_for_server @property - def enabled_for_server_heartbeat(self): + def enabled_for_server_heartbeat(self) -> bool: """Are any ServerHeartbeatListener instances registered?""" return self.__enabled_for_server_heartbeat @property - def enabled_for_topology(self): + def enabled_for_topology(self) -> bool: """Are any TopologyListener instances registered?""" return self.__enabled_for_topology @property - def enabled_for_cmap(self): + def enabled_for_cmap(self) -> bool: """Are any ConnectionPoolListener instances registered?""" return self.__enabled_for_cmap - def event_listeners(self): + def event_listeners(self) -> list[_EventListeners]: """List of registered event listeners.""" return ( self.__command_listeners @@ -1444,24 +1506,36 @@ def event_listeners(self): ) def publish_command_start( - self, command, database_name, request_id, connection_id, op_id=None, service_id=None - ): + self, + command: _DocumentOut, + database_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + ) -> None: """Publish a CommandStartedEvent to all command listeners. - :Parameters: - - `command`: The command document. - - `database_name`: The name of the database this command was run + :param command: The command document. + :param database_name: The name of the database this command was run against. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `op_id`: The (optional) operation id for this operation. - - `service_id`: The service_id this command was sent to, or ``None``. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. 
""" if op_id is None: op_id = request_id event = CommandStartedEvent( - command, database_name, request_id, connection_id, op_id, service_id=service_id + command, + database_name, + request_id, + connection_id, + op_id, + service_id=service_id, + server_connection_id=server_connection_id, ) for subscriber in self.__command_listeners: try: @@ -1471,27 +1545,29 @@ def publish_command_start( def publish_command_success( self, - duration, - reply, - command_name, - request_id, - connection_id, - op_id=None, - service_id=None, - speculative_hello=False, - ): + duration: timedelta, + reply: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + speculative_hello: bool = False, + database_name: str = "", + ) -> None: """Publish a CommandSucceededEvent to all command listeners. - :Parameters: - - `duration`: The command duration as a datetime.timedelta. - - `reply`: The server reply document. - - `command_name`: The command name. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this + :param duration: The command duration as a datetime.timedelta. + :param reply: The server reply document. + :param command_name: The command name. + :param request_id: The request id for this operation. + :param connection_id: The address (host, port) of the server this command was sent to. - - `op_id`: The (optional) operation id for this operation. - - `service_id`: The service_id this command was sent to, or ``None``. - - `speculative_hello`: Was the command sent with speculative auth? + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param speculative_hello: Was the command sent with speculative auth? + :param database_name: The database this command was sent to, or ``""``. """ if op_id is None: op_id = request_id @@ -1500,7 +1576,15 @@ def publish_command_success( # speculativeAuthenticate. reply = {} event = CommandSucceededEvent( - duration, reply, command_name, request_id, connection_id, op_id, service_id + duration, + reply, + command_name, + request_id, + connection_id, + op_id, + service_id, + database_name=database_name, + server_connection_id=server_connection_id, ) for subscriber in self.__command_listeners: try: @@ -1510,31 +1594,41 @@ def publish_command_success( def publish_command_failure( self, - duration, - failure, - command_name, - request_id, - connection_id, - op_id=None, - service_id=None, - ): + duration: timedelta, + failure: _DocumentOut, + command_name: str, + request_id: int, + connection_id: _Address, + server_connection_id: Optional[int], + op_id: Optional[int] = None, + service_id: Optional[ObjectId] = None, + database_name: str = "", + ) -> None: """Publish a CommandFailedEvent to all command listeners. - :Parameters: - - `duration`: The command duration as a datetime.timedelta. - - `failure`: The server reply document or failure description + :param duration: The command duration as a datetime.timedelta. + :param failure: The server reply document or failure description document. - - `command_name`: The command name. - - `request_id`: The request id for this operation. - - `connection_id`: The address (host, port) of the server this + :param command_name: The command name. + :param request_id: The request id for this operation. 
+ :param connection_id: The address (host, port) of the server this command was sent to. - - `op_id`: The (optional) operation id for this operation. - - `service_id`: The service_id this command was sent to, or ``None``. + :param op_id: The (optional) operation id for this operation. + :param service_id: The service_id this command was sent to, or ``None``. + :param database_name: The database this command was sent to, or ``""``. """ if op_id is None: op_id = request_id event = CommandFailedEvent( - duration, failure, command_name, request_id, connection_id, op_id, service_id=service_id + duration, + failure, + command_name, + request_id, + connection_id, + op_id, + service_id=service_id, + database_name=database_name, + server_connection_id=server_connection_id, ) for subscriber in self.__command_listeners: try: @@ -1542,30 +1636,31 @@ def publish_command_failure( except Exception: _handle_exception() - def publish_server_heartbeat_started(self, connection_id): + def publish_server_heartbeat_started(self, connection_id: _Address, awaited: bool) -> None: """Publish a ServerHeartbeatStartedEvent to all server heartbeat listeners. - :Parameters: - - `connection_id`: The address (host, port) pair of the connection. + :param connection_id: The address (host, port) pair of the connection. + :param awaited: True if this heartbeat is part of an awaitable hello command. """ - event = ServerHeartbeatStartedEvent(connection_id) + event = ServerHeartbeatStartedEvent(connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: try: subscriber.started(event) except Exception: _handle_exception() - def publish_server_heartbeat_succeeded(self, connection_id, duration, reply, awaited): + def publish_server_heartbeat_succeeded( + self, connection_id: _Address, duration: float, reply: Hello[dict[str, Any]], awaited: bool + ) -> None: """Publish a ServerHeartbeatSucceededEvent to all server heartbeat listeners. - :Parameters: - - `connection_id`: The address (host, port) pair of the connection. - - `duration`: The execution time of the event in the highest possible + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible resolution for the platform. - - `reply`: The command reply. - - `awaited`: True if the response was awaited. + :param reply: The command reply. + :param awaited: True if the response was awaited. """ event = ServerHeartbeatSucceededEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: @@ -1574,16 +1669,17 @@ def publish_server_heartbeat_succeeded(self, connection_id, duration, reply, awa except Exception: _handle_exception() - def publish_server_heartbeat_failed(self, connection_id, duration, reply, awaited): + def publish_server_heartbeat_failed( + self, connection_id: _Address, duration: float, reply: Exception, awaited: bool + ) -> None: """Publish a ServerHeartbeatFailedEvent to all server heartbeat listeners. - :Parameters: - - `connection_id`: The address (host, port) pair of the connection. - - `duration`: The execution time of the event in the highest possible + :param connection_id: The address (host, port) pair of the connection. + :param duration: The execution time of the event in the highest possible resolution for the platform. - - `reply`: The command reply. - - `awaited`: True if the response was awaited. + :param reply: The command reply. + :param awaited: True if the response was awaited. 
""" event = ServerHeartbeatFailedEvent(duration, reply, connection_id, awaited) for subscriber in self.__server_heartbeat_listeners: @@ -1592,12 +1688,11 @@ def publish_server_heartbeat_failed(self, connection_id, duration, reply, awaite except Exception: _handle_exception() - def publish_server_opened(self, server_address, topology_id): + def publish_server_opened(self, server_address: _Address, topology_id: ObjectId) -> None: """Publish a ServerOpeningEvent to all server listeners. - :Parameters: - - `server_address`: The address (host, port) pair of the server. - - `topology_id`: A unique identifier for the topology this server + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server is a part of. """ event = ServerOpeningEvent(server_address, topology_id) @@ -1607,12 +1702,11 @@ def publish_server_opened(self, server_address, topology_id): except Exception: _handle_exception() - def publish_server_closed(self, server_address, topology_id): + def publish_server_closed(self, server_address: _Address, topology_id: ObjectId) -> None: """Publish a ServerClosedEvent to all server listeners. - :Parameters: - - `server_address`: The address (host, port) pair of the server. - - `topology_id`: A unique identifier for the topology this server + :param server_address: The address (host, port) pair of the server. + :param topology_id: A unique identifier for the topology this server is a part of. """ event = ServerClosedEvent(server_address, topology_id) @@ -1623,15 +1717,18 @@ def publish_server_closed(self, server_address, topology_id): _handle_exception() def publish_server_description_changed( - self, previous_description, new_description, server_address, topology_id - ): + self, + previous_description: ServerDescription, + new_description: ServerDescription, + server_address: _Address, + topology_id: ObjectId, + ) -> None: """Publish a ServerDescriptionChangedEvent to all server listeners. - :Parameters: - - `previous_description`: The previous server description. - - `server_address`: The address (host, port) pair of the server. - - `new_description`: The new server description. - - `topology_id`: A unique identifier for the topology this server + :param previous_description: The previous server description. + :param server_address: The address (host, port) pair of the server. + :param new_description: The new server description. + :param topology_id: A unique identifier for the topology this server is a part of. """ event = ServerDescriptionChangedEvent( @@ -1643,11 +1740,10 @@ def publish_server_description_changed( except Exception: _handle_exception() - def publish_topology_opened(self, topology_id): + def publish_topology_opened(self, topology_id: ObjectId) -> None: """Publish a TopologyOpenedEvent to all topology listeners. - :Parameters: - - `topology_id`: A unique identifier for the topology this server + :param topology_id: A unique identifier for the topology this server is a part of. """ event = TopologyOpenedEvent(topology_id) @@ -1657,11 +1753,10 @@ def publish_topology_opened(self, topology_id): except Exception: _handle_exception() - def publish_topology_closed(self, topology_id): + def publish_topology_closed(self, topology_id: ObjectId) -> None: """Publish a TopologyClosedEvent to all topology listeners. - :Parameters: - - `topology_id`: A unique identifier for the topology this server + :param topology_id: A unique identifier for the topology this server is a part of. 
""" event = TopologyClosedEvent(topology_id) @@ -1672,14 +1767,16 @@ def publish_topology_closed(self, topology_id): _handle_exception() def publish_topology_description_changed( - self, previous_description, new_description, topology_id - ): + self, + previous_description: TopologyDescription, + new_description: TopologyDescription, + topology_id: ObjectId, + ) -> None: """Publish a TopologyDescriptionChangedEvent to all topology listeners. - :Parameters: - - `previous_description`: The previous topology description. - - `new_description`: The new topology description. - - `topology_id`: A unique identifier for the topology this server + :param previous_description: The previous topology description. + :param new_description: The new topology description. + :param topology_id: A unique identifier for the topology this server is a part of. """ event = TopologyDescriptionChangedEvent(previous_description, new_description, topology_id) @@ -1689,7 +1786,7 @@ def publish_topology_description_changed( except Exception: _handle_exception() - def publish_pool_created(self, address, options): + def publish_pool_created(self, address: _Address, options: dict[str, Any]) -> None: """Publish a :class:`PoolCreatedEvent` to all pool listeners.""" event = PoolCreatedEvent(address, options) for subscriber in self.__cmap_listeners: @@ -1698,7 +1795,7 @@ def publish_pool_created(self, address, options): except Exception: _handle_exception() - def publish_pool_ready(self, address): + def publish_pool_ready(self, address: _Address) -> None: """Publish a :class:`PoolReadyEvent` to all pool listeners.""" event = PoolReadyEvent(address) for subscriber in self.__cmap_listeners: @@ -1707,16 +1804,21 @@ def publish_pool_ready(self, address): except Exception: _handle_exception() - def publish_pool_cleared(self, address, service_id): + def publish_pool_cleared( + self, + address: _Address, + service_id: Optional[ObjectId], + interrupt_connections: bool = False, + ) -> None: """Publish a :class:`PoolClearedEvent` to all pool listeners.""" - event = PoolClearedEvent(address, service_id) + event = PoolClearedEvent(address, service_id, interrupt_connections) for subscriber in self.__cmap_listeners: try: subscriber.pool_cleared(event) except Exception: _handle_exception() - def publish_pool_closed(self, address): + def publish_pool_closed(self, address: _Address) -> None: """Publish a :class:`PoolClosedEvent` to all pool listeners.""" event = PoolClosedEvent(address) for subscriber in self.__cmap_listeners: @@ -1725,7 +1827,7 @@ def publish_pool_closed(self, address): except Exception: _handle_exception() - def publish_connection_created(self, address, connection_id): + def publish_connection_created(self, address: _Address, connection_id: int) -> None: """Publish a :class:`ConnectionCreatedEvent` to all connection listeners. 
""" @@ -1736,16 +1838,18 @@ def publish_connection_created(self, address, connection_id): except Exception: _handle_exception() - def publish_connection_ready(self, address, connection_id): + def publish_connection_ready( + self, address: _Address, connection_id: int, duration: float + ) -> None: """Publish a :class:`ConnectionReadyEvent` to all connection listeners.""" - event = ConnectionReadyEvent(address, connection_id) + event = ConnectionReadyEvent(address, connection_id, duration) for subscriber in self.__cmap_listeners: try: subscriber.connection_ready(event) except Exception: _handle_exception() - def publish_connection_closed(self, address, connection_id, reason): + def publish_connection_closed(self, address: _Address, connection_id: int, reason: str) -> None: """Publish a :class:`ConnectionClosedEvent` to all connection listeners. """ @@ -1756,7 +1860,7 @@ def publish_connection_closed(self, address, connection_id, reason): except Exception: _handle_exception() - def publish_connection_check_out_started(self, address): + def publish_connection_check_out_started(self, address: _Address) -> None: """Publish a :class:`ConnectionCheckOutStartedEvent` to all connection listeners. """ @@ -1767,29 +1871,33 @@ def publish_connection_check_out_started(self, address): except Exception: _handle_exception() - def publish_connection_check_out_failed(self, address, reason): + def publish_connection_check_out_failed( + self, address: _Address, reason: str, duration: float + ) -> None: """Publish a :class:`ConnectionCheckOutFailedEvent` to all connection listeners. """ - event = ConnectionCheckOutFailedEvent(address, reason) + event = ConnectionCheckOutFailedEvent(address, reason, duration) for subscriber in self.__cmap_listeners: try: subscriber.connection_check_out_failed(event) except Exception: _handle_exception() - def publish_connection_checked_out(self, address, connection_id): + def publish_connection_checked_out( + self, address: _Address, connection_id: int, duration: float + ) -> None: """Publish a :class:`ConnectionCheckedOutEvent` to all connection listeners. """ - event = ConnectionCheckedOutEvent(address, connection_id) + event = ConnectionCheckedOutEvent(address, connection_id, duration) for subscriber in self.__cmap_listeners: try: subscriber.connection_checked_out(event) except Exception: _handle_exception() - def publish_connection_checked_in(self, address, connection_id): + def publish_connection_checked_in(self, address: _Address, connection_id: int) -> None: """Publish a :class:`ConnectionCheckedInEvent` to all connection listeners. """ diff --git a/pymongo/network.py b/pymongo/network.py deleted file mode 100644 index a5c5459e14..0000000000 --- a/pymongo/network.py +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Internal network layer helper methods.""" - -import datetime -import errno -import socket -import struct -import time - -from bson import _decode_all_selective -from pymongo import _csot, helpers, message, ssl_support -from pymongo.common import MAX_MESSAGE_SIZE -from pymongo.compression_support import _NO_COMPRESSION, decompress -from pymongo.errors import ( - NotPrimaryError, - OperationFailure, - ProtocolError, - _OperationCancelled, -) -from pymongo.message import _UNPACK_REPLY, _OpMsg -from pymongo.monitoring import _is_speculative_authenticate -from pymongo.socket_checker import _errno_from_exception - -_UNPACK_HEADER = struct.Struct(" max_bson_size: - message._raise_document_too_large(name, size, max_bson_size) - else: - request_id, msg, size = message._query( - 0, ns, 0, -1, spec, None, codec_options, compression_ctx - ) - - if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: - message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) - - if publish: - encoding_duration = datetime.datetime.now() - start - listeners.publish_command_start( - orig, dbname, request_id, address, service_id=sock_info.service_id - ) - start = datetime.datetime.now() - - try: - sock_info.sock.sendall(msg) - if use_op_msg and unacknowledged: - # Unacknowledged, fake a successful command response. - reply = None - response_doc = {"ok": 1} - else: - reply = receive_message(sock_info, request_id) - sock_info.more_to_come = reply.more_to_come - unpacked_docs = reply.unpack_response( - codec_options=codec_options, user_fields=user_fields - ) - - response_doc = unpacked_docs[0] - if client: - client._process_response(response_doc, session) - if check: - helpers._check_command_response( - response_doc, - sock_info.max_wire_version, - allowable_errors, - parse_write_concern_error=parse_write_concern_error, - ) - except Exception as exc: - if publish: - duration = (datetime.datetime.now() - start) + encoding_duration - if isinstance(exc, (NotPrimaryError, OperationFailure)): - failure = exc.details - else: - failure = message._convert_exception(exc) - listeners.publish_command_failure( - duration, failure, name, request_id, address, service_id=sock_info.service_id - ) - raise - if publish: - duration = (datetime.datetime.now() - start) + encoding_duration - listeners.publish_command_success( - duration, - response_doc, - name, - request_id, - address, - service_id=sock_info.service_id, - speculative_hello=speculative_hello, - ) - - if client and client._encrypter and reply: - decrypted = client._encrypter.decrypt(reply.raw_command_response()) - response_doc = _decode_all_selective(decrypted, codec_options, user_fields)[0] - - return response_doc - - -_UNPACK_COMPRESSION_HEADER = struct.Struct(" max_message_size: - raise ProtocolError( - "Message length (%r) is larger than server max " - "message size (%r)" % (length, max_message_size) - ) - if op_code == 2012: - op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER( - _receive_data_on_socket(sock_info, 9, deadline) - ) - data = decompress(_receive_data_on_socket(sock_info, length - 25, deadline), compressor_id) - else: - data = _receive_data_on_socket(sock_info, length - 16, deadline) - - try: - unpack_reply = _UNPACK_REPLY[op_code] - except KeyError: - raise ProtocolError("Got opcode %r but expected %r" % (op_code, _UNPACK_REPLY.keys())) - return unpack_reply(data) - - -_POLL_TIMEOUT = 0.5 - - -def wait_for_read(sock_info, deadline): - """Block until at least one byte is read, or a timeout, or a 
cancel.""" - context = sock_info.cancel_context - # Only Monitor connections can be cancelled. - if context: - sock = sock_info.sock - timed_out = False - while True: - # SSLSocket can have buffered data which won't be caught by select. - if hasattr(sock, "pending") and sock.pending() > 0: - readable = True - else: - # Wait up to 500ms for the socket to become readable and then - # check for cancellation. - if deadline: - remaining = deadline - time.monotonic() - # When the timeout has expired perform one final check to - # see if the socket is readable. This helps avoid spurious - # timeouts on AWS Lambda and other FaaS environments. - if remaining <= 0: - timed_out = True - timeout = max(min(remaining, _POLL_TIMEOUT), 0) - else: - timeout = _POLL_TIMEOUT - readable = sock_info.socket_checker.select(sock, read=True, timeout=timeout) - if context.cancelled: - raise _OperationCancelled("hello cancelled") - if readable: - return - if timed_out: - raise socket.timeout("timed out") - - -# Errors raised by sockets (and TLS sockets) when in non-blocking mode. -BLOCKING_IO_ERRORS = (BlockingIOError,) + ssl_support.BLOCKING_IO_ERRORS - - -def _receive_data_on_socket(sock_info, length, deadline): - buf = bytearray(length) - mv = memoryview(buf) - bytes_read = 0 - while bytes_read < length: - try: - wait_for_read(sock_info, deadline) - # CSOT: Update timeout. When the timeout has expired perform one - # final non-blocking recv. This helps avoid spurious timeouts when - # the response is actually already buffered on the client. - if _csot.get_timeout(): - sock_info.set_socket_timeout(max(deadline - time.monotonic(), 0)) - chunk_length = sock_info.sock.recv_into(mv[bytes_read:]) - except BLOCKING_IO_ERRORS: - raise socket.timeout("timed out") - except (IOError, OSError) as exc: # noqa: B014 - if _errno_from_exception(exc) == errno.EINTR: - continue - raise - if chunk_length == 0: - raise OSError("connection closed") - - bytes_read += chunk_length - - return mv diff --git a/pymongo/network_layer.py b/pymongo/network_layer.py new file mode 100644 index 0000000000..7c62a251f8 --- /dev/null +++ b/pymongo/network_layer.py @@ -0,0 +1,786 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Internal network layer helper methods.""" +from __future__ import annotations + +import asyncio +import collections +import errno +import socket +import struct +import sys +import time +from asyncio import AbstractEventLoop, BaseTransport, BufferedProtocol, Future, Transport +from typing import ( + TYPE_CHECKING, + Any, + Optional, + Union, +) + +from pymongo import _csot, ssl_support +from pymongo._asyncio_task import create_task +from pymongo.common import MAX_MESSAGE_SIZE +from pymongo.compression_support import decompress +from pymongo.errors import ProtocolError, _OperationCancelled +from pymongo.message import _UNPACK_REPLY, _OpMsg, _OpReply +from pymongo.socket_checker import _errno_from_exception + +try: + from ssl import SSLError, SSLSocket + + _HAVE_SSL = True +except ImportError: + _HAVE_SSL = False + +try: + from pymongo.pyopenssl_context import _sslConn + + _HAVE_PYOPENSSL = True +except ImportError: + _HAVE_PYOPENSSL = False + _sslConn = SSLSocket # type: ignore[assignment, misc] + +from pymongo.ssl_support import ( + BLOCKING_IO_LOOKUP_ERROR, + BLOCKING_IO_READ_ERROR, + BLOCKING_IO_WRITE_ERROR, +) + +if TYPE_CHECKING: + from pymongo.asynchronous.pool import AsyncConnection + from pymongo.synchronous.pool import Connection + +_UNPACK_HEADER = struct.Struct(" None: + timeout = sock.gettimeout() + sock.settimeout(0.0) + loop = asyncio.get_running_loop() + try: + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + await asyncio.wait_for(_async_socket_sendall_ssl(sock, buf, loop), timeout=timeout) + else: + await asyncio.wait_for(loop.sock_sendall(sock, buf), timeout=timeout) # type: ignore[arg-type] + except asyncio.TimeoutError as exc: + # Convert the asyncio.wait_for timeout error to socket.timeout which pool.py understands. + raise socket.timeout("timed out") from exc + finally: + sock.settimeout(timeout) + + +if sys.platform != "win32": + + async def _async_socket_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, loop: AbstractEventLoop + ) -> None: + view = memoryview(buf) + sent = 0 + + def _is_ready(fut: Future[Any]) -> None: + if fut.done(): + return + fut.set_result(None) + + while sent < len(buf): + try: + sent += sock.send(view[sent:]) + except BLOCKING_IO_ERRORS as exc: + fd = sock.fileno() + # Check for closed socket. 
+ if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_reader(fd) + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() + loop.add_writer(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_writer(fd) + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + loop.add_writer(fd, _is_ready, fut) + await fut + finally: + loop.remove_reader(fd) + loop.remove_writer(fd) + + async def _async_socket_receive_ssl( + conn: _sslConn, length: int, loop: AbstractEventLoop, once: Optional[bool] = False + ) -> memoryview: + mv = memoryview(bytearray(length)) + total_read = 0 + + def _is_ready(fut: Future[Any]) -> None: + if fut.done(): + return + fut.set_result(None) + + while total_read < length: + try: + read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") + # KMS responses update their expected size after the first batch, stop reading after one loop + if once: + return mv[:read] + total_read += read + except BLOCKING_IO_ERRORS as exc: + fd = conn.fileno() + # Check for closed socket. + if fd == -1: + raise SSLError("Underlying socket has been closed") from None + if isinstance(exc, BLOCKING_IO_READ_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_reader(fd) + if isinstance(exc, BLOCKING_IO_WRITE_ERROR): + fut = loop.create_future() + loop.add_writer(fd, _is_ready, fut) + try: + await fut + finally: + loop.remove_writer(fd) + if _HAVE_PYOPENSSL and isinstance(exc, BLOCKING_IO_LOOKUP_ERROR): + fut = loop.create_future() + loop.add_reader(fd, _is_ready, fut) + try: + loop.add_writer(fd, _is_ready, fut) + await fut + finally: + loop.remove_reader(fd) + loop.remove_writer(fd) + return mv + +else: + # The default Windows asyncio event loop does not support loop.add_reader/add_writer: + # https://docs.python.org/3/library/asyncio-platforms.html#asyncio-platform-support + # Note: In PYTHON-4493 we plan to replace this code with asyncio streams. + async def _async_socket_sendall_ssl( + sock: Union[socket.socket, _sslConn], buf: bytes, dummy: AbstractEventLoop + ) -> None: + view = memoryview(buf) + total_length = len(buf) + total_sent = 0 + # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success + # down to 1ms. + backoff = 0.001 + while total_sent < total_length: + try: + sent = sock.send(view[total_sent:]) + except BLOCKING_IO_ERRORS: + await asyncio.sleep(backoff) + sent = 0 + if sent > 0: + backoff = max(backoff / 2, 0.001) + else: + backoff = min(backoff * 2, 0.512) + total_sent += sent + + async def _async_socket_receive_ssl( + conn: _sslConn, length: int, dummy: AbstractEventLoop, once: Optional[bool] = False + ) -> memoryview: + mv = memoryview(bytearray(length)) + total_read = 0 + # Backoff starts at 1ms, doubles on timeout up to 512ms, and halves on success + # down to 1ms. 
+ backoff = 0.001 + while total_read < length: + try: + read = conn.recv_into(mv[total_read:]) + if read == 0: + raise OSError("connection closed") + # KMS responses update their expected size after the first batch, stop reading after one loop + if once: + return mv[:read] + except BLOCKING_IO_ERRORS: + await asyncio.sleep(backoff) + read = 0 + if read > 0: + backoff = max(backoff / 2, 0.001) + else: + backoff = min(backoff * 2, 0.512) + total_read += read + return mv + + +def sendall(sock: Union[socket.socket, _sslConn], buf: bytes) -> None: + sock.sendall(buf) + + +async def _poll_cancellation(conn: AsyncConnection) -> None: + while True: + if conn.cancel_context.cancelled: + return + + await asyncio.sleep(_POLL_TIMEOUT) + + +async def async_receive_data_socket( + sock: Union[socket.socket, _sslConn], length: int +) -> memoryview: + sock_timeout = sock.gettimeout() + timeout = sock_timeout + + sock.settimeout(0.0) + loop = asyncio.get_running_loop() + try: + if _HAVE_SSL and isinstance(sock, (SSLSocket, _sslConn)): + return await asyncio.wait_for( + _async_socket_receive_ssl(sock, length, loop, once=True), # type: ignore[arg-type] + timeout=timeout, + ) + else: + return await asyncio.wait_for( + _async_socket_receive(sock, length, loop), # type: ignore[arg-type] + timeout=timeout, + ) + except asyncio.TimeoutError as err: + raise socket.timeout("timed out") from err + finally: + sock.settimeout(sock_timeout) + + +async def _async_socket_receive( + conn: socket.socket, length: int, loop: AbstractEventLoop +) -> memoryview: + mv = memoryview(bytearray(length)) + bytes_read = 0 + while bytes_read < length: + chunk_length = await loop.sock_recv_into(conn, mv[bytes_read:]) + if chunk_length == 0: + raise OSError("connection closed") + bytes_read += chunk_length + return mv + + +_PYPY = "PyPy" in sys.version +_WINDOWS = sys.platform == "win32" + + +def wait_for_read(conn: Connection, deadline: Optional[float]) -> None: + """Block until at least one byte is read, or a timeout, or a cancel.""" + sock = conn.conn.sock + timed_out = False + # Check if the connection's socket has been manually closed + if sock.fileno() == -1: + return + while True: + # SSLSocket can have buffered data which won't be caught by select. + if hasattr(sock, "pending") and sock.pending() > 0: + readable = True + else: + # Wait up to 500ms for the socket to become readable and then + # check for cancellation. + if deadline: + remaining = deadline - time.monotonic() + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. + if remaining <= 0: + timed_out = True + timeout = max(min(remaining, _POLL_TIMEOUT), 0) + else: + timeout = _POLL_TIMEOUT + readable = conn.socket_checker.select(sock, read=True, timeout=timeout) + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") + if readable: + return + if timed_out: + raise socket.timeout("timed out") + + +def receive_data(conn: Connection, length: int, deadline: Optional[float]) -> memoryview: + buf = bytearray(length) + mv = memoryview(buf) + bytes_read = 0 + # To support cancelling a network read, we shorten the socket timeout and + # check for the cancellation signal after each timeout. Alternatively we + # could close the socket but that does not reliably cancel recv() calls + # on all OSes. + # When the timeout has expired we perform one final non-blocking recv. 
+ # This helps avoid spurious timeouts when the response is actually already + # buffered on the client. + orig_timeout = conn.conn.gettimeout() + try: + while bytes_read < length: + try: + # Use the legacy wait_for_read cancellation approach on PyPy due to PYTHON-5011. + # also use it on Windows due to PYTHON-5405 + if _PYPY or _WINDOWS: + wait_for_read(conn, deadline) + if _csot.get_timeout() and deadline is not None: + conn.set_conn_timeout(max(deadline - time.monotonic(), 0)) + else: + if deadline is not None: + short_timeout = min(max(deadline - time.monotonic(), 0), _POLL_TIMEOUT) + else: + short_timeout = _POLL_TIMEOUT + conn.set_conn_timeout(short_timeout) + + chunk_length = conn.conn.recv_into(mv[bytes_read:]) + except BLOCKING_IO_ERRORS: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None + # We reached the true deadline. + raise socket.timeout("timed out") from None + except socket.timeout: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None + if ( + _PYPY + or _WINDOWS + or not conn.is_sdam + and deadline is not None + and deadline - time.monotonic() < 0 + ): + # We reached the true deadline. + raise + continue + except OSError as exc: + if conn.cancel_context.cancelled: + raise _OperationCancelled("operation cancelled") from None + if _errno_from_exception(exc) == errno.EINTR: + continue + raise + if chunk_length == 0: + raise OSError("connection closed") + + bytes_read += chunk_length + finally: + conn.set_conn_timeout(orig_timeout) + + return mv + + +class NetworkingInterfaceBase: + def __init__(self, conn: Any): + self.conn = conn + + @property + def gettimeout(self) -> Any: + raise NotImplementedError + + def settimeout(self, timeout: float | None) -> None: + raise NotImplementedError + + def close(self) -> Any: + raise NotImplementedError + + def is_closing(self) -> bool: + raise NotImplementedError + + @property + def get_conn(self) -> Any: + raise NotImplementedError + + @property + def sock(self) -> Any: + raise NotImplementedError + + +class AsyncNetworkingInterface(NetworkingInterfaceBase): + def __init__(self, conn: tuple[Transport, PyMongoProtocol]): + super().__init__(conn) + + @property + def gettimeout(self) -> float | None: + return self.conn[1].gettimeout + + def settimeout(self, timeout: float | None) -> None: + self.conn[1].settimeout(timeout) + + async def close(self) -> None: + self.conn[1].close() + await self.conn[1].wait_closed() + + def is_closing(self) -> bool: + return self.conn[0].is_closing() + + @property + def get_conn(self) -> PyMongoProtocol: + return self.conn[1] + + @property + def sock(self) -> socket.socket: + return self.conn[0].get_extra_info("socket") + + +class NetworkingInterface(NetworkingInterfaceBase): + def __init__(self, conn: Union[socket.socket, _sslConn]): + super().__init__(conn) + + def gettimeout(self) -> float | None: + return self.conn.gettimeout() + + def settimeout(self, timeout: float | None) -> None: + self.conn.settimeout(timeout) + + def close(self) -> None: + self.conn.close() + + def is_closing(self) -> bool: + return self.conn.is_closing() + + @property + def get_conn(self) -> Union[socket.socket, _sslConn]: + return self.conn + + @property + def sock(self) -> Union[socket.socket, _sslConn]: + return self.conn + + def fileno(self) -> int: + return self.conn.fileno() + + def recv_into(self, buffer: bytes | memoryview) -> int: + return self.conn.recv_into(buffer) + + +class PyMongoProtocol(BufferedProtocol): + def 
__init__(self, timeout: Optional[float] = None): + self.transport: Transport = None # type: ignore[assignment] + # Each message is reader in 2-3 parts: header, compression header, and message body + # The message buffer is allocated after the header is read. + self._header = memoryview(bytearray(16)) + self._header_index = 0 + self._compression_header = memoryview(bytearray(9)) + self._compression_index = 0 + self._message: Optional[memoryview] = None + self._message_index = 0 + # State. TODO: replace booleans with an enum? + self._expecting_header = True + self._expecting_compression = False + self._message_size = 0 + self._op_code = 0 + self._connection_lost = False + self._read_waiter: Optional[Future[Any]] = None + self._timeout = timeout + self._is_compressed = False + self._compressor_id: Optional[int] = None + self._max_message_size = MAX_MESSAGE_SIZE + self._response_to: Optional[int] = None + self._closed = asyncio.get_running_loop().create_future() + self._pending_messages: collections.deque[Future[Any]] = collections.deque() + self._done_messages: collections.deque[Future[Any]] = collections.deque() + + def settimeout(self, timeout: float | None) -> None: + self._timeout = timeout + + @property + def gettimeout(self) -> float | None: + """The configured timeout for the socket that underlies our protocol pair.""" + return self._timeout + + def connection_made(self, transport: BaseTransport) -> None: + """Called exactly once when a connection is made. + The transport argument is the transport representing the write side of the connection. + """ + self.transport = transport # type: ignore[assignment] + self.transport.set_write_buffer_limits(MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE) + + async def write(self, message: bytes) -> None: + """Write a message to this connection's transport.""" + if self.transport.is_closing(): + raise OSError("Connection is closed") + self.transport.write(message) + self.transport.resume_reading() + + async def read(self, request_id: Optional[int], max_message_size: int) -> tuple[bytes, int]: + """Read a single MongoDB Wire Protocol message from this connection.""" + if self.transport: + try: + self.transport.resume_reading() + # Known bug in SSL Protocols, fixed in Python 3.11: https://github.com/python/cpython/issues/89322 + except AttributeError: + raise OSError("connection is already closed") from None + self._max_message_size = max_message_size + if self._done_messages: + message = await self._done_messages.popleft() + else: + if self.transport and self.transport.is_closing(): + raise OSError("connection is already closed") + read_waiter = asyncio.get_running_loop().create_future() + self._pending_messages.append(read_waiter) + try: + message = await read_waiter + finally: + if read_waiter in self._done_messages: + self._done_messages.remove(read_waiter) + if message: + op_code, compressor_id, response_to, data = message + # No request_id for exhaust cursor "getMore". + if request_id is not None: + if request_id != response_to: + raise ProtocolError( + f"Got response id {response_to!r} but expected {request_id!r}" + ) + if compressor_id is not None: + data = decompress(data, compressor_id) + return data, op_code + raise OSError("connection closed") + + def get_buffer(self, sizehint: int) -> memoryview: + """Called to allocate a new receive buffer. + The asyncio loop calls this method expecting to receive a non-empty buffer to fill with data. 
+ If any data does not fit into the returned buffer, this method will be called again until + either no data remains or an empty buffer is returned. + """ + # Due to a bug, Python <=3.11 will call get_buffer() even after we raise + # ProtocolError in buffer_updated() and call connection_lost(). We allocate + # a temp buffer to drain the waiting data. + if self._connection_lost: + if not self._message: + self._message = memoryview(bytearray(2**14)) + return self._message + # TODO: optimize this by caching pointers to the buffers. + # return self._buffer[self._index:] + if self._expecting_header: + return self._header[self._header_index :] + if self._expecting_compression: + return self._compression_header[self._compression_index :] + return self._message[self._message_index :] # type: ignore[index] + + def buffer_updated(self, nbytes: int) -> None: + """Called when the buffer was updated with the received data""" + # Wrote 0 bytes into a non-empty buffer, signal connection closed + if nbytes == 0: + self.close(OSError("connection closed")) + return + if self._connection_lost: + return + if self._expecting_header: + self._header_index += nbytes + if self._header_index >= 16: + self._expecting_header = False + try: + ( + self._message_size, + self._op_code, + self._response_to, + self._expecting_compression, + ) = self.process_header() + except ProtocolError as exc: + self.close(exc) + return + self._message = memoryview(bytearray(self._message_size)) + return + if self._expecting_compression: + self._compression_index += nbytes + if self._compression_index >= 9: + self._expecting_compression = False + self._op_code, self._compressor_id = self.process_compression_header() + return + + self._message_index += nbytes + if self._message_index >= self._message_size: + self._expecting_header = True + # Pause reading to avoid storing an arbitrary number of messages in memory. 
+ self.transport.pause_reading() + if self._pending_messages: + result = self._pending_messages.popleft() + else: + result = asyncio.get_running_loop().create_future() + # Future has been cancelled, close this connection + if result.done(): + self.close(None) + return + # Necessary values to reconstruct and verify message + result.set_result( + (self._op_code, self._compressor_id, self._response_to, self._message) + ) + self._done_messages.append(result) + # Reset internal state to expect a new message + self._header_index = 0 + self._compression_index = 0 + self._message_index = 0 + self._message_size = 0 + self._message = None + self._op_code = 0 + self._compressor_id = None + self._response_to = None + + def process_header(self) -> tuple[int, int, int, bool]: + """Unpack a MongoDB Wire Protocol header.""" + length, _, response_to, op_code = _UNPACK_HEADER(self._header) + expecting_compression = False + if op_code == 2012: # OP_COMPRESSED + if length <= 25: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard OP_COMPRESSED message header size (25)" + ) + expecting_compression = True + length -= 9 + if length <= 16: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard message header size (16)" + ) + if length > self._max_message_size: + raise ProtocolError( + f"Message length ({length!r}) is larger than server max " + f"message size ({self._max_message_size!r})" + ) + + return length - 16, op_code, response_to, expecting_compression + + def process_compression_header(self) -> tuple[int, int]: + """Unpack a MongoDB Wire Protocol compression header.""" + op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(self._compression_header) + return op_code, compressor_id + + def _resolve_pending_messages(self, exc: Optional[Exception] = None) -> None: + pending = list(self._pending_messages) + for msg in pending: + if not msg.done(): + if exc is None: + msg.set_result(None) + else: + msg.set_exception(exc) + self._done_messages.append(msg) + + def close(self, exc: Optional[Exception] = None) -> None: + self.transport.abort() + self._resolve_pending_messages(exc) + self._connection_lost = True + + def connection_lost(self, exc: Optional[Exception] = None) -> None: + self._resolve_pending_messages(exc) + if not self._closed.done(): + self._closed.set_result(None) + + async def wait_closed(self) -> None: + await self._closed + + +async def async_sendall(conn: PyMongoProtocol, buf: bytes) -> None: + try: + await asyncio.wait_for(conn.write(buf), timeout=conn.gettimeout) + except asyncio.TimeoutError as exc: + # Convert the asyncio.wait_for timeout error to socket.timeout which pool.py understands. + raise socket.timeout("timed out") from exc + + +async def async_receive_message( + conn: AsyncConnection, + request_id: Optional[int], + max_message_size: int = MAX_MESSAGE_SIZE, +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" + timeout: Optional[Union[float, int]] + timeout = conn.conn.gettimeout + if _csot.get_timeout(): + deadline = _csot.get_deadline() + else: + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None + if deadline: + # When the timeout has expired perform one final check to + # see if the socket is readable. This helps avoid spurious + # timeouts on AWS Lambda and other FaaS environments. 
+ timeout = max(deadline - time.monotonic(), 0) + + cancellation_task = create_task(_poll_cancellation(conn)) + read_task = create_task(conn.conn.get_conn.read(request_id, max_message_size)) + tasks = [read_task, cancellation_task] + try: + done, pending = await asyncio.wait( + tasks, timeout=timeout, return_when=asyncio.FIRST_COMPLETED + ) + for task in pending: + task.cancel() + if pending: + await asyncio.wait(pending) + if len(done) == 0: + raise socket.timeout("timed out") + if read_task in done: + data, op_code = read_task.result() + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) + raise _OperationCancelled("operation cancelled") + except asyncio.CancelledError: + for task in tasks: + task.cancel() + await asyncio.wait(tasks) + raise + + +def receive_message( + conn: Connection, request_id: Optional[int], max_message_size: int = MAX_MESSAGE_SIZE +) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise socket.error.""" + if _csot.get_timeout(): + deadline = _csot.get_deadline() + else: + timeout = conn.conn.gettimeout() + if timeout: + deadline = time.monotonic() + timeout + else: + deadline = None + # Ignore the response's request id. + length, _, response_to, op_code = _UNPACK_HEADER(receive_data(conn, 16, deadline)) + # No request_id for exhaust cursor "getMore". + if request_id is not None: + if request_id != response_to: + raise ProtocolError(f"Got response id {response_to!r} but expected {request_id!r}") + if length <= 16: + raise ProtocolError( + f"Message length ({length!r}) not longer than standard message header size (16)" + ) + if length > max_message_size: + raise ProtocolError( + f"Message length ({length!r}) is larger than server max " + f"message size ({max_message_size!r})" + ) + data: memoryview | bytes + if op_code == 2012: + op_code, _, compressor_id = _UNPACK_COMPRESSION_HEADER(receive_data(conn, 9, deadline)) + data = decompress(receive_data(conn, length - 25, deadline), compressor_id) + else: + data = receive_data(conn, length - 16, deadline) + + try: + unpack_reply = _UNPACK_REPLY[op_code] + except KeyError: + raise ProtocolError( + f"Got opcode {op_code!r} but expected {_UNPACK_REPLY.keys()!r}" + ) from None + return unpack_reply(data) diff --git a/pymongo/ocsp_cache.py b/pymongo/ocsp_cache.py index 24507260ed..2df232848f 100644 --- a/pymongo/ocsp_cache.py +++ b/pymongo/ocsp_cache.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -14,12 +14,36 @@ """Utilities for caching OCSP responses.""" +from __future__ import annotations + from collections import namedtuple from datetime import datetime as _datetime -from threading import Lock +from datetime import timezone +from typing import TYPE_CHECKING, Any, Optional + +from pymongo.lock import _create_lock + +if TYPE_CHECKING: + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + + +def _next_update(value: OCSPResponse) -> Optional[_datetime]: + """Compat helper to return the response's next_update_utc.""" + # Added in cryptography 43.0.0. 
+ if hasattr(value, "next_update_utc"): + return value.next_update_utc + return value.next_update -class _OCSPCache(object): +def _this_update(value: OCSPResponse) -> Optional[_datetime]: + """Compat helper to return the response's this_update_utc.""" + # Added in cryptography 43.0.0. + if hasattr(value, "this_update_utc"): + return value.this_update_utc + return value.this_update + + +class _OCSPCache: """A cache for OCSP responses.""" CACHE_KEY_TYPE = namedtuple( # type: ignore @@ -27,12 +51,12 @@ class _OCSPCache(object): ["hash_algorithm", "issuer_name_hash", "issuer_key_hash", "serial_number"], ) - def __init__(self): - self._data = {} + def __init__(self) -> None: + self._data: dict[Any, OCSPResponse] = {} # Hold this lock when accessing _data. - self._lock = Lock() + self._lock = _create_lock() - def _get_cache_key(self, ocsp_request): + def _get_cache_key(self, ocsp_request: OCSPRequest) -> CACHE_KEY_TYPE: return self.CACHE_KEY_TYPE( hash_algorithm=ocsp_request.hash_algorithm.name.lower(), issuer_name_hash=ocsp_request.issuer_name_hash, @@ -40,7 +64,7 @@ def _get_cache_key(self, ocsp_request): serial_number=ocsp_request.serial_number, ) - def __setitem__(self, key, value): + def __setitem__(self, key: OCSPRequest, value: OCSPResponse) -> None: """Add/update a cache entry. 'key' is of type cryptography.x509.ocsp.OCSPRequest @@ -54,21 +78,33 @@ def __setitem__(self, key, value): # As per the OCSP protocol, if the response's nextUpdate field is # not set, the responder is indicating that newer revocation # information is available all the time. - if value.next_update is None: + next_update = _next_update(value) + if next_update is None: self._data.pop(cache_key, None) return + this_update = _this_update(value) + if this_update is None: + return + now = _datetime.now(tz=timezone.utc) + if this_update.tzinfo is None: + # Make naive to match cryptography. + now = now.replace(tzinfo=None) # Do nothing if the response is invalid. - if not (value.this_update <= _datetime.utcnow() < value.next_update): + if not (this_update <= now < next_update): return # Cache new response OR update cached response if new response # has longer validity. cached_value = self._data.get(cache_key, None) - if cached_value is None or cached_value.next_update < value.next_update: + if cached_value is None: + self._data[cache_key] = value + return + cached_next_update = _next_update(cached_value) + if cached_next_update is not None and cached_next_update < next_update: self._data[cache_key] = value - def __getitem__(self, item): + def __getitem__(self, item: OCSPRequest) -> OCSPResponse: """Get a cache entry if it exists. 'item' is of type cryptography.x509.ocsp.OCSPRequest @@ -80,7 +116,15 @@ def __getitem__(self, item): value = self._data[cache_key] # Return cached response if it is still valid. - if value.this_update <= _datetime.utcnow() < value.next_update: + this_update = _this_update(value) + next_update = _next_update(value) + assert this_update is not None + assert next_update is not None + now = _datetime.now(tz=timezone.utc) + if this_update.tzinfo is None: + # Make naive to match cryptography. + now = now.replace(tzinfo=None) + if this_update <= now < next_update: return value self._data.pop(cache_key, None) diff --git a/pymongo/ocsp_support.py b/pymongo/ocsp_support.py index 3a201f1f5e..8322f821fb 100644 --- a/pymongo/ocsp_support.py +++ b/pymongo/ocsp_support.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. 
You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,10 +13,13 @@ # permissions and limitations under the License. """Support for requesting and verifying OCSP responses.""" +from __future__ import annotations import logging as _logging import re as _re from datetime import datetime as _datetime +from datetime import timezone +from typing import TYPE_CHECKING, Iterable, Optional, Type, Union from cryptography.exceptions import InvalidSignature as _InvalidSignature from cryptography.hazmat.backends import default_backend as _default_backend @@ -27,6 +30,12 @@ ) from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15 as _PKCS1v15 from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey as _RSAPublicKey +from cryptography.hazmat.primitives.asymmetric.x448 import ( + X448PublicKey as _X448PublicKey, +) +from cryptography.hazmat.primitives.asymmetric.x25519 import ( + X25519PublicKey as _X25519PublicKey, +) from cryptography.hazmat.primitives.hashes import SHA1 as _SHA1 from cryptography.hazmat.primitives.hashes import Hash as _Hash from cryptography.hazmat.primitives.serialization import Encoding as _Encoding @@ -49,6 +58,37 @@ from requests.exceptions import RequestException as _RequestException from pymongo import _csot +from pymongo.ocsp_cache import _next_update, _this_update + +if TYPE_CHECKING: + from cryptography.hazmat.primitives.asymmetric import ( + dsa, + ec, + ed448, + ed25519, + rsa, + x448, + x25519, + ) + from cryptography.hazmat.primitives.asymmetric.utils import Prehashed + from cryptography.hazmat.primitives.hashes import HashAlgorithm + from cryptography.x509 import Certificate, Name + from cryptography.x509.extensions import Extension, ExtensionTypeVar + from cryptography.x509.ocsp import OCSPRequest, OCSPResponse + from OpenSSL.SSL import Connection + + from pymongo.ocsp_cache import _OCSPCache + from pymongo.pyopenssl_context import _CallbackData + + CertificateIssuerPublicKeyTypes = Union[ + dsa.DSAPublicKey, + rsa.RSAPublicKey, + ec.EllipticCurvePublicKey, + ed25519.Ed25519PublicKey, + ed448.Ed448PublicKey, + x25519.X25519PublicKey, + x448.X448PublicKey, + ] # Note: the functions in this module generally return 1 or 0. The reason # is simple. The entry point, ocsp_callback, is registered as a callback @@ -62,7 +102,7 @@ ) -def _load_trusted_ca_certs(cafile): +def _load_trusted_ca_certs(cafile: str) -> list[Certificate]: """Parse the tlsCAFile into a list of certificates.""" with open(cafile, "rb") as f: data = f.read() @@ -75,7 +115,9 @@ def _load_trusted_ca_certs(cafile): return trusted_ca_certs -def _get_issuer_cert(cert, chain, trusted_ca_certs): +def _get_issuer_cert( + cert: Certificate, chain: Iterable[Certificate], trusted_ca_certs: Optional[list[Certificate]] +) -> Optional[Certificate]: issuer_name = cert.issuer for candidate in chain: if candidate.subject == issuer_name: @@ -92,16 +134,25 @@ def _get_issuer_cert(cert, chain, trusted_ca_certs): return None -def _verify_signature(key, signature, algorithm, data): +def _verify_signature( + key: CertificateIssuerPublicKeyTypes, + signature: bytes, + algorithm: Union[Prehashed, HashAlgorithm, None], + data: bytes, +) -> int: # See cryptography.x509.Certificate.public_key # for the public key types. 
try: if isinstance(key, _RSAPublicKey): - key.verify(signature, data, _PKCS1v15(), algorithm) + key.verify(signature, data, _PKCS1v15(), algorithm) # type: ignore[arg-type] elif isinstance(key, _DSAPublicKey): - key.verify(signature, data, algorithm) + key.verify(signature, data, algorithm) # type: ignore[arg-type] elif isinstance(key, _EllipticCurvePublicKey): - key.verify(signature, data, _ECDSA(algorithm)) + key.verify(signature, data, _ECDSA(algorithm)) # type: ignore[arg-type] + elif isinstance( + key, (_X25519PublicKey, _X448PublicKey) + ): # Curve25519 and Curve448 keys do not require verification + return 1 else: key.verify(signature, data) except _InvalidSignature: @@ -109,14 +160,16 @@ def _verify_signature(key, signature, algorithm, data): return 1 -def _get_extension(cert, klass): +def _get_extension( + cert: Certificate, klass: Type[ExtensionTypeVar] +) -> Optional[Extension[ExtensionTypeVar]]: try: return cert.extensions.get_extension_for_class(klass) except _ExtensionNotFound: return None -def _public_key_hash(cert): +def _public_key_hash(cert: Certificate) -> bytes: public_key = cert.public_key() # https://tools.ietf.org/html/rfc2560#section-4.2.1 # "KeyHash ::= OCTET STRING -- SHA-1 hash of responder's public key @@ -128,12 +181,14 @@ def _public_key_hash(cert): pbytes = public_key.public_bytes(_Encoding.X962, _PublicFormat.UncompressedPoint) else: pbytes = public_key.public_bytes(_Encoding.DER, _PublicFormat.SubjectPublicKeyInfo) - digest = _Hash(_SHA1(), backend=_default_backend()) + digest = _Hash(_SHA1(), backend=_default_backend()) # noqa: S303 digest.update(pbytes) return digest.finalize() -def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): +def _get_certs_by_key_hash( + certificates: Iterable[Certificate], issuer: Certificate, responder_key_hash: Optional[bytes] +) -> list[Certificate]: return [ cert for cert in certificates @@ -141,7 +196,9 @@ def _get_certs_by_key_hash(certificates, issuer, responder_key_hash): ] -def _get_certs_by_name(certificates, issuer, responder_name): +def _get_certs_by_name( + certificates: Iterable[Certificate], issuer: Certificate, responder_name: Optional[Name] +) -> list[Certificate]: return [ cert for cert in certificates @@ -149,7 +206,7 @@ def _get_certs_by_name(certificates, issuer, responder_name): ] -def _verify_response_signature(issuer, response): +def _verify_response_signature(issuer: Certificate, response: OCSPResponse) -> int: # Response object will have a responder_name or responder_key_hash # not both. name = response.responder_name @@ -203,14 +260,14 @@ def _verify_response_signature(issuer, response): return ret -def _build_ocsp_request(cert, issuer): +def _build_ocsp_request(cert: Certificate, issuer: Certificate) -> OCSPRequest: # https://cryptography.io/en/latest/x509/ocsp/#creating-requests builder = _OCSPRequestBuilder() - builder = builder.add_certificate(cert, issuer, _SHA1()) + builder = builder.add_certificate(cert, issuer, _SHA1()) # noqa: S303 return builder.build() -def _verify_response(issuer, response): +def _verify_response(issuer: Certificate, response: OCSPResponse) -> int: _LOGGER.debug("Verifying response") # RFC6960, Section 3.2, Number 2, 3 and 4 happen here. res = _verify_response_signature(issuer, response) @@ -219,19 +276,26 @@ def _verify_response(issuer, response): # Note that we are not using a "tolerance period" as discussed in # https://tools.ietf.org/rfc/rfc5019.txt? 
- now = _datetime.utcnow() + this_update = _this_update(response) + now = _datetime.now(tz=timezone.utc) + if this_update and this_update.tzinfo is None: + # Make naive to match cryptography. + now = now.replace(tzinfo=None) # RFC6960, Section 3.2, Number 5 - if response.this_update > now: + if this_update and this_update > now: _LOGGER.debug("thisUpdate is in the future") return 0 # RFC6960, Section 3.2, Number 6 - if response.next_update and response.next_update < now: + next_update = _next_update(response) + if next_update and next_update < now: _LOGGER.debug("nextUpdate is in the past") return 0 return 1 -def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): +def _get_ocsp_response( + cert: Certificate, issuer: Certificate, uri: Union[str, bytes], ocsp_response_cache: _OCSPCache +) -> Optional[OCSPResponse]: ocsp_request = _build_ocsp_request(cert, issuer) try: ocsp_response = ocsp_response_cache[ocsp_request] @@ -274,30 +338,32 @@ def _get_ocsp_response(cert, issuer, uri, ocsp_response_cache): return ocsp_response -def _ocsp_callback(conn, ocsp_bytes, user_data): +def _ocsp_callback(conn: Connection, ocsp_bytes: bytes, user_data: Optional[_CallbackData]) -> bool: """Callback for use with OpenSSL.SSL.Context.set_ocsp_client_callback.""" - cert = conn.get_peer_certificate() - if cert is None: + # always pass in user_data but OpenSSL requires it be optional + assert user_data + pycert = conn.get_peer_certificate() + if pycert is None: _LOGGER.debug("No peer cert?") - return 0 - cert = cert.to_cryptography() + return False + cert = pycert.to_cryptography() # Use the verified chain when available (pyopenssl>=20.0). if hasattr(conn, "get_verified_chain"): - chain = conn.get_verified_chain() + pychain = conn.get_verified_chain() trusted_ca_certs = None else: - chain = conn.get_peer_cert_chain() + pychain = conn.get_peer_cert_chain() trusted_ca_certs = user_data.trusted_ca_certs - if not chain: + if not pychain: _LOGGER.debug("No peer cert chain?") - return 0 - chain = [cer.to_cryptography() for cer in chain] + return False + chain = [cer.to_cryptography() for cer in pychain] issuer = _get_issuer_cert(cert, chain, trusted_ca_certs) must_staple = False # https://tools.ietf.org/html/rfc7633#section-4.2.3.1 - ext = _get_extension(cert, _TLSFeature) - if ext is not None: - for feature in ext.value: + ext_tls = _get_extension(cert, _TLSFeature) + if ext_tls is not None: + for feature in ext_tls.value: if feature == _TLSFeatureType.status_request: _LOGGER.debug("Peer presented a must-staple cert") must_staple = True @@ -309,29 +375,29 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): _LOGGER.debug("Peer did not staple an OCSP response") if must_staple: _LOGGER.debug("Must-staple cert with no stapled response, hard fail.") - return 0 + return False if not user_data.check_ocsp_endpoint: _LOGGER.debug("OCSP endpoint checking is disabled, soft fail.") - # No stapled OCSP response, checking responder URI diabled, soft fail. - return 1 + # No stapled OCSP response, checking responder URI disabled, soft fail. + return True # https://tools.ietf.org/html/rfc6960#section-3.1 - ext = _get_extension(cert, _AuthorityInformationAccess) - if ext is None: + ext_aia = _get_extension(cert, _AuthorityInformationAccess) + if ext_aia is None: _LOGGER.debug("No authority access information, soft fail") # No stapled OCSP response, no responder URI, soft fail. 
- return 1 + return True uris = [ desc.access_location.value - for desc in ext.value + for desc in ext_aia.value if desc.access_method == _AuthorityInformationAccessOID.OCSP ] if not uris: _LOGGER.debug("No OCSP URI, soft fail") # No responder URI, soft fail. - return 1 + return True if issuer is None: _LOGGER.debug("No issuer cert?") - return 0 + return False _LOGGER.debug("Requesting OCSP data") # When requesting data from an OCSP endpoint we only fail on # successful, valid responses with a certificate status of REVOKED. @@ -345,28 +411,28 @@ def _ocsp_callback(conn, ocsp_bytes, user_data): continue _LOGGER.debug("OCSP cert status: %r", response.certificate_status) if response.certificate_status == _OCSPCertStatus.GOOD: - return 1 + return True if response.certificate_status == _OCSPCertStatus.REVOKED: - return 0 + return False # Soft fail if we couldn't get a definitive status. _LOGGER.debug("No definitive OCSP cert status, soft fail") - return 1 + return True _LOGGER.debug("Peer stapled an OCSP response") if issuer is None: _LOGGER.debug("No issuer cert?") - return 0 + return False response = _load_der_ocsp_response(ocsp_bytes) _LOGGER.debug("OCSP response status: %r", response.response_status) # This happens in _request_ocsp when there is no stapled response so # we know if we can compare serial numbers for the request and response. if response.response_status != _OCSPResponseStatus.SUCCESSFUL: - return 0 + return False if not _verify_response(issuer, response): - return 0 + return False # Cache the verified, stapled response. ocsp_response_cache[_build_ocsp_request(cert, issuer)] = response _LOGGER.debug("OCSP cert status: %r", response.certificate_status) if response.certificate_status == _OCSPCertStatus.REVOKED: - return 0 - return 1 + return False + return True diff --git a/pymongo/operations.py b/pymongo/operations.py index 84e8bf4d35..73fb8b5f36 100644 --- a/pymongo/operations.py +++ b/pymongo/operations.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,195 +12,357 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Operation class definitions.""" -from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union - -from pymongo import helpers +"""Operation class definitions. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. 
+""" +from __future__ import annotations + +import enum +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) + +from bson.raw_bson import RawBSONDocument +from pymongo import helpers_shared from pymongo.collation import validate_collation_or_none -from pymongo.common import validate_boolean, validate_is_mapping, validate_list -from pymongo.helpers import _gen_index_name, _index_document, _index_list -from pymongo.typings import _CollationIn, _DocumentIn, _Pipeline +from pymongo.common import validate_is_mapping, validate_list +from pymongo.errors import InvalidOperation +from pymongo.helpers_shared import _gen_index_name, _index_document, _index_list +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from pymongo.typings import _AgnosticBulk, _AgnosticClientBulk + + +# Hint supports index name, "myIndex", a list of either strings or index pairs: [('x', 1), ('y', -1), 'z''], or a dictionary +_IndexList = Union[ + Sequence[Union[str, Tuple[str, Union[int, str, Mapping[str, Any]]]]], Mapping[str, Any] +] +_IndexKeyHint = Union[str, _IndexList] -class InsertOne(object): +class _Op(str, enum.Enum): + ABORT = "abortTransaction" + AGGREGATE = "aggregate" + BULK_WRITE = "bulkWrite" + COMMIT = "commitTransaction" + COUNT = "count" + CREATE = "create" + CREATE_INDEXES = "createIndexes" + CREATE_SEARCH_INDEXES = "createSearchIndexes" + DELETE = "delete" + DISTINCT = "distinct" + DROP = "drop" + DROP_DATABASE = "dropDatabase" + DROP_INDEXES = "dropIndexes" + DROP_SEARCH_INDEXES = "dropSearchIndexes" + END_SESSIONS = "endSessions" + FIND_AND_MODIFY = "findAndModify" + FIND = "find" + INSERT = "insert" + LIST_COLLECTIONS = "listCollections" + LIST_INDEXES = "listIndexes" + LIST_SEARCH_INDEX = "listSearchIndexes" + LIST_DATABASES = "listDatabases" + UPDATE = "update" + UPDATE_INDEX = "updateIndex" + UPDATE_SEARCH_INDEX = "updateSearchIndex" + RENAME = "rename" + GETMORE = "getMore" + KILL_CURSORS = "killCursors" + TEST = "testOperation" + + +class InsertOne(Generic[_DocumentType]): """Represents an insert_one operation.""" - __slots__ = ("_doc",) + __slots__ = ( + "_doc", + "_namespace", + ) - def __init__(self, document: _DocumentIn) -> None: + def __init__(self, document: _DocumentType, namespace: Optional[str] = None) -> None: """Create an InsertOne instance. - For use with :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. - :Parameters: - - `document`: The document to insert. If the document is missing an + :param document: The document to insert. If the document is missing an _id field one will be added. + :param namespace: (optional) The namespace in which to insert a document. + + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. 
""" self._doc = document + self._namespace = namespace - def _add_to_bulk(self, bulkobj): - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_insert(self._doc) + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_insert(self._doc) # type: ignore[arg-type] - def __repr__(self): - return "InsertOne(%r)" % (self._doc,) + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_insert( + self._namespace, + self._doc, # type: ignore[arg-type] + ) + + def __repr__(self) -> str: + if self._namespace: + return f"{self.__class__.__name__}({self._doc!r}, {self._namespace!r})" + return f"{self.__class__.__name__}({self._doc!r})" def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return other._doc == self._doc + return other._doc == self._doc and other._namespace == self._namespace return NotImplemented def __ne__(self, other: Any) -> bool: return not self == other -_IndexList = Sequence[Tuple[str, Union[int, str, Mapping[str, Any]]]] -_IndexKeyHint = Union[str, _IndexList] +class _DeleteOp: + """Private base class for delete operations.""" + + __slots__ = ( + "_filter", + "_collation", + "_hint", + "_namespace", + ) + + def __init__( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + ) -> None: + if filter is not None: + validate_is_mapping("filter", filter) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) + else: + self._hint = hint + + self._filter = filter + self._collation = collation + self._namespace = namespace + + def __eq__(self, other: Any) -> bool: + if type(other) == type(self): + return ( + other._filter, + other._collation, + other._hint, + other._namespace, + ) == ( + self._filter, + self._collation, + self._hint, + self._namespace, + ) + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._collation, + self._hint, + self._namespace, + ) + return f"{self.__class__.__name__}({self._filter!r}, {self._collation!r}, {self._hint!r})" -class DeleteOne(object): +class DeleteOne(_DeleteOp): """Represents a delete_one operation.""" - __slots__ = ("_filter", "_collation", "_hint") + __slots__ = () def __init__( self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, ) -> None: """Create a DeleteOne instance. - For use with :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. - :Parameters: - - `filter`: A query that matches the document to delete. - - `collation` (optional): An instance of + :param filter: A query that matches the document to delete. 
+ :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. + :param namespace: (optional) The namespace in which to delete a document. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.5 Added the `collation` option. """ - if filter is not None: - validate_is_mapping("filter", filter) - if hint is not None: - if not isinstance(hint, str): - hint = helpers._index_document(hint) - self._filter = filter - self._collation = collation - self._hint = hint - - def _add_to_bulk(self, bulkobj): - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 1, collation=self._collation, hint=self._hint) + super().__init__(filter, collation, hint, namespace) - def __repr__(self): - return "DeleteOne(%r, %r)" % (self._filter, self._collation) - - def __eq__(self, other: Any) -> bool: - if type(other) == type(self): - return (other._filter, other._collation) == (self._filter, self._collation) - return NotImplemented + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_delete( + self._filter, + 1, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) - def __ne__(self, other: Any) -> bool: - return not self == other + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_delete( + self._namespace, + self._filter, + multi=False, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) -class DeleteMany(object): +class DeleteMany(_DeleteOp): """Represents a delete_many operation.""" - __slots__ = ("_filter", "_collation", "_hint") + __slots__ = () def __init__( self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, ) -> None: """Create a DeleteMany instance. - For use with :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. - :Parameters: - - `filter`: A query that matches the documents to delete. - - `collation` (optional): An instance of + :param filter: A query that matches the documents to delete. + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. 
+ :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.4 and above. + :param namespace: (optional) The namespace in which to delete documents. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.5 Added the `collation` option. """ - if filter is not None: - validate_is_mapping("filter", filter) - if hint is not None: - if not isinstance(hint, str): - hint = helpers._index_document(hint) - self._filter = filter - self._collation = collation - self._hint = hint - - def _add_to_bulk(self, bulkobj): - """Add this operation to the _Bulk instance `bulkobj`.""" - bulkobj.add_delete(self._filter, 0, collation=self._collation, hint=self._hint) - - def __repr__(self): - return "DeleteMany(%r, %r)" % (self._filter, self._collation) + super().__init__(filter, collation, hint, namespace) - def __eq__(self, other: Any) -> bool: - if type(other) == type(self): - return (other._filter, other._collation) == (self._filter, self._collation) - return NotImplemented + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" + bulkobj.add_delete( + self._filter, + 0, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) - def __ne__(self, other: Any) -> bool: - return not self == other + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_delete( + self._namespace, + self._filter, + multi=True, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + ) -class ReplaceOne(object): +class ReplaceOne(Generic[_DocumentType]): """Represents a replace_one operation.""" - __slots__ = ("_filter", "_doc", "_upsert", "_collation", "_hint") + __slots__ = ( + "_filter", + "_doc", + "_upsert", + "_collation", + "_hint", + "_namespace", + "_sort", + ) def __init__( self, filter: Mapping[str, Any], - replacement: Mapping[str, Any], - upsert: bool = False, + replacement: Union[_DocumentType, RawBSONDocument], + upsert: Optional[bool] = None, collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Create a ReplaceOne instance. - For use with :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. - :Parameters: - - `filter`: A query that matches the document to replace. - - `replacement`: The new document. - - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents match the filter. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. 
- - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. - + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + :param namespace: (optional) The namespace in which to replace a document. + + .. versionchanged:: 4.10 + Added ``sort`` option. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the ``hint`` option. .. versionchanged:: 3.5 @@ -210,72 +372,137 @@ def __init__( validate_is_mapping("filter", filter) if upsert is not None: validate_boolean("upsert", upsert) - if hint is not None: - if not isinstance(hint, str): - hint = helpers._index_document(hint) + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) + else: + self._hint = hint + self._sort = sort self._filter = filter self._doc = replacement self._upsert = upsert self._collation = collation - self._hint = hint + self._namespace = namespace - def _add_to_bulk(self, bulkobj): - """Add this operation to the _Bulk instance `bulkobj`.""" + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" bulkobj.add_replace( - self._filter, self._doc, self._upsert, collation=self._collation, hint=self._hint + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + sort=self._sort, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_replace( + self._namespace, + self._filter, + self._doc, + self._upsert, + collation=validate_collation_or_none(self._collation), + hint=self._hint, + sort=self._sort, ) def __eq__(self, other: Any) -> bool: if type(other) == type(self): - return (other._filter, other._doc, other._upsert, other._collation, other._hint) == ( + return ( + other._filter, + other._doc, + other._upsert, + other._collation, + other._hint, + other._namespace, + other._sort, + ) == ( self._filter, self._doc, self._upsert, self._collation, - other._hint, + self._hint, + self._namespace, + self._sort, ) return NotImplemented def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): - return "%s(%r, %r, %r, %r, %r)" % ( + def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._hint, + self._namespace, + self._sort, + ) + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, self._upsert, self._collation, self._hint, + self._sort, ) -class _UpdateOp(object): +class _UpdateOp: """Private base class for update operations.""" - __slots__ = 
("_filter", "_doc", "_upsert", "_collation", "_array_filters", "_hint") + __slots__ = ( + "_filter", + "_doc", + "_upsert", + "_collation", + "_array_filters", + "_hint", + "_namespace", + "_sort", + ) - def __init__(self, filter, doc, upsert, collation, array_filters, hint): + def __init__( + self, + filter: Mapping[str, Any], + doc: Union[Mapping[str, Any], _Pipeline], + upsert: Optional[bool], + collation: Optional[_CollationIn], + array_filters: Optional[list[Mapping[str, Any]]], + hint: Optional[_IndexKeyHint], + namespace: Optional[str], + sort: Optional[Mapping[str, Any]], + ): if filter is not None: validate_is_mapping("filter", filter) if upsert is not None: validate_boolean("upsert", upsert) if array_filters is not None: validate_list("array_filters", array_filters) - if hint is not None: - if not isinstance(hint, str): - hint = helpers._index_document(hint) - + if hint is not None and not isinstance(hint, str): + self._hint: Union[str, dict[str, Any], None] = helpers_shared._index_document(hint) + else: + self._hint = hint self._filter = filter self._doc = doc self._upsert = upsert self._collation = collation self._array_filters = array_filters - self._hint = hint + self._namespace = namespace + self._sort = sort - def __eq__(self, other): - if type(other) == type(self): + def __eq__(self, other: object) -> bool: + if isinstance(other, type(self)): return ( other._filter, other._doc, @@ -283,6 +510,8 @@ def __eq__(self, other): other._collation, other._array_filters, other._hint, + other._namespace, + other._sort, ) == ( self._filter, self._doc, @@ -290,14 +519,28 @@ def __eq__(self, other): self._collation, self._array_filters, self._hint, + self._namespace, + self._sort, ) return NotImplemented - def __ne__(self, other): + def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): - return "%s(%r, %r, %r, %r, %r, %r)" % ( + def __repr__(self) -> str: + if self._namespace: + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( + self.__class__.__name__, + self._filter, + self._doc, + self._upsert, + self._collation, + self._array_filters, + self._hint, + self._namespace, + self._sort, + ) + return "{}({!r}, {!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format( self.__class__.__name__, self._filter, self._doc, @@ -305,6 +548,7 @@ def __repr__(self): self._collation, self._array_filters, self._hint, + self._sort, ) @@ -317,31 +561,40 @@ def __init__( self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, + upsert: Optional[bool] = None, collation: Optional[_CollationIn] = None, - array_filters: Optional[List[Mapping[str, Any]]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, + sort: Optional[Mapping[str, Any]] = None, ) -> None: """Represents an update_one operation. - For use with :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. - :Parameters: - - `filter`: A query that matches the document to update. - - `update`: The modifications to apply. - - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the document to update. + :param update: The modifications to apply. 
+ :param upsert: If ``True``, perform an insert if no documents match the filter. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `array_filters` (optional): A list of filters specifying which + :param array_filters: A list of filters specifying which array elements an update should apply. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. - + :param namespace: The namespace in which to update a document. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + + .. versionchanged:: 4.10 + Added ``sort`` option. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the `hint` option. .. versionchanged:: 3.9 @@ -351,18 +604,37 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateOne, self).__init__(filter, update, upsert, collation, array_filters, hint) + super().__init__(filter, update, upsert, collation, array_filters, hint, namespace, sort) - def _add_to_bulk(self, bulkobj): - """Add this operation to the _Bulk instance `bulkobj`.""" + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" bulkobj.add_update( + self._filter, + self._doc, + False, + bool(self._upsert), + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + sort=self._sort, + ) + + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_update( + self._namespace, self._filter, self._doc, False, self._upsert, - collation=self._collation, + collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, + sort=self._sort, ) @@ -375,31 +647,35 @@ def __init__( self, filter: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], - upsert: bool = False, + upsert: Optional[bool] = None, collation: Optional[_CollationIn] = None, - array_filters: Optional[List[Mapping[str, Any]]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Optional[_IndexKeyHint] = None, + namespace: Optional[str] = None, ) -> None: """Create an UpdateMany instance. - For use with :meth:`~pymongo.collection.Collection.bulk_write`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.bulk_write`, :meth:`~pymongo.collection.Collection.bulk_write`, + :meth:`~pymongo.asynchronous.mongo_client.AsyncMongoClient.bulk_write` and :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. - :Parameters: - - `filter`: A query that matches the documents to update. - - `update`: The modifications to apply. 
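The new ``sort`` option on UpdateOne also flows through the collection-level bulk API. A minimal sketch, assuming a hypothetical scores collection on a server recent enough to support sorted single-document updates:

from pymongo import MongoClient, UpdateOne

coll = MongoClient().test.scores  # hypothetical collection
# When several documents match, `sort` picks the one that is updated;
# here the document with the highest score wins.
coll.bulk_write(
    [UpdateOne({"user": "alice"}, {"$set": {"best": True}}, sort={"score": -1})]
)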
- - `upsert` (optional): If ``True``, perform an insert if no documents + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents match the filter. - - `collation` (optional): An instance of + :param collation: An instance of :class:`~pymongo.collation.Collation`. - - `array_filters` (optional): A list of filters specifying which + :param array_filters: A list of filters specifying which array elements an update should apply. - - `hint` (optional): An index to use to support the query + :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to - :meth:`~pymongo.collection.Collection.create_index` (e.g. + :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_index` or :meth:`~pymongo.collection.Collection.create_index` (e.g. ``[('field', ASCENDING)]``). This option is only supported on MongoDB 4.2 and above. + :param namespace: (optional) The namespace in which to update documents. + .. versionchanged:: 4.9 + Added the `namespace` option to support `MongoClient.bulk_write`. .. versionchanged:: 3.11 Added the `hint` option. .. versionchanged:: 3.9 @@ -409,22 +685,39 @@ def __init__( .. versionchanged:: 3.5 Added the `collation` option. """ - super(UpdateMany, self).__init__(filter, update, upsert, collation, array_filters, hint) + super().__init__(filter, update, upsert, collation, array_filters, hint, namespace, None) - def _add_to_bulk(self, bulkobj): - """Add this operation to the _Bulk instance `bulkobj`.""" + def _add_to_bulk(self, bulkobj: _AgnosticBulk) -> None: + """Add this operation to the _AsyncBulk/_Bulk instance `bulkobj`.""" bulkobj.add_update( self._filter, self._doc, True, self._upsert, - collation=self._collation, + collation=validate_collation_or_none(self._collation), array_filters=self._array_filters, hint=self._hint, ) + def _add_to_client_bulk(self, bulkobj: _AgnosticClientBulk) -> None: + """Add this operation to the _AsyncClientBulk/_ClientBulk instance `bulkobj`.""" + if not self._namespace: + raise InvalidOperation( + "MongoClient.bulk_write requires a namespace to be provided for each write operation" + ) + bulkobj.add_update( + self._namespace, + self._filter, + self._doc, + True, + self._upsert, + collation=validate_collation_or_none(self._collation), + array_filters=self._array_filters, + hint=self._hint, + ) -class IndexModel(object): + +class IndexModel: """Represents an index to create.""" __slots__ = ("__document",) @@ -432,12 +725,13 @@ class IndexModel(object): def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: """Create an Index instance. - For use with :meth:`~pymongo.collection.Collection.create_indexes`. + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_indexes` and :meth:`~pymongo.collection.Collection.create_indexes`. - Takes either a single key or a list of (key, direction) pairs. - The key(s) must be an instance of :class:`basestring` - (:class:`str` in python 3), and the direction(s) must be one of - (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. 
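The direction-defaulting rule just described means bare key names and (key, direction) pairs can be mixed in a single key list. A quick sketch:

from pymongo import DESCENDING, IndexModel

# "username" gets ASCENDING by default; "created_at" is explicit.
idx = IndexModel(["username", ("created_at", DESCENDING)], name="user_created")
print(idx.document["key"])  # e.g. SON([('username', 1), ('created_at', -1)])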
+ The key(s) must be an instance of :class:`str`, and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). @@ -475,12 +769,11 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: See the MongoDB documentation for a full list of supported options by server version. - :Parameters: - - `keys`: a single key or a list of (key, direction) - pairs specifying the index to create - - `**kwargs` (optional): any additional index creation + :param keys: a single key or a list containing (key, direction) pairs + or keys specifying the index to create. + :param kwargs: any additional index creation options (see the above list) should be passed as keyword - arguments + arguments. .. versionchanged:: 3.11 Added the ``hidden`` option. @@ -488,10 +781,10 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: Added the ``partialFilterExpression`` option to support partial indexes. - .. _wildcard index: https://mongodb.com/docs/master/core/index-wildcard/ + .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ """ keys = _index_list(keys) - if "name" not in kwargs: + if kwargs.get("name") is None: kwargs["name"] = _gen_index_name(keys) kwargs["key"] = _index_document(keys) collation = validate_collation_or_none(kwargs.pop("collation", None)) @@ -500,8 +793,61 @@ def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None: self.__document["collation"] = collation @property - def document(self) -> Dict[str, Any]: + def document(self) -> dict[str, Any]: """An index document suitable for passing to the createIndexes command. """ return self.__document + + def __repr__(self) -> str: + return "{}({}{})".format( + self.__class__.__name__, + self.document["key"], + "".join([f", {key}={value!r}" for key, value in self.document.items() if key != "key"]), + ) + + +class SearchIndexModel: + """Represents a search index to create.""" + + __slots__ = ("__document",) + + def __init__( + self, + definition: Mapping[str, Any], + name: Optional[str] = None, + type: Optional[str] = None, + **kwargs: Any, + ) -> None: + """Create a Search Index instance. + + For use with :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_search_index` and :meth:`~pymongo.asynchronous.collection.AsyncCollection.create_search_indexes`. + + :param definition: The definition for this index. + :param name: The name for this index, if present. + :param type: The type for this index, which defaults to "search". Alternative values include "vectorSearch". + :param kwargs: Keyword arguments supplying any additional options. + + .. note:: Search indexes require a MongoDB Atlas cluster running server version 7.0 or later. + .. versionadded:: 4.5 + .. versionchanged:: 4.7 + Added the type and kwargs arguments.
+ """ + self.__document: dict[str, Any] = {} + if name is not None: + self.__document["name"] = name + self.__document["definition"] = definition + if type is not None: + self.__document["type"] = type + self.__document.update(kwargs) + + @property + def document(self) -> Mapping[str, Any]: + """The document for this index.""" + return self.__document + + def __repr__(self) -> str: + return "{}({})".format( + self.__class__.__name__, + ", ".join([f"{key}={value!r}" for key, value in self.document.items()]), + ) diff --git a/pymongo/periodic_executor.py b/pymongo/periodic_executor.py index 2c3727a7a3..82f506f039 100644 --- a/pymongo/periodic_executor.py +++ b/pymongo/periodic_executor.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -14,24 +14,129 @@ """Run a target function on a background thread.""" +from __future__ import annotations + +import asyncio +import sys import threading import time import weakref from typing import Any, Optional +from pymongo import _csot +from pymongo._asyncio_task import create_task +from pymongo.lock import _create_lock + +_IS_SYNC = False + + +class AsyncPeriodicExecutor: + def __init__( + self, + interval: float, + min_interval: float, + target: Any, + name: Optional[str] = None, + ): + """Run a target function periodically on a background task. + + If the target's return value is false, the executor stops. + + :param interval: Seconds between calls to `target`. + :param min_interval: Minimum seconds between calls if `wake` is + called very often. + :param target: A function. + :param name: A name to give the underlying task. + """ + self._event = False + self._interval = interval + self._min_interval = min_interval + self._target = target + self._stopped = False + self._task: Optional[asyncio.Task[Any]] = None + self._name = name + self._skip_sleep = False + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" + + def open(self) -> None: + """Start. Multiple calls have no effect.""" + self._stopped = False + + if self._task is None or ( + self._task.done() and not self._task.cancelled() and not self._task.cancelling() # type: ignore[unused-ignore, attr-defined] + ): + self._task = create_task(self._run(), name=self._name) + + def close(self, dummy: Any = None) -> None: + """Stop. To restart, call open(). + + The dummy parameter allows an executor's close method to be a weakref + callback; see monitor.py. + """ + self._stopped = True + if self._task is not None: + self._task.cancel() + + async def join(self, timeout: Optional[int] = None) -> None: + if self._task is not None: + await asyncio.wait([self._task], timeout=timeout) # type-ignore: [arg-type] -class PeriodicExecutor(object): - def __init__(self, interval, min_interval, target, name=None): - """ "Run a target function periodically on a background thread. 
+ def wake(self) -> None: + """Execute the target function soon.""" + self._event = True + + def update_interval(self, new_interval: int) -> None: + self._interval = new_interval + + def skip_sleep(self) -> None: + self._skip_sleep = True + + async def _run(self) -> None: + # The CSOT contextvars must be cleared inside the executor task before execution begins + _csot.reset_all() + while not self._stopped: + if self._task and self._task.cancelling(): # type: ignore[unused-ignore, attr-defined] + raise asyncio.CancelledError + try: + if not await self._target(): + self._stopped = True + break + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + self._stopped = True + raise + + if self._skip_sleep: + self._skip_sleep = False + else: + deadline = time.monotonic() + self._interval + while not self._stopped and time.monotonic() < deadline: + await asyncio.sleep(self._min_interval) + if self._event: + break # Early wake. + + self._event = False + + +class PeriodicExecutor: + def __init__( + self, + interval: float, + min_interval: float, + target: Any, + name: Optional[str] = None, + ): + """Run a target function periodically on a background thread. If the target's return value is false, the executor stops. - :Parameters: - - `interval`: Seconds between calls to `target`. - - `min_interval`: Minimum seconds between calls if `wake` is + :param interval: Seconds between calls to `target`. + :param min_interval: Minimum seconds between calls if `wake` is called very often. - - `target`: A function. - - `name`: A name to give the underlying thread. + :param target: A function. + :param name: A name to give the underlying thread. """ # threading.Event and its internal condition variable are expensive # in Python 2, see PYTHON-983. Use a boolean to know when to wake. @@ -45,12 +150,11 @@ def __init__(self, interval, min_interval, target, name=None): self._thread: Optional[threading.Thread] = None self._name = name self._skip_sleep = False - self._thread_will_exit = False - self._lock = threading.Lock() + self._lock = _create_lock() - def __repr__(self): - return "<%s(name=%s) object at 0x%x>" % (self.__class__.__name__, self._name, id(self)) + def __repr__(self) -> str: + return f"<{self.__class__.__name__}(name={self._name}) object at 0x{id(self):x}>" def open(self) -> None: """Start. Multiple calls have no effect. @@ -83,7 +187,15 @@ def open(self) -> None: thread.daemon = True self._thread = weakref.proxy(thread) _register_executor(self) - thread.start() + # Mitigation to RuntimeError firing when thread starts on shutdown + # https://github.com/python/cpython/issues/114570 + try: + thread.start() + except RuntimeError as e: + if "interpreter shutdown" in str(e) or sys.is_finalizing(): + self._thread = None + return + raise def close(self, dummy: Any = None) -> None: """Stop. To restart, call open(). @@ -111,19 +223,20 @@ def update_interval(self, new_interval: int) -> None: def skip_sleep(self) -> None: self._skip_sleep = True - def __should_stop(self): + def _should_stop(self) -> bool: with self._lock: if self._stopped: self._thread_will_exit = True return True return False - def _run(self): - while not self.__should_stop(): + def _run(self) -> None: + while not self._should_stop(): try: if not self._target(): self._stopped = True break + # Catch KeyboardInterrupt, etc. and cleanup. 
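Both executors share the stop-on-falsy contract visible in ``_run`` above: the target is invoked every ``interval`` seconds until it returns something false or ``close()`` is called. An illustrative sketch using the async variant (an internal API, shown only to make the contract concrete; assumes Python 3.11+, since the loop probes Task.cancelling()):

import asyncio

from pymongo.periodic_executor import AsyncPeriodicExecutor

async def main() -> None:
    state = {"runs": 0}

    async def target() -> bool:
        state["runs"] += 1
        return state["runs"] < 3  # a falsy return stops the executor

    executor = AsyncPeriodicExecutor(
        interval=0.1, min_interval=0.05, target=target, name="demo"
    )
    executor.open()  # schedules _run() as a task
    await executor.join(timeout=1)  # returns once the target stops itself

asyncio.run(main())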
except BaseException: with self._lock: self._stopped = True @@ -152,16 +265,16 @@ def _run(self): _EXECUTORS = set() -def _register_executor(executor): +def _register_executor(executor: PeriodicExecutor) -> None: ref = weakref.ref(executor, _on_executor_deleted) _EXECUTORS.add(ref) -def _on_executor_deleted(ref): +def _on_executor_deleted(ref: weakref.ReferenceType[PeriodicExecutor]) -> None: _EXECUTORS.remove(ref) -def _shutdown_executors(): +def _shutdown_executors() -> None: if _EXECUTORS is None: return diff --git a/pymongo/pool.py b/pymongo/pool.py index 1fab98209f..456ff3df0a 100644 --- a/pymongo/pool.py +++ b/pymongo/pool.py @@ -1,1660 +1,22 @@ -# Copyright 2011-present MongoDB, Inc. +# Copyright 2024-present MongoDB, Inc. # -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -import collections -import contextlib -import copy -import os -import platform -import socket -import ssl -import sys -import threading -import time -import weakref -from typing import Any, NoReturn, Optional +"""Re-import of synchronous Pool API for compatibility.""" +from __future__ import annotations -from bson import DEFAULT_CODEC_OPTIONS -from bson.son import SON -from pymongo import __version__, _csot, auth, helpers -from pymongo.client_session import _validate_session_write_concern -from pymongo.common import ( - MAX_BSON_SIZE, - MAX_CONNECTING, - MAX_IDLE_TIME_SEC, - MAX_MESSAGE_SIZE, - MAX_POOL_SIZE, - MAX_WIRE_VERSION, - MAX_WRITE_BATCH_SIZE, - MIN_POOL_SIZE, - ORDERED_TYPES, - WAIT_QUEUE_TIMEOUT, -) -from pymongo.errors import ( - AutoReconnect, - ConfigurationError, - ConnectionFailure, - DocumentTooLarge, - ExecutionTimeout, - InvalidOperation, - NetworkTimeout, - NotPrimaryError, - OperationFailure, - PyMongoError, - WaitQueueTimeoutError, - _CertificateError, -) -from pymongo.hello import Hello, HelloCompat -from pymongo.monitoring import ConnectionCheckOutFailedReason, ConnectionClosedReason -from pymongo.network import command, receive_message -from pymongo.read_preferences import ReadPreference -from pymongo.server_api import _add_to_command -from pymongo.server_type import SERVER_TYPE -from pymongo.socket_checker import SocketChecker -from pymongo.ssl_support import HAS_SNI, SSLError +from pymongo.synchronous.pool import * # noqa: F403 +from pymongo.synchronous.pool import __doc__ as original_doc -try: - from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl - - def _set_non_inheritable_non_atomic(fd): - """Set the close-on-exec flag on the given file descriptor.""" - flags = fcntl(fd, F_GETFD) - fcntl(fd, F_SETFD, flags | FD_CLOEXEC) - -except ImportError: - # Windows, various platforms we don't claim to 
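Since pymongo/pool.py is reduced above to a star re-import of the synchronous implementation, legacy import paths keep resolving to the same objects. A sketch of what the shim preserves (assuming ``Pool`` remains among the re-exported public names):

import pymongo.pool
import pymongo.synchronous.pool

# Both module paths should name the same class via the re-import shim.
print(pymongo.pool.Pool is pymongo.synchronous.pool.Pool)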
support - # (Jython, IronPython, ...), systems that don't provide - # everything we need from fcntl, etc. - def _set_non_inheritable_non_atomic(fd): - """Dummy function for platforms that don't provide fcntl.""" - pass - - -_MAX_TCP_KEEPIDLE = 120 -_MAX_TCP_KEEPINTVL = 10 -_MAX_TCP_KEEPCNT = 9 - -if sys.platform == "win32": - try: - import _winreg as winreg - except ImportError: - import winreg - - def _query(key, name, default): - try: - value, _ = winreg.QueryValueEx(key, name) - # Ensure the value is a number or raise ValueError. - return int(value) - except (OSError, ValueError): - # QueryValueEx raises OSError when the key does not exist (i.e. - # the system is using the Windows default value). - return default - - try: - with winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" - ) as key: - _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) - _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) - except OSError: - # We could not check the default values because winreg.OpenKey failed. - # Assume the system is using the default values. - _WINDOWS_TCP_IDLE_MS = 7200000 - _WINDOWS_TCP_INTERVAL_MS = 1000 - - def _set_keepalive_times(sock): - idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) - interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) - if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: - sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) - -else: - - def _set_tcp_option(sock, tcp_option, max_value): - if hasattr(socket, tcp_option): - sockopt = getattr(socket, tcp_option) - try: - # PYTHON-1350 - NetBSD doesn't implement getsockopt for - # TCP_KEEPIDLE and friends. Don't attempt to set the - # values there. - default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) - if default > max_value: - sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) - except socket.error: - pass - - def _set_keepalive_times(sock): - _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) - _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) - _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) - - -_METADATA: SON[str, Any] = SON( - [ - ("driver", SON([("name", "PyMongo"), ("version", __version__)])), - ] -) - -if sys.platform.startswith("linux"): - # platform.linux_distribution was deprecated in Python 3.5 - # and removed in Python 3.8. Starting in Python 3.5 it - # raises DeprecationWarning - # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 - _name = platform.system() - _METADATA["os"] = SON( - [ - ("type", _name), - ("name", _name), - ("architecture", platform.machine()), - # Kernel version (e.g. 4.4.0-17-generic). - ("version", platform.release()), - ] - ) -elif sys.platform == "darwin": - _METADATA["os"] = SON( - [ - ("type", platform.system()), - ("name", platform.system()), - ("architecture", platform.machine()), - # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin - # kernel version. - ("version", platform.mac_ver()[0]), - ] - ) -elif sys.platform == "win32": - _METADATA["os"] = SON( - [ - ("type", platform.system()), - # "Windows XP", "Windows 7", "Windows 10", etc. - ("name", " ".join((platform.system(), platform.release()))), - ("architecture", platform.machine()), - # Windows patch level (e.g. 
5.1.2600-SP3) - ("version", "-".join(platform.win32_ver()[1:3])), - ] - ) -elif sys.platform.startswith("java"): - _name, _ver, _arch = platform.java_ver()[-1] - _METADATA["os"] = SON( - [ - # Linux, Windows 7, Mac OS X, etc. - ("type", _name), - ("name", _name), - # x86, x86_64, AMD64, etc. - ("architecture", _arch), - # Linux kernel version, OSX version, etc. - ("version", _ver), - ] - ) -else: - # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11) - _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) - _METADATA["os"] = SON( - [ - ("type", platform.system()), - ("name", " ".join([part for part in _aliased[:2] if part])), - ("architecture", platform.machine()), - ("version", _aliased[2]), - ] - ) - -if platform.python_implementation().startswith("PyPy"): - _METADATA["platform"] = " ".join( - ( - platform.python_implementation(), - ".".join(map(str, sys.pypy_version_info)), # type: ignore - "(Python %s)" % ".".join(map(str, sys.version_info)), - ) - ) -elif sys.platform.startswith("java"): - _METADATA["platform"] = " ".join( - ( - platform.python_implementation(), - ".".join(map(str, sys.version_info)), - "(%s)" % " ".join((platform.system(), platform.release())), - ) - ) -else: - _METADATA["platform"] = " ".join( - (platform.python_implementation(), ".".join(map(str, sys.version_info))) - ) - - -# If the first getaddrinfo call of this interpreter's life is on a thread, -# while the main thread holds the import lock, getaddrinfo deadlocks trying -# to import the IDNA codec. Import it here, where presumably we're on the -# main thread, to avoid the deadlock. See PYTHON-607. -"foo".encode("idna") - - -def _raise_connection_failure( - address: Any, error: Exception, msg_prefix: Optional[str] = None -) -> NoReturn: - """Convert a socket.error to ConnectionFailure and raise it.""" - host, port = address - # If connecting to a Unix socket, port will be None. - if port is not None: - msg = "%s:%d: %s" % (host, port, error) - else: - msg = "%s: %s" % (host, error) - if msg_prefix: - msg = msg_prefix + msg - if isinstance(error, socket.timeout): - raise NetworkTimeout(msg) from error - elif isinstance(error, SSLError) and "timed out" in str(error): - # Eventlet does not distinguish TLS network timeouts from other - # SSLErrors (https://github.com/eventlet/eventlet/issues/692). - # Luckily, we can work around this limitation because the phrase - # 'timed out' appears in all the timeout related SSLErrors raised. - raise NetworkTimeout(msg) from error - else: - raise AutoReconnect(msg) from error - - -def _cond_wait(condition, deadline): - timeout = deadline - time.monotonic() if deadline else None - return condition.wait(timeout) - - -class PoolOptions(object): - """Read only connection pool options for a MongoClient. - - Should not be instantiated directly by application developers. 
Access - a client's pool options via - :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: - - pool_opts = client.options.pool_options - pool_opts.max_pool_size - pool_opts.min_pool_size - - """ - - __slots__ = ( - "__max_pool_size", - "__min_pool_size", - "__max_idle_time_seconds", - "__connect_timeout", - "__socket_timeout", - "__wait_queue_timeout", - "__ssl_context", - "__tls_allow_invalid_hostnames", - "__event_listeners", - "__appname", - "__driver", - "__metadata", - "__compression_settings", - "__max_connecting", - "__pause_enabled", - "__server_api", - "__load_balanced", - "__credentials", - ) - - def __init__( - self, - max_pool_size=MAX_POOL_SIZE, - min_pool_size=MIN_POOL_SIZE, - max_idle_time_seconds=MAX_IDLE_TIME_SEC, - connect_timeout=None, - socket_timeout=None, - wait_queue_timeout=WAIT_QUEUE_TIMEOUT, - ssl_context=None, - tls_allow_invalid_hostnames=False, - event_listeners=None, - appname=None, - driver=None, - compression_settings=None, - max_connecting=MAX_CONNECTING, - pause_enabled=True, - server_api=None, - load_balanced=None, - credentials=None, - ): - self.__max_pool_size = max_pool_size - self.__min_pool_size = min_pool_size - self.__max_idle_time_seconds = max_idle_time_seconds - self.__connect_timeout = connect_timeout - self.__socket_timeout = socket_timeout - self.__wait_queue_timeout = wait_queue_timeout - self.__ssl_context = ssl_context - self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames - self.__event_listeners = event_listeners - self.__appname = appname - self.__driver = driver - self.__compression_settings = compression_settings - self.__max_connecting = max_connecting - self.__pause_enabled = pause_enabled - self.__server_api = server_api - self.__load_balanced = load_balanced - self.__credentials = credentials - self.__metadata = copy.deepcopy(_METADATA) - if appname: - self.__metadata["application"] = {"name": appname} - - # Combine the "driver" MongoClient option with PyMongo's info, like: - # { - # 'driver': { - # 'name': 'PyMongo|MyDriver', - # 'version': '4.2.0|1.2.3', - # }, - # 'platform': 'CPython 3.7.0|MyPlatform' - # } - if driver: - if driver.name: - self.__metadata["driver"]["name"] = "%s|%s" % ( - _METADATA["driver"]["name"], - driver.name, - ) - if driver.version: - self.__metadata["driver"]["version"] = "%s|%s" % ( - _METADATA["driver"]["version"], - driver.version, - ) - if driver.platform: - self.__metadata["platform"] = "%s|%s" % (_METADATA["platform"], driver.platform) - - @property - def _credentials(self): - """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" - return self.__credentials - - @property - def non_default_options(self): - """The non-default options this pool was created with. - - Added for CMAP's :class:`PoolCreatedEvent`. - """ - opts = {} - if self.__max_pool_size != MAX_POOL_SIZE: - opts["maxPoolSize"] = self.__max_pool_size - if self.__min_pool_size != MIN_POOL_SIZE: - opts["minPoolSize"] = self.__min_pool_size - if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: - opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 - if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: - opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 - if self.__max_connecting != MAX_CONNECTING: - opts["maxConnecting"] = self.__max_connecting - return opts - - @property - def max_pool_size(self): - """The maximum allowable number of concurrent connections to each - connected server. 
Requests to a server will block if there are - `maxPoolSize` outstanding connections to the requested server. - Defaults to 100. Cannot be 0. - - When a server's pool has reached `max_pool_size`, operations for that - server block waiting for a socket to be returned to the pool. If - ``waitQueueTimeoutMS`` is set, a blocked operation will raise - :exc:`~pymongo.errors.ConnectionFailure` after a timeout. - By default ``waitQueueTimeoutMS`` is not set. - """ - return self.__max_pool_size - - @property - def min_pool_size(self): - """The minimum required number of concurrent connections that the pool - will maintain to each connected server. Default is 0. - """ - return self.__min_pool_size - - @property - def max_connecting(self): - """The maximum number of concurrent connection creation attempts per - pool. Defaults to 2. - """ - return self.__max_connecting - - @property - def pause_enabled(self): - return self.__pause_enabled - - @property - def max_idle_time_seconds(self): - """The maximum number of seconds that a connection can remain - idle in the pool before being removed and replaced. Defaults to - `None` (no limit). - """ - return self.__max_idle_time_seconds - - @property - def connect_timeout(self): - """How long a connection can take to be opened before timing out.""" - return self.__connect_timeout - - @property - def socket_timeout(self): - """How long a send or receive on a socket can take before timing out.""" - return self.__socket_timeout - - @property - def wait_queue_timeout(self): - """How long a thread will wait for a socket from the pool if the pool - has no free sockets. - """ - return self.__wait_queue_timeout - - @property - def _ssl_context(self): - """An SSLContext instance or None.""" - return self.__ssl_context - - @property - def tls_allow_invalid_hostnames(self): - """If True skip ssl.match_hostname.""" - return self.__tls_allow_invalid_hostnames - - @property - def _event_listeners(self): - """An instance of pymongo.monitoring._EventListeners.""" - return self.__event_listeners - - @property - def appname(self): - """The application name, for sending with hello in server handshake.""" - return self.__appname - - @property - def driver(self): - """Driver name and version, for sending with hello in handshake.""" - return self.__driver - - @property - def _compression_settings(self): - return self.__compression_settings - - @property - def metadata(self): - """A dict of metadata about the application, driver, os, and platform.""" - return self.__metadata.copy() - - @property - def server_api(self): - """A pymongo.server_api.ServerApi or None.""" - return self.__server_api - - @property - def load_balanced(self): - """True if this Pool is configured in load balanced mode.""" - return self.__load_balanced - - -class _CancellationContext(object): - def __init__(self): - self._cancelled = False - - def cancel(self): - """Cancel this context.""" - self._cancelled = True - - @property - def cancelled(self): - """Was cancel called?""" - return self._cancelled - - -class SocketInfo(object): - """Store a socket with some metadata. 
- - :Parameters: - - `sock`: a raw socket object - - `pool`: a Pool instance - - `address`: the server's (host, port) - - `id`: the id of this socket in it's pool - """ - - def __init__(self, sock, pool, address, id): - self.pool_ref = weakref.ref(pool) - self.sock = sock - self.address = address - self.id = id - self.authed = set() - self.closed = False - self.last_checkin_time = time.monotonic() - self.performed_handshake = False - self.is_writable = False - self.max_wire_version = MAX_WIRE_VERSION - self.max_bson_size = MAX_BSON_SIZE - self.max_message_size = MAX_MESSAGE_SIZE - self.max_write_batch_size = MAX_WRITE_BATCH_SIZE - self.supports_sessions = False - self.hello_ok = None - self.is_mongos = False - self.op_msg_enabled = False - self.listeners = pool.opts._event_listeners - self.enabled_for_cmap = pool.enabled_for_cmap - self.compression_settings = pool.opts._compression_settings - self.compression_context = None - self.socket_checker = SocketChecker() - # Support for mechanism negotiation on the initial handshake. - self.negotiated_mechs = None - self.auth_ctx = None - - # The pool's generation changes with each reset() so we can close - # sockets created before the last reset. - self.pool_gen = pool.gen - self.generation = self.pool_gen.get_overall() - self.ready = False - self.cancel_context = None - if not pool.handshake: - # This is a Monitor connection. - self.cancel_context = _CancellationContext() - self.opts = pool.opts - self.more_to_come = False - # For load balancer support. - self.service_id = None - # When executing a transaction in load balancing mode, this flag is - # set to true to indicate that the session now owns the connection. - self.pinned_txn = False - self.pinned_cursor = False - self.active = False - self.last_timeout = self.opts.socket_timeout - self.connect_rtt = 0.0 - - def set_socket_timeout(self, timeout): - """Cache last timeout to avoid duplicate calls to sock.settimeout.""" - if timeout == self.last_timeout: - return - self.last_timeout = timeout - self.sock.settimeout(timeout) - - def apply_timeout(self, client, cmd): - # CSOT: use remaining timeout when set. - timeout = _csot.remaining() - if timeout is None: - # Reset the socket timeout unless we're performing a streaming monitor check. - if not self.more_to_come: - self.set_socket_timeout(self.opts.socket_timeout) - return None - # RTT validation. - rtt = _csot.get_rtt() - if rtt is None: - rtt = self.connect_rtt - max_time_ms = timeout - rtt - if max_time_ms < 0: - # CSOT: raise an error without running the command since we know it will time out. - errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f}" - raise ExecutionTimeout( - errmsg, 50, {"ok": 0, "errmsg": errmsg, "code": 50}, self.max_wire_version - ) - if cmd is not None: - cmd["maxTimeMS"] = int(max_time_ms * 1000) - self.set_socket_timeout(timeout) - return timeout - - def pin_txn(self): - self.pinned_txn = True - assert not self.pinned_cursor - - def pin_cursor(self): - self.pinned_cursor = True - assert not self.pinned_txn - - def unpin(self): - pool = self.pool_ref() - if pool: - pool.return_socket(self) - else: - self.close_socket(ConnectionClosedReason.STALE) - - def hello_cmd(self): - # Handshake spec requires us to use OP_MSG+hello command for the - # initial handshake in load balanced or stable API mode. 
- if self.opts.server_api or self.hello_ok or self.opts.load_balanced: - self.op_msg_enabled = True - return SON([(HelloCompat.CMD, 1)]) - else: - return SON([(HelloCompat.LEGACY_CMD, 1), ("helloOk", True)]) - - def hello(self): - return self._hello(None, None, None) - - def _hello(self, cluster_time, topology_version, heartbeat_frequency): - cmd = self.hello_cmd() - performing_handshake = not self.performed_handshake - awaitable = False - if performing_handshake: - self.performed_handshake = True - cmd["client"] = self.opts.metadata - if self.compression_settings: - cmd["compression"] = self.compression_settings.compressors - if self.opts.load_balanced: - cmd["loadBalanced"] = True - elif topology_version is not None: - cmd["topologyVersion"] = topology_version - cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) - awaitable = True - # If connect_timeout is None there is no timeout. - if self.opts.connect_timeout: - self.set_socket_timeout(self.opts.connect_timeout + heartbeat_frequency) - - if not performing_handshake and cluster_time is not None: - cmd["$clusterTime"] = cluster_time - - creds = self.opts._credentials - if creds: - if creds.mechanism == "DEFAULT" and creds.username: - cmd["saslSupportedMechs"] = creds.source + "." + creds.username - auth_ctx = auth._AuthContext.from_credentials(creds) - if auth_ctx: - cmd["speculativeAuthenticate"] = auth_ctx.speculate_command() - else: - auth_ctx = None - - if performing_handshake: - start = time.monotonic() - doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) - if performing_handshake: - self.connect_rtt = time.monotonic() - start - hello = Hello(doc, awaitable=awaitable) - self.is_writable = hello.is_writable - self.max_wire_version = hello.max_wire_version - self.max_bson_size = hello.max_bson_size - self.max_message_size = hello.max_message_size - self.max_write_batch_size = hello.max_write_batch_size - self.supports_sessions = hello.logical_session_timeout_minutes is not None - self.hello_ok = hello.hello_ok - self.is_repl = hello.server_type in ( - SERVER_TYPE.RSPrimary, - SERVER_TYPE.RSSecondary, - SERVER_TYPE.RSArbiter, - SERVER_TYPE.RSOther, - SERVER_TYPE.RSGhost, - ) - self.is_standalone = hello.server_type == SERVER_TYPE.Standalone - self.is_mongos = hello.server_type == SERVER_TYPE.Mongos - if performing_handshake and self.compression_settings: - ctx = self.compression_settings.get_compression_context(hello.compressors) - self.compression_context = ctx - - self.op_msg_enabled = True - if creds: - self.negotiated_mechs = hello.sasl_supported_mechs - if auth_ctx: - auth_ctx.parse_response(hello) - if auth_ctx.speculate_succeeded(): - self.auth_ctx = auth_ctx - if self.opts.load_balanced: - if not hello.service_id: - raise ConfigurationError( - "Driver attempted to initialize in load balancing mode," - " but the server does not support this mode" - ) - self.service_id = hello.service_id - self.generation = self.pool_gen.get(self.service_id) - return hello - - def _next_reply(self): - reply = self.receive_message(None) - self.more_to_come = reply.more_to_come - unpacked_docs = reply.unpack_response() - response_doc = unpacked_docs[0] - helpers._check_command_response(response_doc, self.max_wire_version) - return response_doc - - def command( - self, - dbname, - spec, - read_preference=ReadPreference.PRIMARY, - codec_options=DEFAULT_CODEC_OPTIONS, - check=True, - allowable_errors=None, - read_concern=None, - write_concern=None, - parse_write_concern_error=False, - collation=None, - 
session=None, - client=None, - retryable_write=False, - publish_events=True, - user_fields=None, - exhaust_allowed=False, - ): - """Execute a command or raise an error. - - :Parameters: - - `dbname`: name of the database on which to run the command - - `spec`: a command document as a dict, SON, or mapping object - - `read_preference`: a read preference - - `codec_options`: a CodecOptions instance - - `check`: raise OperationFailure if there are errors - - `allowable_errors`: errors to ignore if `check` is True - - `read_concern`: The read concern for this command. - - `write_concern`: The write concern for this command. - - `parse_write_concern_error`: Whether to parse the - ``writeConcernError`` field in the command response. - - `collation`: The collation for this command. - - `session`: optional ClientSession instance. - - `client`: optional MongoClient for gossipping $clusterTime. - - `retryable_write`: True if this command is a retryable write. - - `publish_events`: Should we publish events for this command? - - `user_fields` (optional): Response fields that should be decoded - using the TypeDecoders from codec_options, passed to - bson._decode_all_selective. - """ - self.validate_session(client, session) - session = _validate_session_write_concern(session, write_concern) - - # Ensure command name remains in first place. - if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] - spec = SON(spec) - - if not (write_concern is None or write_concern.acknowledged or collation is None): - raise ConfigurationError("Collation is unsupported for unacknowledged writes.") - - self.add_server_api(spec) - if session: - session._apply_to(spec, retryable_write, read_preference, self) - self.send_cluster_time(spec, session, client) - listeners = self.listeners if publish_events else None - unacknowledged = write_concern and not write_concern.acknowledged - if self.op_msg_enabled: - self._raise_if_not_writable(unacknowledged) - try: - return command( - self, - dbname, - spec, - self.is_mongos, - read_preference, - codec_options, - session, - client, - check, - allowable_errors, - self.address, - listeners, - self.max_bson_size, - read_concern, - parse_write_concern_error=parse_write_concern_error, - collation=collation, - compression_ctx=self.compression_context, - use_op_msg=self.op_msg_enabled, - unacknowledged=unacknowledged, - user_fields=user_fields, - exhaust_allowed=exhaust_allowed, - write_concern=write_concern, - ) - except (OperationFailure, NotPrimaryError): - raise - # Catch socket.error, KeyboardInterrupt, etc. and close ourselves. - except BaseException as error: - self._raise_connection_failure(error) - - def send_message(self, message, max_doc_size): - """Send a raw BSON message or raise ConnectionFailure. - - If a network exception is raised, the socket is closed. - """ - if self.max_bson_size is not None and max_doc_size > self.max_bson_size: - raise DocumentTooLarge( - "BSON document too large (%d bytes) - the connected server " - "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) - ) - - try: - self.sock.sendall(message) - except BaseException as error: - self._raise_connection_failure(error) - - def receive_message(self, request_id): - """Receive a raw BSON message or raise ConnectionFailure. - - If any exception is raised, the socket is closed. 
- """ - try: - return receive_message(self, request_id, self.max_message_size) - except BaseException as error: - self._raise_connection_failure(error) - - def _raise_if_not_writable(self, unacknowledged): - """Raise NotPrimaryError on unacknowledged write if this socket is not - writable. - """ - if unacknowledged and not self.is_writable: - # Write won't succeed, bail as if we'd received a not primary error. - raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) - - def unack_write(self, msg, max_doc_size): - """Send unack OP_MSG. - - Can raise ConnectionFailure or InvalidDocument. - - :Parameters: - - `msg`: bytes, an OP_MSG message. - - `max_doc_size`: size in bytes of the largest document in `msg`. - """ - self._raise_if_not_writable(True) - self.send_message(msg, max_doc_size) - - def write_command(self, request_id, msg, codec_options): - """Send "insert" etc. command, returning response as a dict. - - Can raise ConnectionFailure or OperationFailure. - - :Parameters: - - `request_id`: an int. - - `msg`: bytes, the command message. - """ - self.send_message(msg, 0) - reply = self.receive_message(request_id) - result = reply.command_response(codec_options) - - # Raises NotPrimaryError or OperationFailure. - helpers._check_command_response(result, self.max_wire_version) - return result - - def authenticate(self): - """Authenticate to the server if needed. - - Can raise ConnectionFailure or OperationFailure. - """ - # CMAP spec says to publish the ready event only after authenticating - # the connection. - if not self.ready: - creds = self.opts._credentials - if creds: - auth.authenticate(creds, self) - self.ready = True - if self.enabled_for_cmap: - self.listeners.publish_connection_ready(self.address, self.id) - - def validate_session(self, client, session): - """Validate this session before use with client. - - Raises error if the client is not the one that created the session. - """ - if session: - if session._client is not client: - raise InvalidOperation("Can only use session with the MongoClient that started it") - - def close_socket(self, reason): - """Close this connection with a reason.""" - if self.closed: - return - self._close_socket() - if reason and self.enabled_for_cmap: - self.listeners.publish_connection_closed(self.address, self.id, reason) - - def _close_socket(self): - """Close this connection.""" - if self.closed: - return - self.closed = True - if self.cancel_context: - self.cancel_context.cancel() - # Note: We catch exceptions to avoid spurious errors on interpreter - # shutdown. - try: - self.sock.close() - except Exception: - pass - - def socket_closed(self): - """Return True if we know socket has been closed, False otherwise.""" - return self.socket_checker.socket_closed(self.sock) - - def send_cluster_time(self, command, session, client): - """Add $clusterTime.""" - if client: - client._send_cluster_time(command, session) - - def add_server_api(self, command): - """Add server_api parameters.""" - if self.opts.server_api: - _add_to_command(command, self.opts.server_api) - - def update_last_checkin_time(self): - self.last_checkin_time = time.monotonic() - - def update_is_writable(self, is_writable): - self.is_writable = is_writable - - def idle_time_seconds(self): - """Seconds since this socket was last checked into its pool.""" - return time.monotonic() - self.last_checkin_time - - def _raise_connection_failure(self, error): - # Catch *all* exceptions from socket methods and close the socket. 
In - # regular Python, socket operations only raise socket.error, even if - # the underlying cause was a Ctrl-C: a signal raised during socket.recv - # is expressed as an EINTR error from poll. See internal_select_ex() in - # socketmodule.c. All error codes from poll become socket.error at - # first. Eventually in PyEval_EvalFrameEx the interpreter checks for - # signals and throws KeyboardInterrupt into the current frame on the - # main thread. - # - # But in Gevent and Eventlet, the polling mechanism (epoll, kqueue, - # ...) is called in Python code, which experiences the signal as a - # KeyboardInterrupt from the start, rather than as an initial - # socket.error, so we catch that, close the socket, and reraise it. - # - # The connection closed event will be emitted later in return_socket. - if self.ready: - reason = None - else: - reason = ConnectionClosedReason.ERROR - self.close_socket(reason) - # SSLError from PyOpenSSL inherits directly from Exception. - if isinstance(error, (IOError, OSError, SSLError)): - _raise_connection_failure(self.address, error) - else: - raise - - def __eq__(self, other): - return self.sock == other.sock - - def __ne__(self, other): - return not self == other - - def __hash__(self): - return hash(self.sock) - - def __repr__(self): - return "SocketInfo(%s)%s at %s" % ( - repr(self.sock), - self.closed and " CLOSED" or "", - id(self), - ) - - -def _create_connection(address, options): - """Given (host, port) and PoolOptions, connect and return a socket object. - - Can raise socket.error. - - This is a modified version of create_connection from CPython >= 2.7. - """ - host, port = address - - # Check if dealing with a unix domain socket - if host.endswith(".sock"): - if not hasattr(socket, "AF_UNIX"): - raise ConnectionFailure("UNIX-sockets are not supported on this system") - sock = socket.socket(socket.AF_UNIX) - # SOCK_CLOEXEC not supported for Unix sockets. - _set_non_inheritable_non_atomic(sock.fileno()) - try: - sock.connect(host) - return sock - except socket.error: - sock.close() - raise - - # Don't try IPv6 if we don't support it. Also skip it if host - # is 'localhost' (::1 is fine). Avoids slow connect issues - # like PYTHON-356. - family = socket.AF_INET - if socket.has_ipv6 and host != "localhost": - family = socket.AF_UNSPEC - - err = None - for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM): - af, socktype, proto, dummy, sa = res - # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited - # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 - # all file descriptors are created non-inheritable. See PEP 446. - try: - sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) - except socket.error: - # Can SOCK_CLOEXEC be defined even if the kernel doesn't support - # it? - sock = socket.socket(af, socktype, proto) - # Fallback when SOCK_CLOEXEC isn't available. - _set_non_inheritable_non_atomic(sock.fileno()) - try: - sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - # CSOT: apply timeout to socket connect. 
- timeout = _csot.remaining() - if timeout is None: - timeout = options.connect_timeout - elif timeout <= 0: - raise socket.timeout("timed out") - sock.settimeout(timeout) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) - _set_keepalive_times(sock) - sock.connect(sa) - return sock - except socket.error as e: - err = e - sock.close() - - if err is not None: - raise err - else: - # This likely means we tried to connect to an IPv6 only - # host with an OS/kernel or Python interpreter that doesn't - # support IPv6. The test case is Jython2.5.1 which doesn't - # support IPv6 at all. - raise socket.error("getaddrinfo failed") - - -def _configured_socket(address, options): - """Given (host, port) and PoolOptions, return a configured socket. - - Can raise socket.error, ConnectionFailure, or _CertificateError. - - Sets socket's SSL and timeout options. - """ - sock = _create_connection(address, options) - ssl_context = options._ssl_context - - if ssl_context is not None: - host = address[0] - try: - # We have to pass hostname / ip address to wrap_socket - # to use SSLContext.check_hostname. - if HAS_SNI: - sock = ssl_context.wrap_socket(sock, server_hostname=host) - else: - sock = ssl_context.wrap_socket(sock) - except _CertificateError: - sock.close() - # Raise _CertificateError directly like we do after match_hostname - # below. - raise - except (IOError, OSError, SSLError) as exc: # noqa: B014 - sock.close() - # We raise AutoReconnect for transient and permanent SSL handshake - # failures alike. Permanent handshake failures, like protocol - # mismatch, will be turned into ServerSelectionTimeoutErrors later. - _raise_connection_failure(address, exc, "SSL handshake failed: ") - if ( - ssl_context.verify_mode - and not ssl_context.check_hostname - and not options.tls_allow_invalid_hostnames - ): - try: - ssl.match_hostname(sock.getpeercert(), hostname=host) - except _CertificateError: - sock.close() - raise - - sock.settimeout(options.socket_timeout) - return sock - - -class _PoolClosedError(PyMongoError): - """Internal error raised when a thread tries to get a connection from a - closed pool. - """ - - pass - - -class _PoolGeneration(object): - def __init__(self): - # Maps service_id to generation. - self._generations = collections.defaultdict(int) - # Overall pool generation. 
- self._generation = 0 - - def get(self, service_id): - """Get the generation for the given service_id.""" - if service_id is None: - return self._generation - return self._generations[service_id] - - def get_overall(self): - """Get the Pool's overall generation.""" - return self._generation - - def inc(self, service_id): - """Increment the generation for the given service_id.""" - self._generation += 1 - if service_id is None: - for service_id in self._generations: - self._generations[service_id] += 1 - else: - self._generations[service_id] += 1 - - def stale(self, gen, service_id): - """Return if the given generation for a given service_id is stale.""" - return gen != self.get(service_id) - - -class PoolState(object): - PAUSED = 1 - READY = 2 - CLOSED = 3 - - -# Do *not* explicitly inherit from object or Jython won't call __del__ -# http://bugs.jython.org/issue1057 -class Pool: - def __init__(self, address, options, handshake=True): - """ - :Parameters: - - `address`: a (hostname, port) tuple - - `options`: a PoolOptions instance - - `handshake`: whether to call hello for each new SocketInfo - """ - if options.pause_enabled: - self.state = PoolState.PAUSED - else: - self.state = PoolState.READY - # Check a socket's health with socket_closed() every once in a while. - # Can override for testing: 0 to always check, None to never check. - self._check_interval_seconds = 1 - # LIFO pool. Sockets are ordered on idle time. Sockets claimed - # and returned to pool from the left side. Stale sockets removed - # from the right side. - self.sockets: collections.deque = collections.deque() - self.lock = threading.Lock() - self.active_sockets = 0 - # Monotonically increasing connection ID required for CMAP Events. - self.next_connection_id = 1 - # Track whether the sockets in this pool are writeable or not. - self.is_writable = None - - # Keep track of resets, so we notice sockets created before the most - # recent reset and close them. - # self.generation = 0 - self.gen = _PoolGeneration() - self.pid = os.getpid() - self.address = address - self.opts = options - self.handshake = handshake - # Don't publish events in Monitor pools. - self.enabled_for_cmap = ( - self.handshake - and self.opts._event_listeners is not None - and self.opts._event_listeners.enabled_for_cmap - ) - - # The first portion of the wait queue. - # Enforces: maxPoolSize - # Also used for: clearing the wait queue - self.size_cond = threading.Condition(self.lock) - self.requests = 0 - self.max_pool_size = self.opts.max_pool_size - if not self.max_pool_size: - self.max_pool_size = float("inf") - # The second portion of the wait queue. - # Enforces: maxConnecting - # Also used for: clearing the wait queue - self._max_connecting_cond = threading.Condition(self.lock) - self._max_connecting = self.opts.max_connecting - self._pending = 0 - if self.enabled_for_cmap: - self.opts._event_listeners.publish_pool_created( - self.address, self.opts.non_default_options - ) - # Similar to active_sockets but includes threads in the wait queue. - self.operation_count = 0 - # Retain references to pinned connections to prevent the CPython GC - # from thinking that a cursor's pinned connection can be GC'd when the - # cursor is GC'd (see PYTHON-2751). - self.__pinned_sockets = set() - self.ncursors = 0 - self.ntxns = 0 - - def ready(self): - # Take the lock to avoid the race condition described in PYTHON-2699. 
- with self.lock: - if self.state != PoolState.READY: - self.state = PoolState.READY - if self.enabled_for_cmap: - self.opts._event_listeners.publish_pool_ready(self.address) - - @property - def closed(self): - return self.state == PoolState.CLOSED - - def _reset(self, close, pause=True, service_id=None): - old_state = self.state - with self.size_cond: - if self.closed: - return - if self.opts.pause_enabled and pause and not self.opts.load_balanced: - old_state, self.state = self.state, PoolState.PAUSED - self.gen.inc(service_id) - newpid = os.getpid() - if self.pid != newpid: - self.pid = newpid - self.active_sockets = 0 - self.operation_count = 0 - if service_id is None: - sockets, self.sockets = self.sockets, collections.deque() - else: - discard: collections.deque = collections.deque() - keep: collections.deque = collections.deque() - for sock_info in self.sockets: - if sock_info.service_id == service_id: - discard.append(sock_info) - else: - keep.append(sock_info) - sockets = discard - self.sockets = keep - - if close: - self.state = PoolState.CLOSED - # Clear the wait queue - self._max_connecting_cond.notify_all() - self.size_cond.notify_all() - - listeners = self.opts._event_listeners - # CMAP spec says that close() MUST close sockets before publishing the - # PoolClosedEvent but that reset() SHOULD close sockets *after* - # publishing the PoolClearedEvent. - if close: - for sock_info in sockets: - sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) - if self.enabled_for_cmap: - listeners.publish_pool_closed(self.address) - else: - if old_state != PoolState.PAUSED and self.enabled_for_cmap: - listeners.publish_pool_cleared(self.address, service_id=service_id) - for sock_info in sockets: - sock_info.close_socket(ConnectionClosedReason.STALE) - - def update_is_writable(self, is_writable): - """Updates the is_writable attribute on all sockets currently in the - Pool. - """ - self.is_writable = is_writable - with self.lock: - for _socket in self.sockets: - _socket.update_is_writable(self.is_writable) - - def reset(self, service_id=None): - self._reset(close=False, service_id=service_id) - - def reset_without_pause(self): - self._reset(close=False, pause=False) - - def close(self): - self._reset(close=True) - - def stale_generation(self, gen, service_id): - return self.gen.stale(gen, service_id) - - def remove_stale_sockets(self, reference_generation): - """Removes stale sockets then adds new ones if pool is too small and - has not been reset. The `reference_generation` argument specifies the - `generation` at the point in time this operation was requested on the - pool. - """ - # Take the lock to avoid the race condition described in PYTHON-2699. - with self.lock: - if self.state != PoolState.READY: - return - - if self.opts.max_idle_time_seconds is not None: - with self.lock: - while ( - self.sockets - and self.sockets[-1].idle_time_seconds() > self.opts.max_idle_time_seconds - ): - sock_info = self.sockets.pop() - sock_info.close_socket(ConnectionClosedReason.IDLE) - - while True: - with self.size_cond: - # There are enough sockets in the pool. - if len(self.sockets) + self.active_sockets >= self.opts.min_pool_size: - return - if self.requests >= self.opts.min_pool_size: - return - self.requests += 1 - incremented = False - try: - with self._max_connecting_cond: - # If maxConnecting connections are already being created - # by this pool then try again later instead of waiting. 
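# For context: maxConnecting (default 2, per PoolOptions.max_connecting) caps
# concurrent connection establishment per pool. This background populator
# backs off and retries later rather than queueing, while foreground
# checkouts in _get_socket below block on _max_connecting_cond instead.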
- if self._pending >= self._max_connecting: - return - self._pending += 1 - incremented = True - sock_info = self.connect() - with self.lock: - # Close connection and return if the pool was reset during - # socket creation or while acquiring the pool lock. - if self.gen.get_overall() != reference_generation: - sock_info.close_socket(ConnectionClosedReason.STALE) - return - self.sockets.appendleft(sock_info) - finally: - if incremented: - # Notify after adding the socket to the pool. - with self._max_connecting_cond: - self._pending -= 1 - self._max_connecting_cond.notify() - - with self.size_cond: - self.requests -= 1 - self.size_cond.notify() - - def connect(self, handler=None): - """Connect to Mongo and return a new SocketInfo. - - Can raise ConnectionFailure. - - Note that the pool does not keep a reference to the socket -- you - must call return_socket() when you're done with it. - """ - with self.lock: - conn_id = self.next_connection_id - self.next_connection_id += 1 - - listeners = self.opts._event_listeners - if self.enabled_for_cmap: - listeners.publish_connection_created(self.address, conn_id) - - try: - sock = _configured_socket(self.address, self.opts) - except BaseException as error: - if self.enabled_for_cmap: - listeners.publish_connection_closed( - self.address, conn_id, ConnectionClosedReason.ERROR - ) - - if isinstance(error, (IOError, OSError, SSLError)): - _raise_connection_failure(self.address, error) - - raise - - sock_info = SocketInfo(sock, self, self.address, conn_id) - try: - if self.handshake: - sock_info.hello() - self.is_writable = sock_info.is_writable - if handler: - handler.contribute_socket(sock_info, completed_handshake=False) - - sock_info.authenticate() - except BaseException: - sock_info.close_socket(ConnectionClosedReason.ERROR) - raise - - return sock_info - - @contextlib.contextmanager - def get_socket(self, handler=None): - """Get a socket from the pool. Use with a "with" statement. - - Returns a :class:`SocketInfo` object wrapping a connected - :class:`socket.socket`. - - This method should always be used in a with-statement:: - - with pool.get_socket() as socket_info: - socket_info.send_message(msg) - data = socket_info.receive_message(op_code, request_id) - - Can raise ConnectionFailure or OperationFailure. - - :Parameters: - - `handler` (optional): A _MongoClientErrorHandler. - """ - listeners = self.opts._event_listeners - if self.enabled_for_cmap: - listeners.publish_connection_check_out_started(self.address) - - sock_info = self._get_socket(handler=handler) - - if self.enabled_for_cmap: - listeners.publish_connection_checked_out(self.address, sock_info.id) - try: - yield sock_info - except BaseException: - # Exception in caller. Ensure the connection gets returned. - # Note that when pinned is True, the session owns the - # connection and it is responsible for checking the connection - # back into the pool. - pinned = sock_info.pinned_txn or sock_info.pinned_cursor - if handler: - # Perform SDAM error handling rules while the connection is - # still checked out. 
- exc_type, exc_val, _ = sys.exc_info() - handler.handle(exc_type, exc_val) - if not pinned and sock_info.active: - self.return_socket(sock_info) - raise - if sock_info.pinned_txn: - with self.lock: - self.__pinned_sockets.add(sock_info) - self.ntxns += 1 - elif sock_info.pinned_cursor: - with self.lock: - self.__pinned_sockets.add(sock_info) - self.ncursors += 1 - elif sock_info.active: - self.return_socket(sock_info) - - def _raise_if_not_ready(self, emit_event): - if self.state != PoolState.READY: - if self.enabled_for_cmap and emit_event: - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR - ) - _raise_connection_failure(self.address, AutoReconnect("connection pool paused")) - - def _get_socket(self, handler=None): - """Get or create a SocketInfo. Can raise ConnectionFailure.""" - # We use the pid here to avoid issues with fork / multiprocessing. - # See test.test_client:TestClient.test_fork for an example of - # what could go wrong otherwise - if self.pid != os.getpid(): - self.reset_without_pause() - - if self.closed: - if self.enabled_for_cmap: - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.POOL_CLOSED - ) - raise _PoolClosedError( - "Attempted to check out a connection from closed connection pool" - ) - - with self.lock: - self.operation_count += 1 - - # Get a free socket or create one. - if _csot.get_timeout(): - deadline = _csot.get_deadline() - elif self.opts.wait_queue_timeout: - deadline = time.monotonic() + self.opts.wait_queue_timeout - else: - deadline = None - - with self.size_cond: - self._raise_if_not_ready(emit_event=True) - while not (self.requests < self.max_pool_size): - if not _cond_wait(self.size_cond, deadline): - # Timed out, notify the next thread to ensure a - # timeout doesn't consume the condition. - if self.requests < self.max_pool_size: - self.size_cond.notify() - self._raise_wait_queue_timeout() - self._raise_if_not_ready(emit_event=True) - self.requests += 1 - - # We've now acquired the semaphore and must release it on error. - sock_info = None - incremented = False - emitted_event = False - try: - with self.lock: - self.active_sockets += 1 - incremented = True - - while sock_info is None: - # CMAP: we MUST wait for either maxConnecting OR for a socket - # to be checked back into the pool. - with self._max_connecting_cond: - self._raise_if_not_ready(emit_event=False) - while not (self.sockets or self._pending < self._max_connecting): - if not _cond_wait(self._max_connecting_cond, deadline): - # Timed out, notify the next thread to ensure a - # timeout doesn't consume the condition. - if self.sockets or self._pending < self._max_connecting: - self._max_connecting_cond.notify() - emitted_event = True - self._raise_wait_queue_timeout() - self._raise_if_not_ready(emit_event=False) - - try: - sock_info = self.sockets.popleft() - except IndexError: - self._pending += 1 - if sock_info: # We got a socket from the pool - if self._perished(sock_info): - sock_info = None - continue - else: # We need to create a new connection - try: - sock_info = self.connect(handler=handler) - finally: - with self._max_connecting_cond: - self._pending -= 1 - self._max_connecting_cond.notify() - except BaseException: - if sock_info: - # We checked out a socket but authentication failed. 
- sock_info.close_socket(ConnectionClosedReason.ERROR) - with self.size_cond: - self.requests -= 1 - if incremented: - self.active_sockets -= 1 - self.size_cond.notify() - - if self.enabled_for_cmap and not emitted_event: - self.opts._event_listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.CONN_ERROR - ) - raise - - sock_info.active = True - return sock_info - - def return_socket(self, sock_info): - """Return the socket to the pool, or if it's closed discard it. - - :Parameters: - - `sock_info`: The socket to check into the pool. - """ - txn = sock_info.pinned_txn - cursor = sock_info.pinned_cursor - sock_info.active = False - sock_info.pinned_txn = False - sock_info.pinned_cursor = False - self.__pinned_sockets.discard(sock_info) - listeners = self.opts._event_listeners - if self.enabled_for_cmap: - listeners.publish_connection_checked_in(self.address, sock_info.id) - if self.pid != os.getpid(): - self.reset_without_pause() - else: - if self.closed: - sock_info.close_socket(ConnectionClosedReason.POOL_CLOSED) - elif sock_info.closed: - # CMAP requires the closed event be emitted after the check in. - if self.enabled_for_cmap: - listeners.publish_connection_closed( - self.address, sock_info.id, ConnectionClosedReason.ERROR - ) - else: - with self.lock: - # Hold the lock to ensure this section does not race with - # Pool.reset(). - if self.stale_generation(sock_info.generation, sock_info.service_id): - sock_info.close_socket(ConnectionClosedReason.STALE) - else: - sock_info.update_last_checkin_time() - sock_info.update_is_writable(self.is_writable) - self.sockets.appendleft(sock_info) - # Notify any threads waiting to create a connection. - self._max_connecting_cond.notify() - - with self.size_cond: - if txn: - self.ntxns -= 1 - elif cursor: - self.ncursors -= 1 - self.requests -= 1 - self.active_sockets -= 1 - self.operation_count -= 1 - self.size_cond.notify() - - def _perished(self, sock_info): - """Return True and close the connection if it is "perished". - - This side-effecty function checks if this socket has been idle - for longer than the max idle time, or if the socket has been closed by - some external network error, or if the socket's generation is outdated. - - Checking sockets lets us avoid seeing *some* - :class:`~pymongo.errors.AutoReconnect` exceptions on server - hiccups, etc. We only check if the socket was closed by an external - error if it has been > 1 second since the socket was checked into the - pool, to keep performance reasonable - we can't avoid AutoReconnects - completely anyway. - """ - idle_time_seconds = sock_info.idle_time_seconds() - # If socket is idle, open a new one.
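# The idle check below is the first of three ordered tests: (1) idle longer
# than max_idle_time_seconds, (2) a socket_closed() probe, throttled to at
# most once per _check_interval_seconds, and (3) a stale-generation test;
# any hit closes the connection and reports it perished.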
- if ( - self.opts.max_idle_time_seconds is not None - and idle_time_seconds > self.opts.max_idle_time_seconds - ): - sock_info.close_socket(ConnectionClosedReason.IDLE) - return True - - if self._check_interval_seconds is not None and ( - 0 == self._check_interval_seconds or idle_time_seconds > self._check_interval_seconds - ): - if sock_info.socket_closed(): - sock_info.close_socket(ConnectionClosedReason.ERROR) - return True - - if self.stale_generation(sock_info.generation, sock_info.service_id): - sock_info.close_socket(ConnectionClosedReason.STALE) - return True - - return False - - def _raise_wait_queue_timeout(self) -> NoReturn: - listeners = self.opts._event_listeners - if self.enabled_for_cmap: - listeners.publish_connection_check_out_failed( - self.address, ConnectionCheckOutFailedReason.TIMEOUT - ) - timeout = _csot.get_timeout() or self.opts.wait_queue_timeout - if self.opts.load_balanced: - other_ops = self.active_sockets - self.ncursors - self.ntxns - raise WaitQueueTimeoutError( - "Timeout waiting for connection from the connection pool. " - "maxPoolSize: %s, connections in use by cursors: %s, " - "connections in use by transactions: %s, connections in use " - "by other operations: %s, timeout: %s" - % ( - self.opts.max_pool_size, - self.ncursors, - self.ntxns, - other_ops, - timeout, - ) - ) - raise WaitQueueTimeoutError( - "Timed out while checking out a connection from connection pool. " - "maxPoolSize: %s, timeout: %s" % (self.opts.max_pool_size, timeout) - ) - - def __del__(self): - # Avoid ResourceWarnings in Python 3 - # Close all sockets without calling reset() or close() because it is - # not safe to acquire a lock in __del__. - for sock_info in self.sockets: - sock_info.close_socket(None) +__doc__ = original_doc +__all__ = ["PoolOptions"] # noqa: F405 diff --git a/pymongo/pool_options.py b/pymongo/pool_options.py new file mode 100644 index 0000000000..a5d76007b0 --- /dev/null +++ b/pymongo/pool_options.py @@ -0,0 +1,537 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Pool options for AsyncMongoClient/MongoClient. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. 
+""" +from __future__ import annotations + +import copy +import os +import platform +import sys +from pathlib import Path +from typing import TYPE_CHECKING, Any, MutableMapping, Optional + +import bson +from pymongo import __version__ +from pymongo.common import ( + MAX_CONNECTING, + MAX_IDLE_TIME_SEC, + MAX_POOL_SIZE, + MIN_POOL_SIZE, + WAIT_QUEUE_TIMEOUT, + has_c, +) + +if TYPE_CHECKING: + from pymongo.auth_shared import MongoCredential + from pymongo.compression_support import CompressionSettings + from pymongo.driver_info import DriverInfo + from pymongo.monitoring import _EventListeners + from pymongo.pyopenssl_context import SSLContext + from pymongo.server_api import ServerApi + + +_METADATA: dict[str, Any] = {"driver": {"name": "PyMongo", "version": __version__}} + +if sys.platform.startswith("linux"): + # platform.linux_distribution was deprecated in Python 3.5 + # and removed in Python 3.8. Starting in Python 3.5 it + # raises DeprecationWarning + # DeprecationWarning: dist() and linux_distribution() functions are deprecated in Python 3.5 + _name = platform.system() + _METADATA["os"] = { + "type": _name, + "name": _name, + "architecture": platform.machine(), + # Kernel version (e.g. 4.4.0-17-generic). + "version": platform.release(), + } +elif sys.platform == "darwin": + _METADATA["os"] = { + "type": platform.system(), + "name": platform.system(), + "architecture": platform.machine(), + # (mac|i|tv)OS(X) version (e.g. 10.11.6) instead of darwin + # kernel version. + "version": platform.mac_ver()[0], + } +elif sys.platform == "win32": + _ver = sys.getwindowsversion() + _METADATA["os"] = { + "type": "Windows", + "name": "Windows", + # Avoid using platform calls, see PYTHON-4455. + "architecture": os.environ.get("PROCESSOR_ARCHITECTURE") or platform.machine(), + # Windows patch level (e.g. 10.0.17763-SP0). + "version": ".".join(map(str, _ver[:3])) + f"-SP{_ver[-1] or '0'}", + } +elif sys.platform.startswith("java"): + _name, _ver, _arch = platform.java_ver()[-1] + _METADATA["os"] = { + # Linux, Windows 7, Mac OS X, etc. + "type": _name, + "name": _name, + # x86, x86_64, AMD64, etc. + "architecture": _arch, + # Linux kernel version, OSX version, etc. + "version": _ver, + } +else: + # Get potential alias (e.g. SunOS 5.11 becomes Solaris 2.11) + _aliased = platform.system_alias(platform.system(), platform.release(), platform.version()) + _METADATA["os"] = { + "type": platform.system(), + "name": " ".join([part for part in _aliased[:2] if part]), + "architecture": platform.machine(), + "version": _aliased[2], + } + +if platform.python_implementation().startswith("PyPy"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.pypy_version_info)), # type: ignore + "(Python %s)" % ".".join(map(str, sys.version_info)), + ) + ) +elif sys.platform.startswith("java"): + _METADATA["platform"] = " ".join( + ( + platform.python_implementation(), + ".".join(map(str, sys.version_info)), + "(%s)" % " ".join((platform.system(), platform.release())), + ) + ) +else: + _METADATA["platform"] = " ".join( + (platform.python_implementation(), ".".join(map(str, sys.version_info))) + ) + +DOCKER_ENV_PATH = "/.dockerenv" +ENV_VAR_K8S = "KUBERNETES_SERVICE_HOST" + +RUNTIME_NAME_DOCKER = "docker" +ORCHESTRATOR_NAME_K8S = "kubernetes" + + +def get_container_env_info() -> dict[str, str]: + """Returns the runtime and orchestrator of a container. 
+ If neither value is present, the metadata client.env.container field will be omitted.""" + container = {} + + if Path(DOCKER_ENV_PATH).exists(): + container["runtime"] = RUNTIME_NAME_DOCKER + if os.getenv(ENV_VAR_K8S): + container["orchestrator"] = ORCHESTRATOR_NAME_K8S + + return container + + +def _is_lambda() -> bool: + if os.getenv("AWS_LAMBDA_RUNTIME_API"): + return True + env = os.getenv("AWS_EXECUTION_ENV") + if env: + return env.startswith("AWS_Lambda_") + return False + + +def _is_azure_func() -> bool: + return bool(os.getenv("FUNCTIONS_WORKER_RUNTIME")) + + +def _is_gcp_func() -> bool: + return bool(os.getenv("K_SERVICE") or os.getenv("FUNCTION_NAME")) + + +def _is_vercel() -> bool: + return bool(os.getenv("VERCEL")) + + +def _is_faas() -> bool: + return _is_lambda() or _is_azure_func() or _is_gcp_func() or _is_vercel() + + +def _getenv_int(key: str) -> Optional[int]: + """Like os.getenv but returns an int, or None if the value is missing/malformed.""" + val = os.getenv(key) + if not val: + return None + try: + return int(val) + except ValueError: + return None + + +def _metadata_env() -> dict[str, Any]: + env: dict[str, Any] = {} + container = get_container_env_info() + if container: + env["container"] = container + # Skip if multiple (or no) envs are matched. + if (_is_lambda(), _is_azure_func(), _is_gcp_func(), _is_vercel()).count(True) != 1: + return env + if _is_lambda(): + env["name"] = "aws.lambda" + region = os.getenv("AWS_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("AWS_LAMBDA_FUNCTION_MEMORY_SIZE") + if memory_mb is not None: + env["memory_mb"] = memory_mb + elif _is_azure_func(): + env["name"] = "azure.func" + elif _is_gcp_func(): + env["name"] = "gcp.func" + region = os.getenv("FUNCTION_REGION") + if region: + env["region"] = region + memory_mb = _getenv_int("FUNCTION_MEMORY_MB") + if memory_mb is not None: + env["memory_mb"] = memory_mb + timeout_sec = _getenv_int("FUNCTION_TIMEOUT_SEC") + if timeout_sec is not None: + env["timeout_sec"] = timeout_sec + elif _is_vercel(): + env["name"] = "vercel" + region = os.getenv("VERCEL_REGION") + if region: + env["region"] = region + return env + + +_MAX_METADATA_SIZE = 512 + + +# See: https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.md#limitations +def _truncate_metadata(metadata: MutableMapping[str, Any]) -> None: + """Perform metadata truncation.""" + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 1. Omit fields from env except env.name. + env_name = metadata.get("env", {}).get("name") + if env_name: + metadata["env"] = {"name": env_name} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 2. Omit fields from os except os.type. + os_type = metadata.get("os", {}).get("type") + if os_type: + metadata["os"] = {"type": os_type} + if len(bson.encode(metadata)) <= _MAX_METADATA_SIZE: + return + # 3. Omit the env document entirely. + metadata.pop("env", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 4. Truncate platform. + overflow = encoded_size - _MAX_METADATA_SIZE + plat = metadata.get("platform", "") + if plat: + plat = plat[:-overflow] + if plat: + metadata["platform"] = plat + else: + metadata.pop("platform", None) + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # 5. Truncate driver info. 
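# A hedged, worked illustration of the steps above (sizes invented): with
# len(bson.encode(metadata)) == 540 against the 512-byte cap, steps 1-3
# strip env and os detail first; step 4 then computes overflow = 28 and
# drops the trailing 28 characters of platform (one byte each in a BSON
# string). Only if the document is still oversized does this final step
# trim the driver fields, never below PyMongo's own name and version:
#
#     meta = {"driver": {"name": "PyMongo", "version": "9.9.9"},  # version invented
#             "platform": "CPython 3.12.0" + "x" * 600}
#     _truncate_metadata(meta)
#     assert len(bson.encode(meta)) <= _MAX_METADATA_SIZE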
+ overflow = encoded_size - _MAX_METADATA_SIZE + driver = metadata.get("driver", {}) + if driver: + # Truncate driver version. + driver_version = driver.get("version")[:-overflow] + if len(driver_version) >= len(_METADATA["driver"]["version"]): + metadata["driver"]["version"] = driver_version + else: + metadata["driver"]["version"] = _METADATA["driver"]["version"] + encoded_size = len(bson.encode(metadata)) + if encoded_size <= _MAX_METADATA_SIZE: + return + # Truncate driver name. + overflow = encoded_size - _MAX_METADATA_SIZE + driver_name = driver.get("name")[:-overflow] + if len(driver_name) >= len(_METADATA["driver"]["name"]): + metadata["driver"]["name"] = driver_name + else: + metadata["driver"]["name"] = _METADATA["driver"]["name"] + + +# If the first getaddrinfo call of this interpreter's life is on a thread, +# while the main thread holds the import lock, getaddrinfo deadlocks trying +# to import the IDNA codec. Import it here, where presumably we're on the +# main thread, to avoid the deadlock. See PYTHON-607. +"foo".encode("idna") + + +class PoolOptions: + """Read only connection pool options for an AsyncMongoClient/MongoClient. + + Should not be instantiated directly by application developers. Access + a client's pool options via + :attr:`~pymongo.client_options.ClientOptions.pool_options` instead:: + + pool_opts = client.options.pool_options + pool_opts.max_pool_size + pool_opts.min_pool_size + + """ + + __slots__ = ( + "__max_pool_size", + "__min_pool_size", + "__max_idle_time_seconds", + "__connect_timeout", + "__socket_timeout", + "__wait_queue_timeout", + "__ssl_context", + "__tls_allow_invalid_hostnames", + "__event_listeners", + "__appname", + "__driver", + "__metadata", + "__compression_settings", + "__max_connecting", + "__pause_enabled", + "__server_api", + "__load_balanced", + "__credentials", + ) + + def __init__( + self, + max_pool_size: int = MAX_POOL_SIZE, + min_pool_size: int = MIN_POOL_SIZE, + max_idle_time_seconds: Optional[int] = MAX_IDLE_TIME_SEC, + connect_timeout: Optional[float] = None, + socket_timeout: Optional[float] = None, + wait_queue_timeout: Optional[int] = WAIT_QUEUE_TIMEOUT, + ssl_context: Optional[SSLContext] = None, + tls_allow_invalid_hostnames: bool = False, + event_listeners: Optional[_EventListeners] = None, + appname: Optional[str] = None, + driver: Optional[DriverInfo] = None, + compression_settings: Optional[CompressionSettings] = None, + max_connecting: int = MAX_CONNECTING, + pause_enabled: bool = True, + server_api: Optional[ServerApi] = None, + load_balanced: Optional[bool] = None, + credentials: Optional[MongoCredential] = None, + is_sync: Optional[bool] = True, + ): + self.__max_pool_size = max_pool_size + self.__min_pool_size = min_pool_size + self.__max_idle_time_seconds = max_idle_time_seconds + self.__connect_timeout = connect_timeout + self.__socket_timeout = socket_timeout + self.__wait_queue_timeout = wait_queue_timeout + self.__ssl_context = ssl_context + self.__tls_allow_invalid_hostnames = tls_allow_invalid_hostnames + self.__event_listeners = event_listeners + self.__appname = appname + self.__driver = driver + self.__compression_settings = compression_settings + self.__max_connecting = max_connecting + self.__pause_enabled = pause_enabled + self.__server_api = server_api + self.__load_balanced = load_balanced + self.__credentials = credentials + self.__metadata = copy.deepcopy(_METADATA) + + if appname: + self.__metadata["application"] = {"name": appname} + + # Combine the "driver" AsyncMongoClient option with 
PyMongo's info, like: + # { + # 'driver': { + # 'name': 'PyMongo|MyDriver', + # 'version': '4.2.0|1.2.3', + # }, + # 'platform': 'CPython 3.8.0|MyPlatform' + # } + if has_c(): + self.__metadata["driver"]["name"] = "{}|{}".format( + self.__metadata["driver"]["name"], + "c", + ) + if not is_sync: + self.__metadata["driver"]["name"] = "{}|{}".format( + self.__metadata["driver"]["name"], + "async", + ) + if driver: + self._update_metadata(driver) + + env = _metadata_env() + if env: + self.__metadata["env"] = env + + _truncate_metadata(self.__metadata) + + def _update_metadata(self, driver: DriverInfo) -> None: + """Updates the client's metadata""" + if driver.name and driver.name.lower() in self.__metadata["driver"]["name"].lower().split( + "|" + ): + return + + metadata = copy.deepcopy(self.__metadata) + + if driver.name: + metadata["driver"]["name"] = "{}|{}".format( + metadata["driver"]["name"], + driver.name, + ) + if driver.version: + metadata["driver"]["version"] = "{}|{}".format( + metadata["driver"]["version"], + driver.version, + ) + if driver.platform: + metadata["platform"] = "{}|{}".format(metadata["platform"], driver.platform) + + self.__metadata = metadata + + @property + def _credentials(self) -> Optional[MongoCredential]: + """A :class:`~pymongo.auth.MongoCredentials` instance or None.""" + return self.__credentials + + @property + def non_default_options(self) -> dict[str, Any]: + """The non-default options this pool was created with. + + Added for CMAP's :class:`PoolCreatedEvent`. + """ + opts = {} + if self.__max_pool_size != MAX_POOL_SIZE: + opts["maxPoolSize"] = self.__max_pool_size + if self.__min_pool_size != MIN_POOL_SIZE: + opts["minPoolSize"] = self.__min_pool_size + if self.__max_idle_time_seconds != MAX_IDLE_TIME_SEC: + assert self.__max_idle_time_seconds is not None + opts["maxIdleTimeMS"] = self.__max_idle_time_seconds * 1000 + if self.__wait_queue_timeout != WAIT_QUEUE_TIMEOUT: + assert self.__wait_queue_timeout is not None + opts["waitQueueTimeoutMS"] = self.__wait_queue_timeout * 1000 + if self.__max_connecting != MAX_CONNECTING: + opts["maxConnecting"] = self.__max_connecting + return opts + + @property + def max_pool_size(self) -> float: + """The maximum allowable number of concurrent connections to each + connected server. Requests to a server will block if there are + `maxPoolSize` outstanding connections to the requested server. + Defaults to 100. Cannot be 0. + + When a server's pool has reached `max_pool_size`, operations for that + server block waiting for a socket to be returned to the pool. If + ``waitQueueTimeoutMS`` is set, a blocked operation will raise + :exc:`~pymongo.errors.ConnectionFailure` after a timeout. + By default ``waitQueueTimeoutMS`` is not set. + """ + return self.__max_pool_size + + @property + def min_pool_size(self) -> int: + """The minimum required number of concurrent connections that the pool + will maintain to each connected server. Default is 0. + """ + return self.__min_pool_size + + @property + def max_connecting(self) -> int: + """The maximum number of concurrent connection creation attempts per + pool. Defaults to 2. + """ + return self.__max_connecting + + @property + def pause_enabled(self) -> bool: + return self.__pause_enabled + + @property + def max_idle_time_seconds(self) -> Optional[int]: + """The maximum number of seconds that a connection can remain + idle in the pool before being removed and replaced. Defaults to + `None` (no limit). 
+ """ + return self.__max_idle_time_seconds + + @property + def connect_timeout(self) -> Optional[float]: + """How long a connection can take to be opened before timing out.""" + return self.__connect_timeout + + @property + def socket_timeout(self) -> Optional[float]: + """How long a send or receive on a socket can take before timing out.""" + return self.__socket_timeout + + @property + def wait_queue_timeout(self) -> Optional[int]: + """How long a thread will wait for a socket from the pool if the pool + has no free sockets. + """ + return self.__wait_queue_timeout + + @property + def _ssl_context(self) -> Optional[SSLContext]: + """An SSLContext instance or None.""" + return self.__ssl_context + + @property + def tls_allow_invalid_hostnames(self) -> bool: + """If True skip ssl.match_hostname.""" + return self.__tls_allow_invalid_hostnames + + @property + def _event_listeners(self) -> Optional[_EventListeners]: + """An instance of pymongo.monitoring._EventListeners.""" + return self.__event_listeners + + @property + def appname(self) -> Optional[str]: + """The application name, for sending with hello in server handshake.""" + return self.__appname + + @property + def driver(self) -> Optional[DriverInfo]: + """Driver name and version, for sending with hello in handshake.""" + return self.__driver + + @property + def _compression_settings(self) -> Optional[CompressionSettings]: + return self.__compression_settings + + @property + def metadata(self) -> dict[str, Any]: + """A dict of metadata about the application, driver, os, and platform.""" + return self.__metadata.copy() + + @property + def server_api(self) -> Optional[ServerApi]: + """A pymongo.server_api.ServerApi or None.""" + return self.__server_api + + @property + def load_balanced(self) -> Optional[bool]: + """True if this Pool is configured in load balanced mode.""" + return self.__load_balanced diff --git a/pymongo/pool_shared.py b/pymongo/pool_shared.py new file mode 100644 index 0000000000..8db26ccead --- /dev/null +++ b/pymongo/pool_shared.py @@ -0,0 +1,521 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Pool utilities and shared helper methods.""" +from __future__ import annotations + +import asyncio +import functools +import socket +import ssl +import sys +from typing import ( + TYPE_CHECKING, + Any, + NoReturn, + Optional, + Union, +) + +from pymongo import _csot +from pymongo.asynchronous.helpers import _getaddrinfo +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConnectionFailure, + NetworkTimeout, + _CertificateError, +) +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details +from pymongo.network_layer import AsyncNetworkingInterface, NetworkingInterface, PyMongoProtocol +from pymongo.pool_options import PoolOptions +from pymongo.ssl_support import PYSSLError, SSLError, _has_sni + +SSLErrors = (PYSSLError, SSLError) +if TYPE_CHECKING: + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + +try: + from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl + + def _set_non_inheritable_non_atomic(fd: int) -> None: + """Set the close-on-exec flag on the given file descriptor.""" + flags = fcntl(fd, F_GETFD) + fcntl(fd, F_SETFD, flags | FD_CLOEXEC) + +except ImportError: + # Windows, various platforms we don't claim to support + # (Jython, IronPython, ..), systems that don't provide + # everything we need from fcntl, etc. + def _set_non_inheritable_non_atomic(fd: int) -> None: # noqa: ARG001 + """Dummy function for platforms that don't provide fcntl.""" + + +_MAX_TCP_KEEPIDLE = 120 +_MAX_TCP_KEEPINTVL = 10 +_MAX_TCP_KEEPCNT = 9 + +if sys.platform == "win32": + try: + import _winreg as winreg + except ImportError: + import winreg + + def _query(key, name, default): + try: + value, _ = winreg.QueryValueEx(key, name) + # Ensure the value is a number or raise ValueError. + return int(value) + except (OSError, ValueError): + # QueryValueEx raises OSError when the key does not exist (i.e. + # the system is using the Windows default value). + return default + + try: + with winreg.OpenKey( + winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters" + ) as key: + _WINDOWS_TCP_IDLE_MS = _query(key, "KeepAliveTime", 7200000) + _WINDOWS_TCP_INTERVAL_MS = _query(key, "KeepAliveInterval", 1000) + except OSError: + # We could not check the default values because winreg.OpenKey failed. + # Assume the system is using the default values. + _WINDOWS_TCP_IDLE_MS = 7200000 + _WINDOWS_TCP_INTERVAL_MS = 1000 + + def _set_keepalive_times(sock): + idle_ms = min(_WINDOWS_TCP_IDLE_MS, _MAX_TCP_KEEPIDLE * 1000) + interval_ms = min(_WINDOWS_TCP_INTERVAL_MS, _MAX_TCP_KEEPINTVL * 1000) + if idle_ms < _WINDOWS_TCP_IDLE_MS or interval_ms < _WINDOWS_TCP_INTERVAL_MS: + sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, idle_ms, interval_ms)) + +else: + + def _set_tcp_option(sock: socket.socket, tcp_option: str, max_value: int) -> None: + if hasattr(socket, tcp_option): + sockopt = getattr(socket, tcp_option) + try: + # PYTHON-1350 - NetBSD doesn't implement getsockopt for + # TCP_KEEPIDLE and friends. Don't attempt to set the + # values there. 
+ default = sock.getsockopt(socket.IPPROTO_TCP, sockopt) + if default > max_value: + sock.setsockopt(socket.IPPROTO_TCP, sockopt, max_value) + except OSError: + pass + + def _set_keepalive_times(sock: socket.socket) -> None: + _set_tcp_option(sock, "TCP_KEEPIDLE", _MAX_TCP_KEEPIDLE) + _set_tcp_option(sock, "TCP_KEEPINTVL", _MAX_TCP_KEEPINTVL) + _set_tcp_option(sock, "TCP_KEEPCNT", _MAX_TCP_KEEPCNT) + + +def _raise_connection_failure( + address: Any, + error: Exception, + msg_prefix: Optional[str] = None, + timeout_details: Optional[dict[str, float]] = None, +) -> NoReturn: + """Convert a socket.error to ConnectionFailure and raise it.""" + host, port = address + # If connecting to a Unix socket, port will be None. + if port is not None: + msg = "%s:%d: %s" % (host, port, error) + else: + msg = f"{host}: {error}" + if msg_prefix: + msg = msg_prefix + msg + if "configured timeouts" not in msg: + msg += format_timeout_details(timeout_details) + if ( + isinstance(error, socket.timeout) + or isinstance(error, SSLErrors) + and "timed out" in str(error) + ): + raise NetworkTimeout(msg) from error + else: + raise AutoReconnect(msg) from error + + +class _CancellationContext: + def __init__(self) -> None: + self._cancelled = False + + def cancel(self) -> None: + """Cancel this context.""" + self._cancelled = True + + @property + def cancelled(self) -> bool: + """Was cancel called?""" + return self._cancelled + + +async def _async_create_connection(address: _Address, options: PoolOptions) -> socket.socket: + """Given (host, port) and PoolOptions, connect and return a raw socket object. + + Can raise socket.error. + + This is a modified version of create_connection from CPython >= 2.7. + """ + host, port = address + + # Check if dealing with a unix domain socket + if host.endswith(".sock"): + if not hasattr(socket, "AF_UNIX"): + raise ConnectionFailure("UNIX-sockets are not supported on this system") + sock = socket.socket(socket.AF_UNIX) + # SOCK_CLOEXEC not supported for Unix sockets. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setblocking(False) + await asyncio.get_running_loop().sock_connect(sock, host) + return sock + except OSError: + sock.close() + raise + + # Don't try IPv6 if we don't support it. Also skip it if host + # is 'localhost' (::1 is fine). Avoids slow connect issues + # like PYTHON-356. + family = socket.AF_INET + if socket.has_ipv6 and host != "localhost": + family = socket.AF_UNSPEC + + err = None + for res in await _getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): + af, socktype, proto, dummy, sa = res + # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited + # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 + # all file descriptors are created non-inheritable. See PEP 446. + try: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: + # Can SOCK_CLOEXEC be defined even if the kernel doesn't support + # it? + sock = socket.socket(af, socktype, proto) + # Fallback when SOCK_CLOEXEC isn't available. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # CSOT: apply timeout to socket connect. 
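# A hedged illustration of the selection below: when a client-side operation
# timeout (CSOT) is active, the operation's remaining budget replaces
# connectTimeoutMS outright; with no CSOT, _csot.remaining() returns None and
# options.connect_timeout applies; an exhausted budget raises immediately:
#
#     remaining = _csot.remaining()  # e.g. 0.8 with timeoutMS=1000 and 200ms spent
#     timeout = remaining if remaining is not None else options.connect_timeout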
+ timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) + # Socket needs to be non-blocking during connection to not block the event loop + sock.setblocking(False) + await asyncio.wait_for( + asyncio.get_running_loop().sock_connect(sock, sa), timeout=timeout + ) + sock.settimeout(timeout) + return sock + except asyncio.TimeoutError as e: + sock.close() + err = socket.timeout("timed out") + err.__cause__ = e + except OSError as e: + sock.close() + err = e # type: ignore[assignment] + + if err is not None: + raise err + else: + # This likely means we tried to connect to an IPv6 only + # host with an OS/kernel or Python interpreter that doesn't + # support IPv6. The test case is Jython2.5.1 which doesn't + # support IPv6 at all. + raise OSError("getaddrinfo failed") + + +async def _async_configured_socket( + address: _Address, options: PoolOptions +) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a raw configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = await _async_create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if _has_sni(False): + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor( + None, + functools.partial(ssl_context.wrap_socket, sock, server_hostname=host), # type: ignore[assignment, misc, unused-ignore] + ) + else: + loop = asyncio.get_running_loop() + ssl_sock = await loop.run_in_executor(None, ssl_context.wrap_socket, sock) # type: ignore[assignment, misc, unused-ignore] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +async def _configured_protocol_interface( + address: _Address, options: PoolOptions +) -> AsyncNetworkingInterface: + """Given (host, port) and PoolOptions, return a configured AsyncNetworkingInterface. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets protocol's SSL and timeout options. 
+ """ + sock = await _async_create_connection(address, options) + ssl_context = options._ssl_context + timeout = options.socket_timeout + + if ssl_context is None: + return AsyncNetworkingInterface( + await asyncio.get_running_loop().create_connection( + lambda: PyMongoProtocol(timeout=timeout), sock=sock + ) + ) + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + transport, protocol = await asyncio.get_running_loop().create_connection( # type: ignore[call-overload] + lambda: PyMongoProtocol(timeout=timeout), + sock=sock, + server_hostname=host, + ssl=ssl_context, + ) + except _CertificateError: + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(transport.get_extra_info("peercert"), hostname=host) # type:ignore[attr-defined,unused-ignore] + except _CertificateError: + transport.abort() + raise + + return AsyncNetworkingInterface((transport, protocol)) + + +def _create_connection(address: _Address, options: PoolOptions) -> socket.socket: + """Given (host, port) and PoolOptions, connect and return a raw socket object. + + Can raise socket.error. + + This is a modified version of create_connection from CPython >= 2.7. + """ + host, port = address + + # Check if dealing with a unix domain socket + if host.endswith(".sock"): + if not hasattr(socket, "AF_UNIX"): + raise ConnectionFailure("UNIX-sockets are not supported on this system") + sock = socket.socket(socket.AF_UNIX) + # SOCK_CLOEXEC not supported for Unix sockets. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.connect(host) + return sock + except OSError: + sock.close() + raise + + # Don't try IPv6 if we don't support it. Also skip it if host + # is 'localhost' (::1 is fine). Avoids slow connect issues + # like PYTHON-356. + family = socket.AF_INET + if socket.has_ipv6 and host != "localhost": + family = socket.AF_UNSPEC + + err = None + for res in socket.getaddrinfo(host, port, family=family, type=socket.SOCK_STREAM): # type: ignore[attr-defined, unused-ignore] + af, socktype, proto, dummy, sa = res + # SOCK_CLOEXEC was new in CPython 3.2, and only available on a limited + # number of platforms (newer Linux and *BSD). Starting with CPython 3.4 + # all file descriptors are created non-inheritable. See PEP 446. + try: + sock = socket.socket(af, socktype | getattr(socket, "SOCK_CLOEXEC", 0), proto) + except OSError: + # Can SOCK_CLOEXEC be defined even if the kernel doesn't support + # it? + sock = socket.socket(af, socktype, proto) + # Fallback when SOCK_CLOEXEC isn't available. + _set_non_inheritable_non_atomic(sock.fileno()) + try: + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + # CSOT: apply timeout to socket connect. 
+ timeout = _csot.remaining() + if timeout is None: + timeout = options.connect_timeout + elif timeout <= 0: + raise socket.timeout("timed out") + sock.settimeout(timeout) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True) + _set_keepalive_times(sock) + sock.connect(sa) + return sock + except OSError as e: + err = e + sock.close() + + if err is not None: + raise err + else: + # This likely means we tried to connect to an IPv6 only + # host with an OS/kernel or Python interpreter that doesn't + # support IPv6. The test case is Jython2.5.1 which doesn't + # support IPv6 at all. + raise OSError("getaddrinfo failed") + + +def _configured_socket(address: _Address, options: PoolOptions) -> Union[socket.socket, _sslConn]: + """Given (host, port) and PoolOptions, return a raw configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return sock + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if _has_sni(True): + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) # type: ignore[assignment, misc, unused-ignore] + else: + ssl_sock = ssl_context.wrap_socket(sock) # type: ignore[assignment, misc, unused-ignore] + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. + details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined, unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return ssl_sock + + +def _configured_socket_interface(address: _Address, options: PoolOptions) -> NetworkingInterface: + """Given (host, port) and PoolOptions, return a NetworkingInterface wrapping a configured socket. + + Can raise socket.error, ConnectionFailure, or _CertificateError. + + Sets socket's SSL and timeout options. + """ + sock = _create_connection(address, options) + ssl_context = options._ssl_context + + if ssl_context is None: + sock.settimeout(options.socket_timeout) + return NetworkingInterface(sock) + + host = address[0] + try: + # We have to pass hostname / ip address to wrap_socket + # to use SSLContext.check_hostname. + if _has_sni(True): + ssl_sock = ssl_context.wrap_socket(sock, server_hostname=host) + else: + ssl_sock = ssl_context.wrap_socket(sock) + except _CertificateError: + sock.close() + # Raise _CertificateError directly like we do after match_hostname + # below. + raise + except (OSError, *SSLErrors) as exc: + sock.close() + # We raise AutoReconnect for transient and permanent SSL handshake + # failures alike. Permanent handshake failures, like protocol + # mismatch, will be turned into ServerSelectionTimeoutErrors later. 
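# For reference, _raise_connection_failure (defined earlier in this module)
# maps the caught error: socket.timeout, or an SSL error whose message
# contains "timed out", becomes NetworkTimeout; everything else becomes
# AutoReconnect, so e.g. a refused TCP connection would surface along the
# lines of AutoReconnect("host:27017: [Errno 111] Connection refused").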
+ details = _get_timeout_details(options) + _raise_connection_failure(address, exc, "SSL handshake failed: ", timeout_details=details) + if ( + ssl_context.verify_mode + and not ssl_context.check_hostname + and not options.tls_allow_invalid_hostnames + ): + try: + ssl.match_hostname(ssl_sock.getpeercert(), hostname=host) # type:ignore[attr-defined,unused-ignore] + except _CertificateError: + ssl_sock.close() + raise + + ssl_sock.settimeout(options.socket_timeout) + return NetworkingInterface(ssl_sock) diff --git a/pymongo/pyopenssl_context.py b/pymongo/pyopenssl_context.py index 758a741b6f..08fe99c889 100644 --- a/pymongo/pyopenssl_context.py +++ b/pymongo/pyopenssl_context.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,10 @@ """A CPython compatible SSLContext implementation wrapping PyOpenSSL's context. + +Due to limitations of the CPython asyncio.Protocol implementation for SSL, the async API does not support PyOpenSSL. """ +from __future__ import annotations import socket as _socket import ssl as _stdlibssl @@ -22,21 +25,26 @@ import time as _time from errno import EINTR as _EINTR from ipaddress import ip_address as _ip_address +from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar, Union -from cryptography.x509 import load_der_x509_certificate as _load_der_x509_certificate +import cryptography.x509 as x509 +import service_identity from OpenSSL import SSL as _SSL from OpenSSL import crypto as _crypto -from service_identity import CertificateError as _SICertificateError -from service_identity import VerificationError as _SIVerificationError -from service_identity.pyopenssl import verify_hostname as _verify_hostname -from service_identity.pyopenssl import verify_ip_address as _verify_ip_address from pymongo.errors import ConfigurationError as _ConfigurationError -from pymongo.errors import _CertificateError +from pymongo.errors import _CertificateError # type:ignore[attr-defined] from pymongo.ocsp_cache import _OCSPCache from pymongo.ocsp_support import _load_trusted_ca_certs, _ocsp_callback from pymongo.socket_checker import SocketChecker as _SocketChecker from pymongo.socket_checker import _errno_from_exception +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from ssl import VerifyMode + + +_T = TypeVar("_T") try: import certifi @@ -67,25 +75,28 @@ _stdlibssl.CERT_REQUIRED: _SSL.VERIFY_PEER | _SSL.VERIFY_FAIL_IF_NO_PEER_CERT, } -_REVERSE_VERIFY_MAP = dict((value, key) for key, value in _VERIFY_MAP.items()) +_REVERSE_VERIFY_MAP = {value: key for key, value in _VERIFY_MAP.items()} # For SNI support. According to RFC6066, section 3, IPv4 and IPv6 literals are # not permitted for SNI hostname. -def _is_ip_address(address): +def _is_ip_address(address: Any) -> bool: try: _ip_address(address) return True - except (ValueError, UnicodeError): # noqa: B014 + except (ValueError, UnicodeError): return False -# According to the docs for Connection.send it can raise +# According to the docs for socket.send it can raise # WantX509LookupError and should be retried. 
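# WantReadError / WantWriteError mean the TLS state machine needs the
# underlying socket to become readable / writable before the same call is
# retried; _sslConn._call below waits on the socket via SocketChecker and
# loops until the operation completes or the configured timeout elapses.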
BLOCKING_IO_ERRORS = (_SSL.WantReadError, _SSL.WantWriteError, _SSL.WantX509LookupError) +BLOCKING_IO_READ_ERROR = _SSL.WantReadError +BLOCKING_IO_WRITE_ERROR = _SSL.WantWriteError +BLOCKING_IO_LOOKUP_ERROR = _SSL.WantX509LookupError -def _ragged_eof(exc): +def _ragged_eof(exc: BaseException) -> bool: """Return True if the OpenSSL.SSL.SysCallError is a ragged EOF.""" return exc.args == (-1, "Unexpected EOF") @@ -94,12 +105,17 @@ def _ragged_eof(exc): # https://github.com/pyca/pyopenssl/issues/176 # https://docs.python.org/3/library/ssl.html#notes-on-non-blocking-sockets class _sslConn(_SSL.Connection): - def __init__(self, ctx, sock, suppress_ragged_eofs): + def __init__( + self, + ctx: _SSL.Context, + sock: Optional[_socket.socket], + suppress_ragged_eofs: bool, + ): self.socket_checker = _SocketChecker() self.suppress_ragged_eofs = suppress_ragged_eofs - super(_sslConn, self).__init__(ctx, sock) + super().__init__(ctx, sock) - def _call(self, call, *args, **kwargs): + def _call(self, call: Callable[..., _T], *args: Any, **kwargs: Any) -> _T: timeout = self.gettimeout() if timeout: start = _time.monotonic() @@ -107,6 +123,14 @@ def _call(self, call, *args, **kwargs): try: return call(*args, **kwargs) except BLOCKING_IO_ERRORS as exc: + # Do not retry if the connection is in non-blocking mode. + if timeout == 0: + raise exc + # Check for closed socket. + if self.fileno() == -1: + if timeout and _time.monotonic() - start > timeout: + raise _socket.timeout("timed out") from None + raise SSLError("Underlying socket has been closed") from None if isinstance(exc, _SSL.WantReadError): want_read = True want_write = False @@ -118,71 +142,68 @@ def _call(self, call, *args, **kwargs): want_write = True self.socket_checker.select(self, want_read, want_write, timeout) if timeout and _time.monotonic() - start > timeout: - raise _socket.timeout("timed out") + raise _socket.timeout("timed out") from None continue - def do_handshake(self, *args, **kwargs): - return self._call(super(_sslConn, self).do_handshake, *args, **kwargs) + def do_handshake(self, *args: Any, **kwargs: Any) -> None: + return self._call(super().do_handshake, *args, **kwargs) - def recv(self, *args, **kwargs): + def recv(self, *args: Any, **kwargs: Any) -> bytes: try: - return self._call(super(_sslConn, self).recv, *args, **kwargs) + return self._call(super().recv, *args, **kwargs) except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): return b"" raise - def recv_into(self, *args, **kwargs): + def recv_into(self, *args: Any, **kwargs: Any) -> int: try: - return self._call(super(_sslConn, self).recv_into, *args, **kwargs) # type: ignore + return self._call(super().recv_into, *args, **kwargs) except _SSL.SysCallError as exc: # Suppress ragged EOFs to match the stdlib. if self.suppress_ragged_eofs and _ragged_eof(exc): return 0 raise - def sendall(self, buf, flags=0): + def sendall(self, buf: bytes, flags: int = 0) -> None: # type: ignore[override] view = memoryview(buf) total_length = len(buf) total_sent = 0 - sent = 0 while total_sent < total_length: try: - sent = self._call( - super(_sslConn, self).send, view[total_sent:], flags # type: ignore - ) + sent = self._call(super().send, view[total_sent:], flags) # XXX: It's not clear if this can actually happen. PyOpenSSL # doesn't appear to have any interrupt handling, nor any interrupt # errors for OpenSSL connections. 
- except (IOError, OSError) as exc: # noqa: B014 + except OSError as exc: if _errno_from_exception(exc) == _EINTR: continue raise # https://github.com/pyca/pyopenssl/blob/19.1.0/src/OpenSSL/SSL.py#L1756 # https://www.openssl.org/docs/man1.0.2/man3/SSL_write.html if sent <= 0: - raise Exception("Connection closed") + raise OSError("connection closed") total_sent += sent -class _CallbackData(object): +class _CallbackData: """Data class which is passed to the OCSP callback.""" - def __init__(self): - self.trusted_ca_certs = None - self.check_ocsp_endpoint = None + def __init__(self) -> None: + self.trusted_ca_certs: Optional[list[x509.Certificate]] = None + self.check_ocsp_endpoint: Optional[bool] = None self.ocsp_response_cache = _OCSPCache() -class SSLContext(object): +class SSLContext: """A CPython compatible SSLContext implementation wrapping PyOpenSSL's context. """ __slots__ = ("_protocol", "_ctx", "_callback_data", "_check_hostname") - def __init__(self, protocol): + def __init__(self, protocol: int): self._protocol = protocol self._ctx = _SSL.Context(self._protocol) self._callback_data = _CallbackData() @@ -195,68 +216,80 @@ def __init__(self, protocol): self._ctx.set_ocsp_client_callback(callback=_ocsp_callback, data=self._callback_data) @property - def protocol(self): + def protocol(self) -> int: """The protocol version chosen when constructing the context. This attribute is read-only. """ return self._protocol - def __get_verify_mode(self): + def __get_verify_mode(self) -> VerifyMode: """Whether to try to verify other peers' certificates and how to behave if verification fails. This attribute must be one of ssl.CERT_NONE, ssl.CERT_OPTIONAL or ssl.CERT_REQUIRED. """ return _REVERSE_VERIFY_MAP[self._ctx.get_verify_mode()] - def __set_verify_mode(self, value): + def __set_verify_mode(self, value: VerifyMode) -> None: """Setter for verify_mode.""" - def _cb(connobj, x509obj, errnum, errdepth, retcode): + def _cb( + _connobj: _SSL.Connection, + _x509obj: _crypto.X509, + _errnum: int, + _errdepth: int, + retcode: int, + ) -> bool: # It seems we don't need to do anything here. Twisted doesn't, # and OpenSSL's SSL_CTX_set_verify lets you pass NULL # for the callback option. It's weird that PyOpenSSL requires # this.
- return retcode + # This is optional in pyopenssl >= 20 and can be removed once minimum + # supported version is bumped + # See: pyopenssl.org/en/latest/changelog.html#id47 + return bool(retcode) self._ctx.set_verify(_VERIFY_MAP[value], _cb) verify_mode = property(__get_verify_mode, __set_verify_mode) - def __get_check_hostname(self): + def __get_check_hostname(self) -> bool: return self._check_hostname - def __set_check_hostname(self, value): - if not isinstance(value, bool): - raise TypeError("check_hostname must be True or False") + def __set_check_hostname(self, value: Any) -> None: + validate_boolean("check_hostname", value) self._check_hostname = value check_hostname = property(__get_check_hostname, __set_check_hostname) - def __get_check_ocsp_endpoint(self): + def __get_check_ocsp_endpoint(self) -> Optional[bool]: return self._callback_data.check_ocsp_endpoint - def __set_check_ocsp_endpoint(self, value): - if not isinstance(value, bool): - raise TypeError("check_ocsp must be True or False") + def __set_check_ocsp_endpoint(self, value: bool) -> None: + validate_boolean("check_ocsp", value) self._callback_data.check_ocsp_endpoint = value check_ocsp_endpoint = property(__get_check_ocsp_endpoint, __set_check_ocsp_endpoint) - def __get_options(self): + def __get_options(self) -> int: # Calling set_options adds the option to the existing bitmask and # returns the new bitmask. # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_options return self._ctx.set_options(0) - def __set_options(self, value): - # Explcitly convert to int, since newer CPython versions + def __set_options(self, value: int) -> None: + # Explicitly convert to int, since newer CPython versions # use enum.IntFlag for options. The values are the same # regardless of implementation. self._ctx.set_options(int(value)) options = property(__get_options, __set_options) - def load_cert_chain(self, certfile, keyfile=None, password=None): + def load_cert_chain( + self, + certfile: Union[str, bytes], + keyfile: Union[str, bytes, None] = None, + password: Optional[str] = None, + ) -> None: """Load a private key and the corresponding certificate. The certfile string must be the path to a single file in PEM format containing the certificate as well as any number of CA certificates needed to @@ -269,10 +302,11 @@ def load_cert_chain(self, certfile, keyfile=None, password=None): # Password callback MUST be set first or it will be ignored. if password: - def _pwcb(max_length, prompt_twice, user_data): + def _pwcb(_max_length: int, _prompt_twice: bool, _user_data: Optional[bytes]) -> bytes: # XXX:We could check the password length against what OpenSSL # tells us is the max, but we can't raise an exception, so... # warn? + assert password is not None return password.encode("utf-8") self._ctx.set_passwd_cb(_pwcb) @@ -280,7 +314,9 @@ def _pwcb(max_length, prompt_twice, user_data): self._ctx.use_privatekey_file(keyfile or certfile) self._ctx.check_privatekey() - def load_verify_locations(self, cafile=None, capath=None): + def load_verify_locations( + self, cafile: Optional[str] = None, capath: Optional[str] = None + ) -> None: """Load a set of "certification authority"(CA) certificates used to validate other peers' certificates when `~verify_mode` is other than ssl.CERT_NONE. @@ -288,9 +324,10 @@ def load_verify_locations(self, cafile=None, capath=None): self._ctx.load_verify_locations(cafile, capath) # Manually load the CA certs when get_verified_chain is not available (pyopenssl<20). 
if not hasattr(_SSL.Connection, "get_verified_chain"): + assert cafile is not None self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile) - def _load_certifi(self): + def _load_certifi(self) -> None: """Attempt to load CA certs from certifi.""" if _HAVE_CERTIFI: self.load_verify_locations(certifi.where()) @@ -302,18 +339,20 @@ def _load_certifi(self): "the tlsCAFile option" ) - def _load_wincerts(self, store): + def _load_wincerts(self, store: str) -> None: """Attempt to load CA certs from Windows trust store.""" cert_store = self._ctx.get_cert_store() + assert cert_store is not None oid = _stdlibssl.Purpose.SERVER_AUTH.oid + for cert, encoding, trust in _stdlibssl.enum_certificates(store): # type: ignore if encoding == "x509_asn": if trust is True or oid in trust: cert_store.add_cert( - _crypto.X509.from_cryptography(_load_der_x509_certificate(cert)) + _crypto.X509.from_cryptography(x509.load_der_x509_certificate(cert)) ) - def load_default_certs(self): + def load_default_certs(self) -> None: """A PyOpenSSL version of load_default_certs from CPython.""" # PyOpenSSL is incapable of loading CA certs from Windows, and mostly # incapable on macOS. @@ -329,23 +368,24 @@ def load_default_certs(self): self._load_certifi() self._ctx.set_default_verify_paths() - def set_default_verify_paths(self): + def set_default_verify_paths(self) -> None: """Specify that the platform provided CA certificates are to be used - for verification purposes.""" + for verification purposes. + """ # Note: See PyOpenSSL's docs for limitations, which are similar # but not that same as CPython's. self._ctx.set_default_verify_paths() def wrap_socket( self, - sock, - server_side=False, - do_handshake_on_connect=True, - suppress_ragged_eofs=True, - server_hostname=None, - session=None, - ): - """Wrap an existing Python socket sock and return a TLS socket + sock: _socket.socket, + server_side: bool = False, + do_handshake_on_connect: bool = True, + suppress_ragged_eofs: bool = True, + server_hostname: Optional[str] = None, + session: Optional[_SSL.Session] = None, + ) -> _sslConn: + """Wrap an existing Python socket connection and return a TLS socket object. """ ssl_conn = _sslConn(self._ctx, sock, suppress_ragged_eofs) @@ -373,11 +413,16 @@ def wrap_socket( # XXX: Do this in a callback registered with # SSLContext.set_info_callback? See Twisted for an example. if self.check_hostname and server_hostname is not None: + from service_identity import pyopenssl + try: if _is_ip_address(server_hostname): - _verify_ip_address(ssl_conn, server_hostname) + pyopenssl.verify_ip_address(ssl_conn, server_hostname) else: - _verify_hostname(ssl_conn, server_hostname) - except (_SICertificateError, _SIVerificationError) as exc: - raise _CertificateError(str(exc)) + pyopenssl.verify_hostname(ssl_conn, server_hostname) + except ( + service_identity.CertificateError, + service_identity.VerificationError, + ) as exc: + raise _CertificateError(str(exc)) from None return ssl_conn diff --git a/pymongo/read_concern.py b/pymongo/read_concern.py index dfb3930ab0..2adc403366 100644 --- a/pymongo/read_concern.py +++ b/pymongo/read_concern.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with read concerns.""" +"""Tools for working with read concerns. -from typing import Any, Dict, Optional +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations +from typing import Any, Optional -class ReadConcern(object): + +class ReadConcern: """ReadConcern - :Parameters: - - `level`: (string) The read concern level specifies the level of + :param level: (string) The read concern level specifies the level of isolation for read operations. For example, a read operation using a read concern level of ``majority`` will only return data that has been written to a majority of nodes. If the level is left unspecified, the @@ -35,7 +38,7 @@ def __init__(self, level: Optional[str] = None) -> None: if level is None or isinstance(level, str): self.__level = level else: - raise TypeError("level must be a string or None.") + raise TypeError(f"level must be a string or None, not {type(level)}") @property def level(self) -> Optional[str]: @@ -45,11 +48,12 @@ def level(self) -> Optional[str]: @property def ok_for_legacy(self) -> bool: """Return ``True`` if this read concern is compatible with - old wire protocol versions.""" + old wire protocol versions. + """ return self.level is None or self.level == "local" @property - def document(self) -> Dict[str, Any]: + def document(self) -> dict[str, Any]: """The document representation of this read concern. .. note:: @@ -66,7 +70,7 @@ def __eq__(self, other: Any) -> bool: return self.document == other.document return NotImplemented - def __repr__(self): + def __repr__(self) -> str: if self.level: return "ReadConcern(%s)" % self.level return "ReadConcern()" diff --git a/pymongo/read_preferences.py b/pymongo/read_preferences.py index ccb635bec0..35b92c4d01 100644 --- a/pymongo/read_preferences.py +++ b/pymongo/read_preferences.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,10 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utilities for choosing which member of a replica set to read from.""" +"""Utilities for choosing which member of a replica set to read from. +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. 
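Stepping back to the read_concern.py hunk above: a `ReadConcern` is normally attached to a database or collection handle rather than used on its own. A short usage sketch, assuming a reachable deployment (the database and collection names are illustrative)::

    from pymongo import MongoClient
    from pymongo.read_concern import ReadConcern

    client = MongoClient()
    # Only return majority-committed data from reads on this handle.
    coll = client.test.get_collection("events", read_concern=ReadConcern("majority"))
    assert coll.read_concern.document == {"level": "majority"}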
+""" + +from __future__ import annotations + +import warnings from collections import abc -from typing import Any, Dict, Mapping, Optional, Sequence +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence from pymongo import max_staleness_selectors from pymongo.errors import ConfigurationError @@ -24,6 +30,11 @@ secondary_with_tags_server_selector, ) +if TYPE_CHECKING: + from pymongo.server_selectors import Selection + from pymongo.topology_description import TopologyDescription + + _PRIMARY = 0 _PRIMARY_PREFERRED = 1 _SECONDARY = 2 @@ -39,36 +50,39 @@ "nearest", ) +_Hedge = Mapping[str, Any] +_TagSets = Sequence[Mapping[str, Any]] -def _validate_tag_sets(tag_sets): + +def _validate_tag_sets(tag_sets: Optional[_TagSets]) -> Optional[_TagSets]: """Validate tag sets for a MongoClient.""" if tag_sets is None: return tag_sets if not isinstance(tag_sets, (list, tuple)): - raise TypeError(("Tag sets %r invalid, must be a sequence") % (tag_sets,)) + raise TypeError(f"Tag sets {tag_sets!r} invalid, must be a sequence") if len(tag_sets) == 0: raise ValueError( - ("Tag sets %r invalid, must be None or contain at least one set of tags") % (tag_sets,) + f"Tag sets {tag_sets!r} invalid, must be None or contain at least one set of tags" ) for tags in tag_sets: if not isinstance(tags, abc.Mapping): raise TypeError( - "Tag set %r invalid, must be an instance of dict, " + f"Tag set {tags!r} invalid, must be an instance of dict, " "bson.son.SON or other type that inherits from " - "collection.Mapping" % (tags,) + "collection.Mapping" ) return list(tag_sets) -def _invalid_max_staleness_msg(max_staleness): +def _invalid_max_staleness_msg(max_staleness: Any) -> str: return "maxStalenessSeconds must be a positive integer, not %s" % max_staleness # Some duplication with common.py to avoid import cycle. -def _validate_max_staleness(max_staleness): +def _validate_max_staleness(max_staleness: Any) -> int: """Validate max_staleness.""" if max_staleness == -1: return -1 @@ -82,22 +96,23 @@ def _validate_max_staleness(max_staleness): return max_staleness -def _validate_hedge(hedge): +def _validate_hedge(hedge: Optional[_Hedge]) -> Optional[_Hedge]: """Validate hedge.""" if hedge is None: return None if not isinstance(hedge, dict): - raise TypeError("hedge must be a dictionary, not %r" % (hedge,)) + raise TypeError(f"hedge must be a dictionary, not {hedge!r}") + warnings.warn( + "The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0.", + DeprecationWarning, + stacklevel=4, + ) return hedge -_Hedge = Mapping[str, Any] -_TagSets = Sequence[Mapping[str, Any]] - - -class _ServerMode(object): +class _ServerMode: """Base class for all read preferences.""" __slots__ = ("__mongos_mode", "__mode", "__tag_sets", "__max_staleness", "__hedge") @@ -126,9 +141,9 @@ def mongos_mode(self) -> str: return self.__mongos_mode @property - def document(self) -> Dict[str, Any]: + def document(self) -> dict[str, Any]: """Read preference as a document.""" - doc: Dict[str, Any] = {"mode": self.__mongos_mode} + doc: dict[str, Any] = {"mode": self.__mongos_mode} if self.__tag_sets not in (None, [{}]): doc["tags"] = self.__tag_sets if self.__max_staleness != -1: @@ -151,6 +166,13 @@ def tag_sets(self) -> _TagSets: set, ``{}``, means "read from any member that matches the mode, ignoring tags." MongoClient tries each set of tags in turn until it finds a set of tags with at least one matching member. 
+ For example, to only send a query to an analytic node:: + + Nearest(tag_sets=[{"node":"analytics"}]) + + Or using :class:`SecondaryPreferred`:: + + SecondaryPreferred(tag_sets=[{"node":"analytics"}]) .. seealso:: `Data-Center Awareness `_ @@ -161,12 +183,15 @@ def tag_sets(self) -> _TagSets: def max_staleness(self) -> int: """The maximum estimated length of time (in seconds) a replica set secondary can fall behind the primary in replication before it will - no longer be selected for operations, or -1 for no maximum.""" + no longer be selected for operations, or -1 for no maximum. + """ return self.__max_staleness @property def hedge(self) -> Optional[_Hedge]: - """The read preference ``hedge`` parameter. + """**DEPRECATED** - The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0. + + The read preference ``hedge`` parameter. A dictionary that configures how the server will perform hedged reads. It consists of the following keys: @@ -186,6 +211,12 @@ def hedge(self) -> Optional[_Hedge]: .. versionadded:: 3.11 """ + if self.__hedge is not None: + warnings.warn( + "The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0.", + DeprecationWarning, + stacklevel=2, + ) return self.__hedge @property @@ -201,8 +232,8 @@ def min_wire_version(self) -> int: """ return 0 if self.__max_staleness == -1 else 5 - def __repr__(self): - return "%s(tag_sets=%r, max_staleness=%r, hedge=%r)" % ( + def __repr__(self) -> str: + return "{}(tag_sets={!r}, max_staleness={!r}, hedge={!r})".format( self.name, self.__tag_sets, self.__max_staleness, @@ -222,7 +253,7 @@ def __eq__(self, other: Any) -> bool: def __ne__(self, other: Any) -> bool: return not self == other - def __getstate__(self): + def __getstate__(self) -> dict[str, Any]: """Return value of object for pickling. Needed explicitly because __slots__() defined. @@ -234,7 +265,7 @@ def __getstate__(self): "hedge": self.__hedge, } - def __setstate__(self, value): + def __setstate__(self, value: Mapping[str, Any]) -> None: """Restore from pickling.""" self.__mode = value["mode"] self.__mongos_mode = _MONGOS_MODES[self.__mode] @@ -242,6 +273,9 @@ def __setstate__(self, value): self.__max_staleness = _validate_max_staleness(value["max_staleness"]) self.__hedge = _validate_hedge(value["hedge"]) + def __call__(self, selection: Selection) -> Selection: + return selection + class Primary(_ServerMode): """Primary read preference. @@ -256,13 +290,13 @@ class Primary(_ServerMode): __slots__ = () def __init__(self) -> None: - super(Primary, self).__init__(_PRIMARY) + super().__init__(_PRIMARY) - def __call__(self, selection: Any) -> Any: + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to a Selection.""" return selection.primary_selection - def __repr__(self): + def __repr__(self) -> str: return "Primary()" def __eq__(self, other: Any) -> bool: @@ -285,15 +319,14 @@ class PrimaryPreferred(_ServerMode): created reads will be routed to an available secondary until the primary of the replica set is discovered. - :Parameters: - - `tag_sets`: The :attr:`~tag_sets` to use if the primary is not + :param tag_sets: The :attr:`~tag_sets` to use if the primary is not available. 
- - `max_staleness`: (integer, in seconds) The maximum estimated + :param max_staleness: (integer, in seconds) The maximum estimated length of time a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - - `hedge`: The :attr:`~hedge` to use if the primary is not available. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -307,9 +340,9 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(PrimaryPreferred, self).__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) + super().__init__(_PRIMARY_PREFERRED, tag_sets, max_staleness, hedge) - def __call__(self, selection: Any) -> Any: + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to Selection.""" if selection.primary: return selection.primary_selection @@ -329,14 +362,13 @@ class Secondary(_ServerMode): * When connected to a replica set queries are distributed among secondaries. An error is raised if no secondaries are available. - :Parameters: - - `tag_sets`: The :attr:`~tag_sets` for this read preference. - - `max_staleness`: (integer, in seconds) The maximum estimated + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated length of time a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - - `hedge`: The :attr:`~hedge` for this read preference. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -350,9 +382,9 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(Secondary, self).__init__(_SECONDARY, tag_sets, max_staleness, hedge) + super().__init__(_SECONDARY, tag_sets, max_staleness, hedge) - def __call__(self, selection: Any) -> Any: + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to Selection.""" return secondary_with_tags_server_selector( self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) @@ -373,14 +405,13 @@ class SecondaryPreferred(_ServerMode): created reads will be routed to the primary of the replica set until an available secondary is discovered. - :Parameters: - - `tag_sets`: The :attr:`~tag_sets` for this read preference. - - `max_staleness`: (integer, in seconds) The maximum estimated + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated length of time a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - - `hedge`: The :attr:`~hedge` for this read preference. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. 
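For context on the read preference classes above, an instance is passed wherever PyMongo accepts a read preference, for example when obtaining a database handle. A sketch, assuming a replica set with tagged members (the tag values are illustrative)::

    from pymongo import MongoClient
    from pymongo.read_preferences import SecondaryPreferred

    client = MongoClient()
    # Prefer secondaries tagged {"dc": "ny"}; the empty tag set matches any
    # secondary. Only consider members at most 120s behind the primary.
    pref = SecondaryPreferred(tag_sets=[{"dc": "ny"}, {}], max_staleness=120)
    db = client.get_database("test", read_preference=pref)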
@@ -394,11 +425,9 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(SecondaryPreferred, self).__init__( - _SECONDARY_PREFERRED, tag_sets, max_staleness, hedge - ) + super().__init__(_SECONDARY_PREFERRED, tag_sets, max_staleness, hedge) - def __call__(self, selection: Any) -> Any: + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to Selection.""" secondaries = secondary_with_tags_server_selector( self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) @@ -420,14 +449,13 @@ class Nearest(_ServerMode): * When connected to a replica set queries are distributed among all members. - :Parameters: - - `tag_sets`: The :attr:`~tag_sets` for this read preference. - - `max_staleness`: (integer, in seconds) The maximum estimated + :param tag_sets: The :attr:`~tag_sets` for this read preference. + :param max_staleness: (integer, in seconds) The maximum estimated length of time a replica set secondary can fall behind the primary in replication before it will no longer be selected for operations. Default -1, meaning no maximum. If it is set, it must be at least 90 seconds. - - `hedge`: The :attr:`~hedge` for this read preference. + :param hedge: **DEPRECATED** - The :attr:`~hedge` for this read preference. .. versionchanged:: 3.11 Added ``hedge`` parameter. @@ -441,9 +469,9 @@ def __init__( max_staleness: int = -1, hedge: Optional[_Hedge] = None, ) -> None: - super(Nearest, self).__init__(_NEAREST, tag_sets, max_staleness, hedge) + super().__init__(_NEAREST, tag_sets, max_staleness, hedge) - def __call__(self, selection: Any) -> Any: + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to Selection.""" return member_with_tags_server_selector( self.tag_sets, max_staleness_selectors.select(self.max_staleness, selection) @@ -457,17 +485,16 @@ class _AggWritePref: primary read preference. * Otherwise use `pref` read preference. - :Parameters: - - `pref`: The read preference to use on MongoDB 5.0+. + :param pref: The read preference to use on MongoDB 5.0+. """ __slots__ = ("pref", "effective_pref") - def __init__(self, pref): + def __init__(self, pref: _ServerMode): self.pref = pref - self.effective_pref = ReadPreference.PRIMARY + self.effective_pref: _ServerMode = ReadPreference.PRIMARY - def selection_hook(self, topology_description): + def selection_hook(self, topology_description: TopologyDescription) -> None: common_wv = topology_description.common_wire_version if ( topology_description.has_readable_server(ReadPreference.PRIMARY_PREFERRED) @@ -478,16 +505,16 @@ def selection_hook(self, topology_description): else: self.effective_pref = self.pref - def __call__(self, selection): + def __call__(self, selection: Selection) -> Selection: """Apply this read preference to a Selection.""" return self.effective_pref(selection) - def __repr__(self): - return "_AggWritePref(pref=%r)" % (self.pref,) + def __repr__(self) -> str: + return f"_AggWritePref(pref={self.pref!r})" # Proxy other calls to the effective_pref so that _AggWritePref can be # used in place of an actual read preference. - def __getattr__(self, name): + def __getattr__(self, name: str) -> Any: return getattr(self.effective_pref, name) @@ -517,10 +544,14 @@ def make_read_preference( ) -class ReadPreference(object): - """An enum that defines the read preference modes supported by PyMongo. +class ReadPreference: + """An enum that defines some commonly used read preference modes. 
+ + Apps can also create a custom read preference, for example:: + + Nearest(tag_sets=[{"node":"analytics"}]) - See :doc:`/examples/high_availability` for code examples. + See `Read and Write Settings `_ for code examples. A read preference is used in three cases: @@ -580,7 +611,7 @@ def read_pref_mode_from_name(name: str) -> int: return _MONGOS_MODES.index(name) -class MovingAverage(object): +class MovingAverage: """Tracks an exponentially-weighted moving average.""" average: Optional[float] @@ -590,10 +621,7 @@ def __init__(self) -> None: def add_sample(self, sample: float) -> None: if sample < 0: - # Likely system time change while waiting for hello response - # and not using time.monotonic. Ignore it, the next one will - # probably be valid. - return + raise ValueError(f"duration cannot be negative {sample}") if self.average is None: self.average = sample else: diff --git a/pymongo/response.py b/pymongo/response.py index 1369eac4e0..211ddf2354 100644 --- a/pymongo/response.py +++ b/pymongo/response.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,20 +13,36 @@ # limitations under the License. """Represent a response from the server.""" +from __future__ import annotations +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Union -class Response(object): +if TYPE_CHECKING: + from datetime import timedelta + + from pymongo.message import _OpMsg, _OpReply + from pymongo.typings import _Address, _AgnosticConnection, _DocumentOut + + +class Response: __slots__ = ("_data", "_address", "_request_id", "_duration", "_from_command", "_docs") - def __init__(self, data, address, request_id, duration, from_command, docs): + def __init__( + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: Sequence[Mapping[str, Any]], + ): """Represent a response from the server. - :Parameters: - - `data`: A network response message. - - `address`: (host, port) of the source server. - - `request_id`: The request id of this operation. - - `duration`: The duration of the operation. - - `from_command`: if the response is the result of a db command. + :param data: A network response message. + :param address: (host, port) of the source server. + :param request_id: The request id of this operation. + :param duration: The duration of the operation. + :param from_command: if the response is the result of a db command. 
""" self._data = data self._address = address @@ -36,74 +52,79 @@ def __init__(self, data, address, request_id, duration, from_command, docs): self._docs = docs @property - def data(self): + def data(self) -> Union[_OpMsg, _OpReply]: """Server response's raw BSON bytes.""" return self._data @property - def address(self): + def address(self) -> _Address: """(host, port) of the source server.""" return self._address @property - def request_id(self): + def request_id(self) -> int: """The request id of this operation.""" return self._request_id @property - def duration(self): + def duration(self) -> Optional[timedelta]: """The duration of the operation.""" return self._duration @property - def from_command(self): + def from_command(self) -> bool: """If the response is a result from a db command.""" return self._from_command @property - def docs(self): + def docs(self) -> Sequence[Mapping[str, Any]]: """The decoded document(s).""" return self._docs class PinnedResponse(Response): - __slots__ = ("_socket_info", "_more_to_come") + __slots__ = ("_conn", "_more_to_come") def __init__( - self, data, address, socket_info, request_id, duration, from_command, docs, more_to_come + self, + data: Union[_OpMsg, _OpReply], + address: _Address, + conn: _AgnosticConnection, + request_id: int, + duration: Optional[timedelta], + from_command: bool, + docs: list[_DocumentOut], + more_to_come: bool, ): """Represent a response to an exhaust cursor's initial query. - :Parameters: - - `data`: A network response message. - - `address`: (host, port) of the source server. - - `socket_info`: The SocketInfo used for the initial query. - - `pool`: The Pool from which the SocketInfo came. - - `request_id`: The request id of this operation. - - `duration`: The duration of the operation. - - `from_command`: If the response is the result of a db command. - - `docs`: List of documents. - - `more_to_come`: Bool indicating whether cursor is ready to be + :param data: A network response message. + :param address: (host, port) of the source server. + :param conn: The AsyncConnection/Connection used for the initial query. + :param request_id: The request id of this operation. + :param duration: The duration of the operation. + :param from_command: If the response is the result of a db command. + :param docs: List of documents. + :param more_to_come: Bool indicating whether cursor is ready to be exhausted. """ - super(PinnedResponse, self).__init__( - data, address, request_id, duration, from_command, docs - ) - self._socket_info = socket_info + super().__init__(data, address, request_id, duration, from_command, docs) + self._conn = conn self._more_to_come = more_to_come @property - def socket_info(self): - """The SocketInfo used for the initial query. + def conn(self) -> _AgnosticConnection: + """The AsyncConnection/Connection used for the initial query. The server will send batches on this socket, without waiting for getMores from the client, until the result set is exhausted or there is an error. """ - return self._socket_info + return self._conn @property - def more_to_come(self): + def more_to_come(self) -> bool: """If true, server is ready to send batches on the socket until the - result set is exhausted or there is an error.""" + result set is exhausted or there is an error. + """ return self._more_to_come diff --git a/pymongo/results.py b/pymongo/results.py index 5803900398..bcce121fe7 100644 --- a/pymongo/results.py +++ b/pymongo/results.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,13 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Result class definitions.""" -from typing import Any, Dict, List, Optional, cast +"""Result class definitions. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +from typing import Any, Mapping, MutableMapping, Optional, cast from pymongo.errors import InvalidOperation -class _WriteResult(object): +class _WriteResult: """Base class for write result classes.""" __slots__ = ("__acknowledged",) @@ -26,14 +31,17 @@ class _WriteResult(object): def __init__(self, acknowledged: bool) -> None: self.__acknowledged = acknowledged - def _raise_if_unacknowledged(self, property_name): + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__acknowledged})" + + def _raise_if_unacknowledged(self, property_name: str) -> None: """Raise an exception on property access if unacknowledged.""" if not self.__acknowledged: raise InvalidOperation( - "A value for %s is not available when " + f"A value for {property_name} is not available when " "the write is unacknowledged. Check the " "acknowledged attribute to avoid this " - "error." % (property_name,) + "error." ) @property @@ -45,7 +53,7 @@ def acknowledged(self) -> bool: .. note:: If the :attr:`acknowledged` attribute is ``False`` all other - attibutes of this class will raise + attributes of this class will raise :class:`~pymongo.errors.InvalidOperation` when accessed. Values for other attributes cannot be determined if the write operation was unacknowledged. @@ -57,13 +65,20 @@ def acknowledged(self) -> bool: class InsertOneResult(_WriteResult): - """The return type for :meth:`~pymongo.collection.Collection.insert_one`.""" + """The return type for :meth:`~pymongo.collection.Collection.insert_one` + and as part of :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. + """ __slots__ = ("__inserted_id",) def __init__(self, inserted_id: Any, acknowledged: bool) -> None: self.__inserted_id = inserted_id - super(InsertOneResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.__inserted_id!r}, acknowledged={self.acknowledged})" + ) @property def inserted_id(self) -> Any: @@ -76,12 +91,17 @@ class InsertManyResult(_WriteResult): __slots__ = ("__inserted_ids",) - def __init__(self, inserted_ids: List[Any], acknowledged: bool) -> None: + def __init__(self, inserted_ids: list[Any], acknowledged: bool) -> None: self.__inserted_ids = inserted_ids - super(InsertManyResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.__inserted_ids!r}, acknowledged={self.acknowledged})" + ) @property - def inserted_ids(self) -> List: + def inserted_ids(self) -> list[Any]: """A list of _ids of the inserted documents, in the order provided. .. 
note:: If ``False`` is passed for the `ordered` parameter to @@ -95,17 +115,30 @@ def inserted_ids(self) -> List: class UpdateResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.update_one`, :meth:`~pymongo.collection.Collection.update_many`, and - :meth:`~pymongo.collection.Collection.replace_one`. + :meth:`~pymongo.collection.Collection.replace_one`, and as part of + :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. """ - __slots__ = ("__raw_result",) - - def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: + __slots__ = ( + "__raw_result", + "__in_client_bulk", + ) + + def __init__( + self, + raw_result: Optional[Mapping[str, Any]], + acknowledged: bool, + in_client_bulk: bool = False, + ): self.__raw_result = raw_result - super(UpdateResult, self).__init__(acknowledged) + self.__in_client_bulk = in_client_bulk + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__raw_result!r}, acknowledged={self.acknowledged})" @property - def raw_result(self) -> Dict[str, Any]: + def raw_result(self) -> Optional[Mapping[str, Any]]: """The raw result document returned by the server.""" return self.__raw_result @@ -113,7 +146,8 @@ def raw_result(self) -> Dict[str, Any]: def matched_count(self) -> int: """The number of documents matched for this update.""" self._raise_if_unacknowledged("matched_count") - if self.upserted_id is not None: + assert self.__raw_result is not None + if not self.__in_client_bulk and self.upserted_id is not None: return 0 return self.__raw_result.get("n", 0) @@ -121,6 +155,7 @@ def matched_count(self) -> int: def modified_count(self) -> int: """The number of documents modified.""" self._raise_if_unacknowledged("modified_count") + assert self.__raw_result is not None return cast(int, self.__raw_result.get("nModified")) @property @@ -129,21 +164,38 @@ def upserted_id(self) -> Any: ``None``. """ self._raise_if_unacknowledged("upserted_id") - return self.__raw_result.get("upserted") + assert self.__raw_result is not None + if self.__in_client_bulk and self.__raw_result.get("upserted"): + return self.__raw_result["upserted"]["_id"] + return self.__raw_result.get("upserted", None) + + @property + def did_upsert(self) -> bool: + """Whether an upsert took place. + + .. versionadded:: 4.9 + """ + assert self.__raw_result is not None + return "upserted" in self.__raw_result class DeleteResult(_WriteResult): """The return type for :meth:`~pymongo.collection.Collection.delete_one` - and :meth:`~pymongo.collection.Collection.delete_many`""" + and :meth:`~pymongo.collection.Collection.delete_many` + and as part of :meth:`~pymongo.mongo_client.MongoClient.bulk_write`. 
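To illustrate the result accessors above (including the `did_upsert` helper added in 4.9), a sketch against a hypothetical collection on a local deployment::

    from pymongo import MongoClient

    coll = MongoClient().test.things
    up = coll.update_one({"_id": 1}, {"$set": {"x": 1}}, upsert=True)
    if up.did_upsert:
        print("upserted with _id", up.upserted_id)
    else:
        print("matched", up.matched_count, "modified", up.modified_count)
    down = coll.delete_one({"_id": 1})
    print("deleted", down.deleted_count)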
+ """ __slots__ = ("__raw_result",) - def __init__(self, raw_result: Dict[str, Any], acknowledged: bool) -> None: + def __init__(self, raw_result: Mapping[str, Any], acknowledged: bool) -> None: self.__raw_result = raw_result - super(DeleteResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__raw_result!r}, acknowledged={self.acknowledged})" @property - def raw_result(self) -> Dict[str, Any]: + def raw_result(self) -> Mapping[str, Any]: """The raw result document returned by the server.""" return self.__raw_result @@ -154,26 +206,21 @@ def deleted_count(self) -> int: return self.__raw_result.get("n", 0) -class BulkWriteResult(_WriteResult): - """An object wrapper for bulk API write results.""" +class _BulkWriteResultBase(_WriteResult): + """Private base class for bulk write API results.""" __slots__ = ("__bulk_api_result",) - def __init__(self, bulk_api_result: Dict[str, Any], acknowledged: bool) -> None: - """Create a BulkWriteResult instance. - - :Parameters: - - `bulk_api_result`: A result dict from the bulk API - - `acknowledged`: Was this write result acknowledged? If ``False`` - then all properties of this object will raise - :exc:`~pymongo.errors.InvalidOperation`. - """ + def __init__(self, bulk_api_result: dict[str, Any], acknowledged: bool) -> None: self.__bulk_api_result = bulk_api_result - super(BulkWriteResult, self).__init__(acknowledged) + super().__init__(acknowledged) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.__bulk_api_result!r}, acknowledged={self.acknowledged})" @property - def bulk_api_result(self) -> Dict[str, Any]: - """The raw bulk API result.""" + def bulk_api_result(self) -> dict[str, Any]: + """The raw bulk write API result.""" return self.__bulk_api_result @property @@ -198,7 +245,10 @@ def modified_count(self) -> int: def deleted_count(self) -> int: """The number of documents deleted.""" self._raise_if_unacknowledged("deleted_count") - return cast(int, self.__bulk_api_result.get("nRemoved")) + if "nRemoved" in self.__bulk_api_result: + return cast(int, self.__bulk_api_result.get("nRemoved")) + else: + return cast(int, self.__bulk_api_result.get("nDeleted")) @property def upserted_count(self) -> int: @@ -206,12 +256,112 @@ def upserted_count(self) -> int: self._raise_if_unacknowledged("upserted_count") return cast(int, self.__bulk_api_result.get("nUpserted")) + +class BulkWriteResult(_BulkWriteResultBase): + """An object wrapper for collection-level bulk write API results.""" + + __slots__ = () + + def __init__(self, bulk_api_result: dict[str, Any], acknowledged: bool) -> None: + """Create a BulkWriteResult instance. + + :param bulk_api_result: A result dict from the collection-level bulk write API + :param acknowledged: Was this write result acknowledged? If ``False`` + then all properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. 
+ """ + super().__init__(bulk_api_result, acknowledged) + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}({self.bulk_api_result!r}, acknowledged={self.acknowledged})" + ) + @property - def upserted_ids(self) -> Optional[Dict[int, Any]]: + def upserted_ids(self) -> Optional[dict[int, Any]]: """A map of operation index to the _id of the upserted document.""" self._raise_if_unacknowledged("upserted_ids") - if self.__bulk_api_result: - return dict( - (upsert["index"], upsert["_id"]) for upsert in self.bulk_api_result["upserted"] - ) + if self.bulk_api_result: + return {upsert["index"]: upsert["_id"] for upsert in self.bulk_api_result["upserted"]} return None + + +class ClientBulkWriteResult(_BulkWriteResultBase): + """An object wrapper for client-level bulk write API results.""" + + __slots__ = ("__has_verbose_results",) + + def __init__( + self, + bulk_api_result: MutableMapping[str, Any], + acknowledged: bool, + has_verbose_results: bool, + ) -> None: + """Create a ClientBulkWriteResult instance. + + :param bulk_api_result: A result dict from the client-level bulk write API + :param acknowledged: Was this write result acknowledged? If ``False`` + then all properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. + :param has_verbose_results: Should the returned result be verbose? + If ``False``, then the ``insert_results``, ``update_results``, and + ``delete_results`` properties of this object will raise + :exc:`~pymongo.errors.InvalidOperation`. + """ + self.__has_verbose_results = has_verbose_results + super().__init__( + bulk_api_result, # type: ignore[arg-type] + acknowledged, + ) + + def __repr__(self) -> str: + return "{}({!r}, acknowledged={}, verbose={})".format( + self.__class__.__name__, + self.bulk_api_result, + self.acknowledged, + self.has_verbose_results, + ) + + def _raise_if_not_verbose(self, property_name: str) -> None: + """Raise an exception on property access if verbose results are off.""" + if not self.__has_verbose_results: + raise InvalidOperation( + f"A value for {property_name} is not available when " + "the results are not set to be verbose. Check the " + "verbose_results attribute to avoid this error." 
+ ) + + @property + def has_verbose_results(self) -> bool: + """Whether the returned results should be verbose.""" + return self.__has_verbose_results + + @property + def insert_results(self) -> Mapping[int, InsertOneResult]: + """A map of successful insertion operations to their results.""" + self._raise_if_unacknowledged("insert_results") + self._raise_if_not_verbose("insert_results") + return cast( + Mapping[int, InsertOneResult], + self.bulk_api_result.get("insertResults"), + ) + + @property + def update_results(self) -> Mapping[int, UpdateResult]: + """A map of successful update operations to their results.""" + self._raise_if_unacknowledged("update_results") + self._raise_if_not_verbose("update_results") + return cast( + Mapping[int, UpdateResult], + self.bulk_api_result.get("updateResults"), + ) + + @property + def delete_results(self) -> Mapping[int, DeleteResult]: + """A map of successful delete operations to their results.""" + self._raise_if_unacknowledged("delete_results") + self._raise_if_not_verbose("delete_results") + return cast( + Mapping[int, DeleteResult], + self.bulk_api_result.get("deleteResults"), + ) diff --git a/pymongo/saslprep.py b/pymongo/saslprep.py index b96d6fcb56..9cef22419e 100644 --- a/pymongo/saslprep.py +++ b/pymongo/saslprep.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,6 +13,8 @@ # limitations under the License. """An implementation of RFC4013 SASLprep.""" +from __future__ import annotations + from typing import Any, Optional try: @@ -20,7 +22,10 @@ except ImportError: HAVE_STRINGPREP = False - def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: + def saslprep( + data: Any, + prohibit_unassigned_code_points: Optional[bool] = True, # noqa: ARG001 + ) -> Any: """SASLprep dummy""" if isinstance(data, str): raise TypeError( @@ -49,21 +54,19 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) stringprep.in_table_c9, ) - def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> str: + def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) -> Any: """An implementation of RFC4013 SASLprep. - :Parameters: - - `data`: The string to SASLprep. Unicode strings + :param data: The string to SASLprep. Unicode strings (:class:`str`) are supported. Byte strings (:class:`bytes`) are ignored. - - `prohibit_unassigned_code_points`: True / False. RFC 3454 + :param prohibit_unassigned_code_points: True / False. RFC 3454 and RFCs for various SASL mechanisms distinguish between `queries` (unassigned code points allowed) and `stored strings` (unassigned code points prohibited). Defaults to ``True`` (unassigned code points are prohibited). - :Returns: - The SASLprep'ed version of `data`. + :return: The SASLprep'ed version of `data`. 
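The SASLprep behavior documented above follows RFC 4013; two of the RFC's own examples, assuming the optional `stringprep` module is importable (otherwise PyMongo falls back to the dummy shown earlier, which rejects str input)::

    from pymongo.saslprep import saslprep

    assert saslprep("I\u00adX") == "IX"  # soft hyphen maps to nothing
    assert saslprep("user") == "user"    # no transformation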
""" prohibited: Any @@ -71,7 +74,7 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) return data if prohibit_unassigned_code_points: - prohibited = _PROHIBITED + (stringprep.in_table_a1,) + prohibited = (*_PROHIBITED, stringprep.in_table_a1) else: prohibited = _PROHIBITED @@ -98,12 +101,12 @@ def saslprep(data: Any, prohibit_unassigned_code_points: Optional[bool] = True) raise ValueError("SASLprep: failed bidirectional check") # RFC3454, Section 6, #2. If a string contains any RandALCat # character, it MUST NOT contain any LCat character. - prohibited = prohibited + (stringprep.in_table_d2,) + prohibited = (*prohibited, stringprep.in_table_d2) else: # RFC3454, Section 6, #3. Following the logic of #3, if # the first character is not a RandALCat, no other character # can be either. - prohibited = prohibited + (in_table_d1,) + prohibited = (*prohibited, in_table_d1) # RFC3454 section 2, step 3 and 4 - Prohibit and check bidi for char in data: diff --git a/pymongo/server.py b/pymongo/server.py deleted file mode 100644 index f26f473c32..0000000000 --- a/pymongo/server.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Communicate with one MongoDB server in a topology.""" - -from datetime import datetime - -from bson import _decode_all_selective -from pymongo.errors import NotPrimaryError, OperationFailure -from pymongo.helpers import _check_command_response -from pymongo.message import _convert_exception, _OpMsg -from pymongo.response import PinnedResponse, Response - -_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} - - -class Server(object): - def __init__( - self, server_description, pool, monitor, topology_id=None, listeners=None, events=None - ): - """Represent one MongoDB server.""" - self._description = server_description - self._pool = pool - self._monitor = monitor - self._topology_id = topology_id - self._publish = listeners is not None and listeners.enabled_for_server - self._listener = listeners - self._events = None - if self._publish: - self._events = events() - - def open(self): - """Start monitoring, or restart after a fork. - - Multiple calls have no effect. - """ - if not self._pool.opts.load_balanced: - self._monitor.open() - - def reset(self, service_id=None): - """Clear the connection pool.""" - self.pool.reset(service_id) - - def close(self): - """Clear the connection pool and stop the monitor. - - Reconnect with open(). - """ - if self._publish: - assert self._listener is not None - assert self._events is not None - self._events.put( - ( - self._listener.publish_server_closed, - (self._description.address, self._topology_id), - ) - ) - self._monitor.close() - self._pool.reset_without_pause() - - def request_check(self): - """Check the server's state soon.""" - self._monitor.request_check() - - def run_operation(self, sock_info, operation, read_preference, listeners, unpack_res): - """Run a _Query or _GetMore operation and return a Response object. 
- - This method is used only to run _Query/_GetMore operations from - cursors. - Can raise ConnectionFailure, OperationFailure, etc. - - :Parameters: - - `sock_info`: A SocketInfo instance. - - `operation`: A _Query or _GetMore object. - - `set_secondary_okay`: Pass to operation.get_message. - - `listeners`: Instance of _EventListeners or None. - - `unpack_res`: A callable that decodes the wire protocol response. - """ - duration = None - publish = listeners.enabled_for_commands - if publish: - start = datetime.now() - - use_cmd = operation.use_command(sock_info) - more_to_come = operation.sock_mgr and operation.sock_mgr.more_to_come - if more_to_come: - request_id = 0 - else: - message = operation.get_message(read_preference, sock_info, use_cmd) - request_id, data, max_doc_size = self._split_message(message) - - if publish: - cmd, dbn = operation.as_command(sock_info) - listeners.publish_command_start( - cmd, dbn, request_id, sock_info.address, service_id=sock_info.service_id - ) - start = datetime.now() - - try: - if more_to_come: - reply = sock_info.receive_message(None) - else: - sock_info.send_message(data, max_doc_size) - reply = sock_info.receive_message(request_id) - - # Unpack and check for command errors. - if use_cmd: - user_fields = _CURSOR_DOC_FIELDS - legacy_response = False - else: - user_fields = None - legacy_response = True - docs = unpack_res( - reply, - operation.cursor_id, - operation.codec_options, - legacy_response=legacy_response, - user_fields=user_fields, - ) - if use_cmd: - first = docs[0] - operation.client._process_response(first, operation.session) - _check_command_response(first, sock_info.max_wire_version) - except Exception as exc: - if publish: - duration = datetime.now() - start - if isinstance(exc, (NotPrimaryError, OperationFailure)): - failure = exc.details - else: - failure = _convert_exception(exc) - listeners.publish_command_failure( - duration, - failure, - operation.name, - request_id, - sock_info.address, - service_id=sock_info.service_id, - ) - raise - - if publish: - duration = datetime.now() - start - # Must publish in find / getMore / explain command response - # format. - if use_cmd: - res = docs[0] - elif operation.name == "explain": - res = docs[0] if docs else {} - else: - res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} - if operation.name == "find": - res["cursor"]["firstBatch"] = docs - else: - res["cursor"]["nextBatch"] = docs - listeners.publish_command_success( - duration, - res, - operation.name, - request_id, - sock_info.address, - service_id=sock_info.service_id, - ) - - # Decrypt response. - client = operation.client - if client and client._encrypter: - if use_cmd: - decrypted = client._encrypter.decrypt(reply.raw_command_response()) - docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) - - response: Response - - if client._should_pin_cursor(operation.session) or operation.exhaust: - sock_info.pin_cursor() - if isinstance(reply, _OpMsg): - # In OP_MSG, the server keeps sending only if the - # more_to_come flag is set. - more_to_come = reply.more_to_come - else: - # In OP_REPLY, the server keeps sending until cursor_id is 0. 
- more_to_come = bool(operation.exhaust and reply.cursor_id) - if operation.sock_mgr: - operation.sock_mgr.update_exhaust(more_to_come) - response = PinnedResponse( - data=reply, - address=self._description.address, - socket_info=sock_info, - duration=duration, - request_id=request_id, - from_command=use_cmd, - docs=docs, - more_to_come=more_to_come, - ) - else: - response = Response( - data=reply, - address=self._description.address, - duration=duration, - request_id=request_id, - from_command=use_cmd, - docs=docs, - ) - - return response - - def get_socket(self, handler=None): - return self.pool.get_socket(handler) - - @property - def description(self): - return self._description - - @description.setter - def description(self, server_description): - assert server_description.address == self._description.address - self._description = server_description - - @property - def pool(self): - return self._pool - - def _split_message(self, message): - """Return request_id, data, max_doc_size. - - :Parameters: - - `message`: (request_id, data, max_doc_size) or (request_id, data) - """ - if len(message) == 3: - return message - else: - # get_more and kill_cursors messages don't include BSON documents. - request_id, data = message - return request_id, data, 0 - - def __repr__(self): - return "<%s %r>" % (self.__class__.__name__, self._description) diff --git a/pymongo/server_api.py b/pymongo/server_api.py index e92d6e6179..40bb1aac3e 100644 --- a/pymongo/server_api.py +++ b/pymongo/server_api.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -83,6 +83,9 @@ Classes ======= """ +from __future__ import annotations + +from typing import Any, MutableMapping, Optional class ServerApiVersion: @@ -95,41 +98,42 @@ class ServerApiVersion: """Server API version "1".""" -class ServerApi(object): +class ServerApi: """MongoDB Stable API.""" - def __init__(self, version, strict=None, deprecation_errors=None): + def __init__( + self, version: str, strict: Optional[bool] = None, deprecation_errors: Optional[bool] = None + ): """Options to configure MongoDB Stable API. - :Parameters: - - `version`: The API version string. Must be one of the values in + :param version: The API version string. Must be one of the values in :class:`ServerApiVersion`. - - `strict` (optional): Set to ``True`` to enable API strict mode. + :param strict: Set to ``True`` to enable API strict mode. Defaults to ``None`` which means "use the server's default". - - `deprecation_errors` (optional): Set to ``True`` to enable + :param deprecation_errors: Set to ``True`` to enable deprecation errors. Defaults to ``None`` which means "use the server's default". .. 
versionadded:: 3.12 """ if version != ServerApiVersion.V1: - raise ValueError("Unknown ServerApi version: %s" % (version,)) + raise ValueError(f"Unknown ServerApi version: {version}") if strict is not None and not isinstance(strict, bool): raise TypeError( "Wrong type for ServerApi strict, value must be an instance " - "of bool, not %s" % (type(strict),) + f"of bool, not {type(strict)}" ) if deprecation_errors is not None and not isinstance(deprecation_errors, bool): raise TypeError( "Wrong type for ServerApi deprecation_errors, value must be " - "an instance of bool, not %s" % (type(deprecation_errors),) + f"an instance of bool, not {type(deprecation_errors)}" ) self._version = version self._strict = strict self._deprecation_errors = deprecation_errors @property - def version(self): + def version(self) -> str: """The API version setting. This value is sent to the server in the "apiVersion" field. @@ -137,7 +141,7 @@ def version(self): return self._version @property - def strict(self): + def strict(self) -> Optional[bool]: """The API strict mode setting. When set, this value is sent to the server in the "apiStrict" field. @@ -145,7 +149,7 @@ def strict(self): return self._strict @property - def deprecation_errors(self): + def deprecation_errors(self) -> Optional[bool]: """The API deprecation errors setting. When set, this value is sent to the server in the @@ -154,12 +158,11 @@ def deprecation_errors(self): return self._deprecation_errors -def _add_to_command(cmd, server_api): +def _add_to_command(cmd: MutableMapping[str, Any], server_api: Optional[ServerApi]) -> None: """Internal helper which adds API versioning options to a command. - :Parameters: - - `cmd`: The command. - - `server_api` (optional): A :class:`ServerApi` or ``None``. + :param cmd: The command. + :param server_api: A :class:`ServerApi` or ``None``. """ if not server_api: return diff --git a/pymongo/server_description.py b/pymongo/server_description.py index 47e27c531b..d038c04b1c 100644 --- a/pymongo/server_description.py +++ b/pymongo/server_description.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,26 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Represent one server the driver is connected to.""" +"""Represent one server the driver is connected to. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations import time -from typing import Any, Dict, Mapping, Optional, Set, Tuple +import warnings +from typing import Any, Mapping, Optional from bson import EPOCH_NAIVE from bson.objectid import ObjectId from pymongo.hello import Hello from pymongo.server_type import SERVER_TYPE -from pymongo.typings import _Address +from pymongo.typings import ClusterTime, _Address -class ServerDescription(object): +class ServerDescription: """Immutable representation of one server. 
- :Parameters: - - `address`: A (host, port) pair - - `hello`: Optional Hello instance - - `round_trip_time`: Optional float - - `error`: Optional, the last error attempting to connect to the server + :param address: A (host, port) pair + :param hello: Optional Hello instance + :param round_trip_time: Optional float + :param error: Optional, the last error attempting to connect to the server + :param min_round_trip_time: Optional float, the min latency from the most recent samples """ __slots__ = ( @@ -47,6 +52,7 @@ class ServerDescription(object): "_min_wire_version", "_max_wire_version", "_round_trip_time", + "_min_round_trip_time", "_me", "_is_writable", "_is_readable", @@ -63,9 +69,10 @@ class ServerDescription(object): def __init__( self, address: _Address, - hello: Optional[Hello] = None, + hello: Optional[Hello[dict[str, Any]]] = None, round_trip_time: Optional[float] = None, error: Optional[Exception] = None, + min_round_trip_time: float = 0.0, ) -> None: self._address = address if not hello: @@ -88,6 +95,7 @@ def __init__( self._is_readable = hello.is_readable self._ls_timeout_minutes = hello.logical_session_timeout_minutes self._round_trip_time = round_trip_time + self._min_round_trip_time = min_round_trip_time self._me = hello.me self._last_update_time = time.monotonic() self._error = error @@ -124,7 +132,7 @@ def server_type_name(self) -> str: return SERVER_TYPE._fields[self._server_type] @property - def all_hosts(self) -> Set[Tuple[str, int]]: + def all_hosts(self) -> set[tuple[str, int]]: """List of hosts, passives, and arbiters known to this server.""" return self._all_hosts @@ -138,7 +146,7 @@ def replica_set_name(self) -> Optional[str]: return self._replica_set_name @property - def primary(self) -> Optional[Tuple[str, int]]: + def primary(self) -> Optional[tuple[str, int]]: """This server's opinion about who the primary is, or None.""" return self._primary @@ -171,15 +179,20 @@ def election_id(self) -> Optional[ObjectId]: return self._election_id @property - def cluster_time(self) -> Optional[Mapping[str, Any]]: + def cluster_time(self) -> Optional[ClusterTime]: return self._cluster_time @property - def election_tuple(self) -> Tuple[Optional[int], Optional[ObjectId]]: + def election_tuple(self) -> tuple[Optional[int], Optional[ObjectId]]: + warnings.warn( + "'election_tuple' is deprecated, use 'set_version' and 'election_id' instead", + DeprecationWarning, + stacklevel=2, + ) return self._set_version, self._election_id @property - def me(self) -> Optional[Tuple[str, int]]: + def me(self) -> Optional[tuple[str, int]]: return self._me @property @@ -203,6 +216,11 @@ def round_trip_time(self) -> Optional[float]: return self._round_trip_time + @property + def min_round_trip_time(self) -> float: + """The min latency from the most recent samples.""" + return self._min_round_trip_time + @property def error(self) -> Optional[Exception]: """The last error attempting to connect to the server, or None.""" @@ -228,8 +246,7 @@ def is_server_type_known(self) -> bool: def retryable_writes_supported(self) -> bool: """Checks if this server supports retryable writes.""" return ( - self._ls_timeout_minutes is not None - and self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) + self._server_type in (SERVER_TYPE.Mongos, SERVER_TYPE.RSPrimary) ) or self._server_type == SERVER_TYPE.LoadBalancer @property @@ -241,7 +258,7 @@ def retryable_reads_supported(self) -> bool: def topology_version(self) -> Optional[Mapping[str, Any]]: return self._topology_version - def to_unknown(self, error: 
Optional[Exception] = None) -> "ServerDescription": + def to_unknown(self, error: Optional[Exception] = None) -> ServerDescription: unknown = ServerDescription(self.address, error=error) unknown._topology_version = self.topology_version return unknown @@ -269,11 +286,11 @@ def __eq__(self, other: Any) -> bool: def __ne__(self, other: Any) -> bool: return not self == other - def __repr__(self): + def __repr__(self) -> str: errmsg = "" if self.error: - errmsg = ", error=%r" % (self.error,) - return "<%s %s server_type: %s, rtt: %s%s>" % ( + errmsg = f", error={self.error!r}" + return "<{} {} server_type: {}, rtt: {}{}>".format( self.__class__.__name__, self.address, self.server_type_name, @@ -282,4 +299,4 @@ def __repr__(self): ) # For unittesting only. Use under no circumstances! - _host_to_round_trip_time: Dict = {} + _host_to_round_trip_time: dict = {} # type: ignore[type-arg] diff --git a/pymongo/server_selectors.py b/pymongo/server_selectors.py index 313566cb83..0d1425ab31 100644 --- a/pymongo/server_selectors.py +++ b/pymongo/server_selectors.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,15 +13,27 @@ # permissions and limitations under the License. """Criteria to select some ServerDescriptions from a TopologyDescription.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, TypeVar, cast from pymongo.server_type import SERVER_TYPE +if TYPE_CHECKING: + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + + +T = TypeVar("T") +TagSet = Mapping[str, Any] +TagSets = Sequence[TagSet] + -class Selection(object): +class Selection: """Input or output of a server selector function.""" @classmethod - def from_topology_description(cls, topology_description): + def from_topology_description(cls, topology_description: TopologyDescription) -> Selection: known_servers = topology_description.known_servers primary = None for sd in known_servers: @@ -36,76 +48,85 @@ def from_topology_description(cls, topology_description): primary, ) - def __init__(self, topology_description, server_descriptions, common_wire_version, primary): + def __init__( + self, + topology_description: TopologyDescription, + server_descriptions: list[ServerDescription], + common_wire_version: Optional[int], + primary: Optional[ServerDescription], + ): self.topology_description = topology_description self.server_descriptions = server_descriptions self.primary = primary self.common_wire_version = common_wire_version - def with_server_descriptions(self, server_descriptions): + def with_server_descriptions(self, server_descriptions: list[ServerDescription]) -> Selection: return Selection( self.topology_description, server_descriptions, self.common_wire_version, self.primary ) - def secondary_with_max_last_write_date(self): + def secondary_with_max_last_write_date(self) -> Optional[ServerDescription]: secondaries = secondary_server_selector(self) if secondaries.server_descriptions: - return max(secondaries.server_descriptions, key=lambda sd: sd.last_write_date) + return max( + secondaries.server_descriptions, key=lambda sd: cast(float, sd.last_write_date) + ) + return None @property - def 
primary_selection(self): + def primary_selection(self) -> Selection: primaries = [self.primary] if self.primary else [] return self.with_server_descriptions(primaries) @property - def heartbeat_frequency(self): + def heartbeat_frequency(self) -> int: return self.topology_description.heartbeat_frequency @property - def topology_type(self): + def topology_type(self) -> int: return self.topology_description.topology_type - def __bool__(self): + def __bool__(self) -> bool: return bool(self.server_descriptions) - def __getitem__(self, item): + def __getitem__(self, item: int) -> ServerDescription: return self.server_descriptions[item] -def any_server_selector(selection): +def any_server_selector(selection: T) -> T: return selection -def readable_server_selector(selection): +def readable_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.is_readable] ) -def writable_server_selector(selection): +def writable_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.is_writable] ) -def secondary_server_selector(selection): +def secondary_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSSecondary] ) -def arbiter_server_selector(selection): +def arbiter_server_selector(selection: Selection) -> Selection: return selection.with_server_descriptions( [s for s in selection.server_descriptions if s.server_type == SERVER_TYPE.RSArbiter] ) -def writable_preferred_server_selector(selection): +def writable_preferred_server_selector(selection: Selection) -> Selection: """Like PrimaryPreferred but doesn't use tags or latency.""" return writable_server_selector(selection) or secondary_server_selector(selection) -def apply_single_tag_set(tag_set, selection): +def apply_single_tag_set(tag_set: TagSet, selection: Selection) -> Selection: """All servers matching one tag set. A tag set is a dict. A server matches if its tags are a superset: @@ -114,7 +135,7 @@ def apply_single_tag_set(tag_set, selection): The empty tag set {} matches any server. """ - def tags_match(server_tags): + def tags_match(server_tags: Mapping[str, Any]) -> bool: for key, value in tag_set.items(): if key not in server_tags or server_tags[key] != value: return False @@ -126,7 +147,7 @@ def tags_match(server_tags): ) -def apply_tag_sets(tag_sets, selection): +def apply_tag_sets(tag_sets: TagSets, selection: Selection) -> Selection: """All servers match a list of tag sets. tag_sets is a list of dicts. 
The empty tag set {} matches any server, @@ -143,11 +164,11 @@ def apply_tag_sets(tag_sets, selection): return selection.with_server_descriptions([]) -def secondary_with_tags_server_selector(tag_sets, selection): +def secondary_with_tags_server_selector(tag_sets: TagSets, selection: Selection) -> Selection: """All near-enough secondaries matching the tag sets.""" return apply_tag_sets(tag_sets, secondary_server_selector(selection)) -def member_with_tags_server_selector(tag_sets, selection): +def member_with_tags_server_selector(tag_sets: TagSets, selection: Selection) -> Selection: """All near-enough members matching the tag sets.""" return apply_tag_sets(tag_sets, readable_server_selector(selection)) diff --git a/pymongo/server_type.py b/pymongo/server_type.py index ee53b6b97d..7a6d2aaf14 100644 --- a/pymongo/server_type.py +++ b/pymongo/server_type.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,6 +13,7 @@ # limitations under the License. """Type codes for MongoDB servers.""" +from __future__ import annotations from typing import NamedTuple diff --git a/pymongo/settings.py b/pymongo/settings.py deleted file mode 100644 index 2bd2527cdf..0000000000 --- a/pymongo/settings.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Represent MongoClient's configuration.""" - -import threading -import traceback - -from bson.objectid import ObjectId -from pymongo import common, monitor, pool -from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT -from pymongo.errors import ConfigurationError -from pymongo.pool import PoolOptions -from pymongo.server_description import ServerDescription -from pymongo.topology_description import TOPOLOGY_TYPE - - -class TopologySettings(object): - def __init__( - self, - seeds=None, - replica_set_name=None, - pool_class=None, - pool_options=None, - monitor_class=None, - condition_class=None, - local_threshold_ms=LOCAL_THRESHOLD_MS, - server_selection_timeout=SERVER_SELECTION_TIMEOUT, - heartbeat_frequency=common.HEARTBEAT_FREQUENCY, - server_selector=None, - fqdn=None, - direct_connection=False, - load_balanced=None, - srv_service_name=common.SRV_SERVICE_NAME, - srv_max_hosts=0, - ): - """Represent MongoClient's configuration. - - Take a list of (host, port) pairs and optional replica set name. 
- """ - if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: - raise ConfigurationError( - "heartbeatFrequencyMS cannot be less than %d" - % (common.MIN_HEARTBEAT_INTERVAL * 1000,) - ) - - self._seeds = seeds or [("localhost", 27017)] - self._replica_set_name = replica_set_name - self._pool_class = pool_class or pool.Pool - self._pool_options = pool_options or PoolOptions() - self._monitor_class = monitor_class or monitor.Monitor - self._condition_class = condition_class or threading.Condition - self._local_threshold_ms = local_threshold_ms - self._server_selection_timeout = server_selection_timeout - self._server_selector = server_selector - self._fqdn = fqdn - self._heartbeat_frequency = heartbeat_frequency - self._direct = direct_connection - self._load_balanced = load_balanced - self._srv_service_name = srv_service_name - self._srv_max_hosts = srv_max_hosts or 0 - - self._topology_id = ObjectId() - # Store the allocation traceback to catch unclosed clients in the - # test suite. - self._stack = "".join(traceback.format_stack()) - - @property - def seeds(self): - """List of server addresses.""" - return self._seeds - - @property - def replica_set_name(self): - return self._replica_set_name - - @property - def pool_class(self): - return self._pool_class - - @property - def pool_options(self): - return self._pool_options - - @property - def monitor_class(self): - return self._monitor_class - - @property - def condition_class(self): - return self._condition_class - - @property - def local_threshold_ms(self): - return self._local_threshold_ms - - @property - def server_selection_timeout(self): - return self._server_selection_timeout - - @property - def server_selector(self): - return self._server_selector - - @property - def heartbeat_frequency(self): - return self._heartbeat_frequency - - @property - def fqdn(self): - return self._fqdn - - @property - def direct(self): - """Connect directly to a single server, or use a set of servers? - - True if there is one seed and no replica_set_name. - """ - return self._direct - - @property - def load_balanced(self): - """True if the client was configured to connect to a load balancer.""" - return self._load_balanced - - @property - def srv_service_name(self): - """The srvServiceName.""" - return self._srv_service_name - - @property - def srv_max_hosts(self): - """The srvMaxHosts.""" - return self._srv_max_hosts - - def get_topology_type(self): - if self.load_balanced: - return TOPOLOGY_TYPE.LoadBalanced - elif self.direct: - return TOPOLOGY_TYPE.Single - elif self.replica_set_name is not None: - return TOPOLOGY_TYPE.ReplicaSetNoPrimary - else: - return TOPOLOGY_TYPE.Unknown - - def get_server_descriptions(self): - """Initial dict of (address, ServerDescription) for all seeds.""" - return dict([(address, ServerDescription(address)) for address in self.seeds]) diff --git a/pymongo/socket_checker.py b/pymongo/socket_checker.py index 420953db2e..78861854ab 100644 --- a/pymongo/socket_checker.py +++ b/pymongo/socket_checker.py @@ -13,11 +13,12 @@ # limitations under the License. 
"""Select / poll helper""" +from __future__ import annotations import errno import select import sys -from typing import Any, Optional +from typing import Any, Optional, cast # PYTHON-2320: Jython does not fully support poll on SSL sockets, # https://bugs.jython.org/issue2900 @@ -25,15 +26,15 @@ _SelectError = getattr(select, "error", OSError) -def _errno_from_exception(exc): +def _errno_from_exception(exc: BaseException) -> Optional[int]: if hasattr(exc, "errno"): - return exc.errno + return cast(int, exc.errno) if exc.args: - return exc.args[0] + return cast(int, exc.args[0]) return None -class SocketChecker(object): +class SocketChecker: def __init__(self) -> None: self._poller: Optional[select.poll] if _HAVE_POLL: @@ -78,7 +79,7 @@ def select( # ready: subsets of the first three arguments. Return # True if any of the lists are not empty. return any(res) - except (_SelectError, IOError) as exc: # type: ignore + except (_SelectError, OSError) as exc: # type: ignore if _errno_from_exception(exc) in (errno.EINTR, errno.EAGAIN): continue raise diff --git a/pymongo/srv_resolver.py b/pymongo/srv_resolver.py deleted file mode 100644 index fe2dd49aa0..0000000000 --- a/pymongo/srv_resolver.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2019-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Support for resolving hosts and options from mongodb+srv:// URIs.""" - -import ipaddress -import random - -try: - from dns import resolver - - _HAVE_DNSPYTHON = True -except ImportError: - _HAVE_DNSPYTHON = False - -from pymongo.common import CONNECT_TIMEOUT -from pymongo.errors import ConfigurationError - - -# dnspython can return bytes or str from various parts -# of its API depending on version. We always want str. -def maybe_decode(text): - if isinstance(text, bytes): - return text.decode() - return text - - -# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. -def _resolve(*args, **kwargs): - if hasattr(resolver, "resolve"): - # dnspython >= 2 - return resolver.resolve(*args, **kwargs) - # dnspython 1.X - return resolver.query(*args, **kwargs) - - -_INVALID_HOST_MSG = ( - "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " - "Did you mean to use 'mongodb://'?" -) - - -class _SrvResolver(object): - def __init__(self, fqdn, connect_timeout, srv_service_name, srv_max_hosts=0): - self.__fqdn = fqdn - self.__srv = srv_service_name - self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT - self.__srv_max_hosts = srv_max_hosts or 0 - # Validate the fully qualified domain name. 
- try: - ipaddress.ip_address(fqdn) - raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) - except ValueError: - pass - - try: - self.__plist = self.__fqdn.split(".")[1:] - except Exception: - raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) - self.__slen = len(self.__plist) - if self.__slen < 2: - raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) - - def get_options(self): - try: - results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) - except (resolver.NoAnswer, resolver.NXDOMAIN): - # No TXT records - return None - except Exception as exc: - raise ConfigurationError(str(exc)) - if len(results) > 1: - raise ConfigurationError("Only one TXT record is supported") - return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") - - def _resolve_uri(self, encapsulate_errors): - try: - results = _resolve( - "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout - ) - except Exception as exc: - if not encapsulate_errors: - # Raise the original error. - raise - # Else, raise all errors as ConfigurationError. - raise ConfigurationError(str(exc)) - return results - - def _get_srv_response_and_hosts(self, encapsulate_errors): - results = self._resolve_uri(encapsulate_errors) - - # Construct address tuples - nodes = [ - (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) for res in results - ] - - # Validate hosts - for node in nodes: - try: - nlist = node[0].split(".")[1:][-self.__slen :] - except Exception: - raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) - if self.__plist != nlist: - raise ConfigurationError("Invalid SRV host: %s" % (node[0],)) - if self.__srv_max_hosts: - nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) - return results, nodes - - def get_hosts(self): - _, nodes = self._get_srv_response_and_hosts(True) - return nodes - - def get_hosts_and_min_ttl(self): - results, nodes = self._get_srv_response_and_hosts(False) - return nodes, results.rrset.ttl diff --git a/pymongo/ssl_context.py b/pymongo/ssl_context.py index 63970cb5e2..2ff7428cab 100644 --- a/pymongo/ssl_context.py +++ b/pymongo/ssl_context.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,6 +13,7 @@ # permissions and limitations under the License. """A fake SSLContext implementation.""" +from __future__ import annotations import ssl as _ssl @@ -29,6 +30,8 @@ # Errors raised by SSL sockets when in non-blocking mode. BLOCKING_IO_ERRORS = (_ssl.SSLWantReadError, _ssl.SSLWantWriteError) +BLOCKING_IO_READ_ERROR = _ssl.SSLWantReadError +BLOCKING_IO_WRITE_ERROR = _ssl.SSLWantWriteError # Base Exception class SSLError = _ssl.SSLError diff --git a/pymongo/ssl_support.py b/pymongo/ssl_support.py index 13c5315eee..7dbd0f2148 100644 --- a/pymongo/ssl_support.py +++ b/pymongo/ssl_support.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. 
You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,18 +13,35 @@ # permissions and limitations under the License. """Support for SSL in PyMongo.""" +from __future__ import annotations + +import types +import warnings +from typing import Any, Optional, Union from pymongo.errors import ConfigurationError HAVE_SSL = True +HAVE_PYSSL = True try: - import pymongo.pyopenssl_context as _ssl + import pymongo.pyopenssl_context as _pyssl +except (ImportError, AttributeError) as exc: + HAVE_PYSSL = False + if isinstance(exc, AttributeError): + warnings.warn( + "Failed to use the installed version of PyOpenSSL. " + "Falling back to stdlib ssl, disabling OCSP support. " + "This is likely caused by incompatible versions " + "of PyOpenSSL < 23.2.0 and cryptography >= 42.0.0. " + "Try updating PyOpenSSL >= 23.2.0 to enable OCSP.", + UserWarning, + stacklevel=2, + ) +try: + import pymongo.ssl_context as _ssl except ImportError: - try: - import pymongo.ssl_context as _ssl # type: ignore[no-redef] - except ImportError: - HAVE_SSL = False + HAVE_SSL = False if HAVE_SSL: @@ -32,26 +49,54 @@ # CPython ssl module constants to configure certificate verification # at a high level. This is legacy behavior, but requires us to # import the ssl module even if we're only using it for this purpose. - import ssl as _stdlibssl # noqa + import ssl as _stdlibssl # noqa: F401 from ssl import CERT_NONE, CERT_REQUIRED - HAS_SNI = _ssl.HAS_SNI IPADDR_SAFE = True + + if HAVE_PYSSL: + PYSSLError: Any = _pyssl.SSLError + BLOCKING_IO_ERRORS: tuple = ( # type: ignore[type-arg] + _ssl.BLOCKING_IO_ERRORS + _pyssl.BLOCKING_IO_ERRORS + ) + BLOCKING_IO_READ_ERROR: tuple = ( # type: ignore[type-arg] + _pyssl.BLOCKING_IO_READ_ERROR, + _ssl.BLOCKING_IO_READ_ERROR, + ) + BLOCKING_IO_WRITE_ERROR: tuple = ( # type: ignore[type-arg] + _pyssl.BLOCKING_IO_WRITE_ERROR, + _ssl.BLOCKING_IO_WRITE_ERROR, + ) + else: + PYSSLError = _ssl.SSLError + BLOCKING_IO_ERRORS: tuple = _ssl.BLOCKING_IO_ERRORS # type: ignore[type-arg, no-redef] + BLOCKING_IO_READ_ERROR: tuple = (_ssl.BLOCKING_IO_READ_ERROR,) # type: ignore[type-arg, no-redef] + BLOCKING_IO_WRITE_ERROR: tuple = (_ssl.BLOCKING_IO_WRITE_ERROR,) # type: ignore[type-arg, no-redef] SSLError = _ssl.SSLError - BLOCKING_IO_ERRORS = _ssl.BLOCKING_IO_ERRORS + BLOCKING_IO_LOOKUP_ERROR = BLOCKING_IO_READ_ERROR + + def _has_sni(is_sync: bool) -> bool: + if is_sync and HAVE_PYSSL: + return _pyssl.HAS_SNI + return _ssl.HAS_SNI def get_ssl_context( - certfile, - passphrase, - ca_certs, - crlfile, - allow_invalid_certificates, - allow_invalid_hostnames, - disable_ocsp_endpoint_check, - ): + certfile: Optional[str], + passphrase: Optional[str], + ca_certs: Optional[str], + crlfile: Optional[str], + allow_invalid_certificates: bool, + allow_invalid_hostnames: bool, + disable_ocsp_endpoint_check: bool, + is_sync: bool, + ) -> Union[_pyssl.SSLContext, _ssl.SSLContext]: # type: ignore[name-defined] """Create and return an SSLContext object.""" + if is_sync and HAVE_PYSSL: + ssl: types.ModuleType = _pyssl + else: + ssl = _ssl verify_mode = CERT_NONE if allow_invalid_certificates else CERT_REQUIRED - ctx = _ssl.SSLContext(_ssl.PROTOCOL_SSLv23) + ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) if verify_mode != CERT_NONE: ctx.check_hostname = not allow_invalid_hostnames else: @@ -63,20 +108,20 @@ def 
get_ssl_context( # up to date versions of MongoDB 2.4 and above already disable # SSLv2 and SSLv3, python disables SSLv2 by default in >= 2.7.7 # and >= 3.3.4 and SSLv3 in >= 3.4.3. - ctx.options |= _ssl.OP_NO_SSLv2 - ctx.options |= _ssl.OP_NO_SSLv3 - ctx.options |= _ssl.OP_NO_COMPRESSION - ctx.options |= _ssl.OP_NO_RENEGOTIATION + ctx.options |= ssl.OP_NO_SSLv2 + ctx.options |= ssl.OP_NO_SSLv3 + ctx.options |= ssl.OP_NO_COMPRESSION + ctx.options |= ssl.OP_NO_RENEGOTIATION if certfile is not None: try: ctx.load_cert_chain(certfile, None, passphrase) - except _ssl.SSLError as exc: - raise ConfigurationError("Private key doesn't match certificate: %s" % (exc,)) + except ssl.SSLError as exc: + raise ConfigurationError(f"Private key doesn't match certificate: {exc}") from None if crlfile is not None: - if _ssl.IS_PYOPENSSL: + if ssl.IS_PYOPENSSL: raise ConfigurationError("tlsCRLFile cannot be used with PyOpenSSL") # Match the server's behavior. - setattr(ctx, "verify_flags", getattr(_ssl, "VERIFY_CRL_CHECK_LEAF", 0)) # noqa + ctx.verify_flags = getattr(ssl, "VERIFY_CRL_CHECK_LEAF", 0) ctx.load_verify_locations(crlfile) if ca_certs is not None: ctx.load_verify_locations(ca_certs) @@ -90,10 +135,12 @@ def get_ssl_context( class SSLError(Exception): # type: ignore pass - HAS_SNI = False IPADDR_SAFE = False - BLOCKING_IO_ERRORS = () # type: ignore + BLOCKING_IO_ERRORS: tuple = () # type: ignore[type-arg, no-redef] + + def _has_sni(is_sync: bool) -> bool: # noqa: ARG001 + return False def get_ssl_context(*dummy): # type: ignore """No ssl module, raise ConfigurationError.""" - raise ConfigurationError("The ssl module is not available.") + raise ConfigurationError("The ssl module is not available") diff --git a/pymongo/synchronous/__init__.py b/pymongo/synchronous/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pymongo/synchronous/aggregation.py b/pymongo/synchronous/aggregation.py new file mode 100644 index 0000000000..486768ab7d --- /dev/null +++ b/pymongo/synchronous/aggregation.py @@ -0,0 +1,254 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
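
The get_ssl_context rework above dispatches between the PyOpenSSL and stdlib ssl backends per connection. Its parameters are populated from MongoClient's TLS options; a hedged sketch of that mapping (file paths are placeholders):

    from pymongo import MongoClient

    # tlsCAFile -> ca_certs, tlsCertificateKeyFile -> certfile,
    # tlsCertificateKeyFilePassword -> passphrase,
    # tlsAllowInvalidCertificates -> allow_invalid_certificates,
    # tlsAllowInvalidHostnames -> allow_invalid_hostnames,
    # tlsDisableOCSPEndpointCheck -> disable_ocsp_endpoint_check.
    client = MongoClient(
        "mongodb://localhost:27017",
        tls=True,
        tlsCAFile="/path/to/ca.pem",  # placeholder path
        tlsCertificateKeyFile="/path/to/client.pem",  # placeholder path
        tlsAllowInvalidCertificates=False,
        tlsAllowInvalidHostnames=False,
    )
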
+ +"""Perform aggregation operations on a collection or database.""" +from __future__ import annotations + +from collections.abc import Callable, Mapping, MutableMapping +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo import common +from pymongo.collation import validate_collation_or_none +from pymongo.errors import ConfigurationError +from pymongo.read_preferences import ReadPreference, _AggWritePref + +if TYPE_CHECKING: + from pymongo.read_preferences import _ServerMode + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.command_cursor import CommandCursor + from pymongo.synchronous.database import Database + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + from pymongo.typings import _DocumentType, _Pipeline + +_IS_SYNC = True + + +class _AggregationCommand: + """The internal abstract base class for aggregation cursors. + + Should not be called directly by application developers. Use + :meth:`pymongo.collection.Collection.aggregate`, or + :meth:`pymongo.database.Database.aggregate` instead. + """ + + def __init__( + self, + target: Union[Database[Any], Collection[Any]], + cursor_class: type[CommandCursor[Any]], + pipeline: _Pipeline, + options: MutableMapping[str, Any], + let: Optional[Mapping[str, Any]] = None, + user_fields: Optional[MutableMapping[str, Any]] = None, + result_processor: Optional[Callable[[Mapping[str, Any], Connection], None]] = None, + comment: Any = None, + ) -> None: + if "explain" in options: + raise ConfigurationError( + "The explain option is not supported. Use Database.command instead." + ) + + self._target = target + + pipeline = common.validate_list("pipeline", pipeline) + self._pipeline = pipeline + self._performs_write = False + if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]): + self._performs_write = True + + common.validate_is_mapping("options", options) + if let is not None: + common.validate_is_mapping("let", let) + options["let"] = let + if comment is not None: + options["comment"] = comment + + self._options = options + + # This is the batchSize that will be used for setting the initial + # batchSize for the cursor, as well as the subsequent getMores. + self._batch_size = common.validate_non_negative_integer_or_none( + "batchSize", self._options.pop("batchSize", None) + ) + + # If the cursor option is already specified, avoid overriding it. + self._options.setdefault("cursor", {}) + # If the pipeline performs a write, we ignore the initial batchSize + # since the server doesn't return results in this case. 
+ if self._batch_size is not None and not self._performs_write: + self._options["cursor"]["batchSize"] = self._batch_size + + self._cursor_class = cursor_class + self._user_fields = user_fields + self._result_processor = result_processor + + self._collation = validate_collation_or_none(options.pop("collation", None)) + + self._max_await_time_ms = options.pop("maxAwaitTimeMS", None) + self._write_preference: Optional[_AggWritePref] = None + + @property + def _aggregation_target(self) -> Union[str, int]: + """The argument to pass to the aggregate command.""" + raise NotImplementedError + + @property + def _cursor_namespace(self) -> str: + """The namespace in which the aggregate command is run.""" + raise NotImplementedError + + def _cursor_collection(self, cursor_doc: Mapping[str, Any]) -> Collection[Any]: + """The Collection used for the aggregate command cursor.""" + raise NotImplementedError + + @property + def _database(self) -> Database[Any]: + """The database against which the aggregation command is run.""" + raise NotImplementedError + + def get_read_preference( + self, session: Optional[ClientSession] + ) -> Union[_AggWritePref, _ServerMode]: + if self._write_preference: + return self._write_preference + pref = self._target._read_preference_for(session) + if self._performs_write and pref != ReadPreference.PRIMARY: + self._write_preference = pref = _AggWritePref(pref) # type: ignore[assignment] + return pref + + def get_cursor( + self, + session: Optional[ClientSession], + server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[_DocumentType]: + # Serialize command. + cmd = {"aggregate": self._aggregation_target, "pipeline": self._pipeline} + cmd.update(self._options) + + # Apply this target's read concern if: + # readConcern has not been specified as a kwarg and either + # - server version is >= 4.2 or + # - server version is >= 3.2 and pipeline doesn't use $out + if ("readConcern" not in cmd) and ( + not self._performs_write or (conn.max_wire_version >= 8) + ): + read_concern = self._target.read_concern + else: + read_concern = None + + # Apply this target's write concern if: + # writeConcern has not been specified as a kwarg and pipeline doesn't + # perform a write operation + if "writeConcern" not in cmd and self._performs_write: + write_concern = self._target._write_concern_for(session) + else: + write_concern = None + + # Run command. + result = conn.command( + self._database.name, + cmd, + read_preference, + self._target.codec_options, + parse_write_concern_error=True, + read_concern=read_concern, + write_concern=write_concern, + collation=self._collation, + session=session, + client=self._database.client, + user_fields=self._user_fields, + ) + + if self._result_processor: + self._result_processor(result, conn) + + # Extract cursor from result or mock/fake one if necessary. + if "cursor" in result: + cursor = result["cursor"] + else: + # Unacknowledged $out/$merge write. Fake a cursor. + cursor = { + "id": 0, + "firstBatch": result.get("result", []), + "ns": self._cursor_namespace, + } + + # Create and return cursor instance. 
+ cmd_cursor = self._cursor_class( + self._cursor_collection(cursor), + cursor, + conn.address, + batch_size=self._batch_size or 0, + max_await_time_ms=self._max_await_time_ms, + session=session, + comment=self._options.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + +class _CollectionAggregationCommand(_AggregationCommand): + _target: Collection[Any] + + @property + def _aggregation_target(self) -> str: + return self._target.name + + @property + def _cursor_namespace(self) -> str: + return self._target.full_name + + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection[Any]: + """The Collection used for the aggregate command cursor.""" + return self._target + + @property + def _database(self) -> Database[Any]: + return self._target.database + + +class _CollectionRawAggregationCommand(_CollectionAggregationCommand): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + # For raw-batches, we set the initial batchSize for the cursor to 0. + if not self._performs_write: + self._options["cursor"]["batchSize"] = 0 + + +class _DatabaseAggregationCommand(_AggregationCommand): + _target: Database[Any] + + @property + def _aggregation_target(self) -> int: + return 1 + + @property + def _cursor_namespace(self) -> str: + return f"{self._target.name}.$cmd.aggregate" + + @property + def _database(self) -> Database[Any]: + return self._target + + def _cursor_collection(self, cursor: Mapping[str, Any]) -> Collection[Any]: + """The Collection used for the aggregate command cursor.""" + # Collection level aggregate may not always return the "ns" field + # according to our MockupDB tests. Let's handle that case for db level + # aggregate too by defaulting to the .$cmd.aggregate namespace. + _, collname = cursor.get("ns", self._cursor_namespace).split(".", 1) + return self._database[collname] diff --git a/pymongo/synchronous/auth.py b/pymongo/synchronous/auth.py new file mode 100644 index 0000000000..650e25234d --- /dev/null +++ b/pymongo/synchronous/auth.py @@ -0,0 +1,450 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
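
_AggregationCommand above is the machinery behind Collection.aggregate and Database.aggregate. A short usage sketch of the options it consumes (the database, collection, and field names are illustrative):

    from pymongo import MongoClient

    coll = MongoClient()["test"]["events"]  # illustrative names
    cursor = coll.aggregate(
        [{"$match": {"status": "ok"}}, {"$group": {"_id": "$kind", "n": {"$sum": 1}}}],
        batchSize=100,        # becomes options["cursor"]["batchSize"] above
        let={"minSize": 5},   # validated and forwarded as "let"
        comment="nightly-rollup",
    )
    for doc in cursor:
        print(doc)

Note that, per the constructor above, a pipeline ending in $out or $merge ignores the initial batchSize, since the server returns no documents for a write stage.
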
+ +"""Authentication helpers.""" +from __future__ import annotations + +import functools +import hashlib +import hmac +import socket +from base64 import standard_b64decode, standard_b64encode +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Mapping, + MutableMapping, + Optional, + cast, +) +from urllib.parse import quote + +from bson.binary import Binary +from pymongo.auth_shared import ( + MongoCredential, + _authenticate_scram_start, + _parse_scram_response, + _xor, +) +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.saslprep import saslprep +from pymongo.synchronous.auth_aws import _authenticate_aws +from pymongo.synchronous.auth_oidc import ( + _authenticate_oidc, + _get_authenticator, +) +from pymongo.synchronous.helpers import _getaddrinfo + +if TYPE_CHECKING: + from pymongo.hello import Hello + from pymongo.synchronous.pool import Connection + +HAVE_KERBEROS = True +_USE_PRINCIPAL = False +try: + import winkerberos as kerberos # type:ignore[import] + + if tuple(map(int, kerberos.__version__.split(".")[:2])) >= (0, 5): + _USE_PRINCIPAL = True +except ImportError: + try: + import kerberos # type:ignore[import] + except ImportError: + HAVE_KERBEROS = False + + +_IS_SYNC = True + + +def _authenticate_scram(credentials: MongoCredential, conn: Connection, mechanism: str) -> None: + """Authenticate using SCRAM.""" + username = credentials.username + if mechanism == "SCRAM-SHA-256": + digest = "sha256" + digestmod = hashlib.sha256 + data = saslprep(credentials.password).encode("utf-8") + else: + digest = "sha1" + digestmod = hashlib.sha1 + data = _password_digest(username, credentials.password).encode("utf-8") + source = credentials.source + cache = credentials.cache + + # Make local + _hmac = hmac.HMAC + + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + assert isinstance(ctx, _ScramContext) + assert ctx.scram_data is not None + nonce, first_bare = ctx.scram_data + res = ctx.speculative_authenticate + else: + nonce, first_bare, cmd = _authenticate_scram_start(credentials, mechanism) + res = conn.command(source, cmd) + + assert res is not None + server_first = res["payload"] + parsed = _parse_scram_response(server_first) + iterations = int(parsed[b"i"]) + if iterations < 4096: + raise OperationFailure("Server returned an invalid iteration count.") + salt = parsed[b"s"] + rnonce = parsed[b"r"] + if not rnonce.startswith(nonce): + raise OperationFailure("Server returned an invalid nonce.") + + without_proof = b"c=biws,r=" + rnonce + if cache.data: + client_key, server_key, csalt, citerations = cache.data + else: + client_key, server_key, csalt, citerations = None, None, None, None + + # Salt and / or iterations could change for a number of different + # reasons. Either changing invalidates the cache. 
+ if not client_key or salt != csalt or iterations != citerations: + salted_pass = hashlib.pbkdf2_hmac(digest, data, standard_b64decode(salt), iterations) + client_key = _hmac(salted_pass, b"Client Key", digestmod).digest() + server_key = _hmac(salted_pass, b"Server Key", digestmod).digest() + cache.data = (client_key, server_key, salt, iterations) + stored_key = digestmod(client_key).digest() + auth_msg = b",".join((first_bare, server_first, without_proof)) + client_sig = _hmac(stored_key, auth_msg, digestmod).digest() + client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig)) + client_final = b",".join((without_proof, client_proof)) + + server_sig = standard_b64encode(_hmac(server_key, auth_msg, digestmod).digest()) + + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(client_final), + } + res = conn.command(source, cmd) + + parsed = _parse_scram_response(res["payload"]) + if not hmac.compare_digest(parsed[b"v"], server_sig): + raise OperationFailure("Server returned an invalid signature.") + + # A third empty challenge may be required if the server does not support + # skipEmptyExchange: SERVER-44857. + if not res["done"]: + cmd = { + "saslContinue": 1, + "conversationId": res["conversationId"], + "payload": Binary(b""), + } + res = conn.command(source, cmd) + if not res["done"]: + raise OperationFailure("SASL conversation failed to complete.") + + +def _password_digest(username: str, password: str) -> str: + """Get a password digest to use for authentication.""" + if not isinstance(password, str): + raise TypeError("password must be an instance of str") + if len(password) == 0: + raise ValueError("password can't be empty") + if not isinstance(username, str): + raise TypeError(f"username must be an instance of str, not {type(username)}") + + md5hash = hashlib.md5() # noqa: S324 + data = f"{username}:mongo:{password}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _auth_key(nonce: str, username: str, password: str) -> str: + """Get an auth key to use for authentication.""" + digest = _password_digest(username, password) + md5hash = hashlib.md5() # noqa: S324 + data = f"{nonce}{username}{digest}" + md5hash.update(data.encode("utf-8")) + return md5hash.hexdigest() + + +def _canonicalize_hostname(hostname: str, option: str | bool) -> str: + """Canonicalize hostname following MIT-krb5 behavior.""" + # https://github.com/krb5/krb5/blob/d406afa363554097ac48646a29249c04f498c88e/src/util/k5test.py#L505-L520 + if option in [False, "none"]: + return hostname + + af, socktype, proto, canonname, sockaddr = ( + _getaddrinfo( + hostname, + None, + family=0, + type=0, + proto=socket.IPPROTO_TCP, + flags=socket.AI_CANONNAME, + ) + )[0] # type: ignore[index] + + # For forward just to resolve the cname as dns.lookup() will not return it. + if option == "forward": + return canonname.lower() + + try: + name = socket.getnameinfo(sockaddr, socket.NI_NAMEREQD) + except socket.gaierror: + return canonname.lower() + + return name[0].lower() + + +def _authenticate_gssapi(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using GSSAPI.""" + if not HAVE_KERBEROS: + raise ConfigurationError( + 'The "kerberos" module must be installed to use GSSAPI authentication.' + ) + + try: + username = credentials.username + password = credentials.password + props = credentials.mechanism_properties + # Starting here and continuing through the while loop below - establish + # the security context. 
See RFC 4752, Section 3.1, first paragraph. + host = props.service_host or conn.address[0] + host = _canonicalize_hostname(host, props.canonicalize_host_name) + service = props.service_name + "@" + host + if props.service_realm is not None: + service = service + "@" + props.service_realm + + if password is not None: + if _USE_PRINCIPAL: + # Note that, though we use unquote_plus for unquoting URI + # options, we use quote here. Microsoft's UrlUnescape (used + # by WinKerberos) doesn't support +. + principal = ":".join((quote(username), quote(password))) + result, ctx = kerberos.authGSSClientInit( + service, principal, gssflags=kerberos.GSS_C_MUTUAL_FLAG + ) + else: + if "@" in username: + user, domain = username.split("@", 1) + else: + user, domain = username, None + result, ctx = kerberos.authGSSClientInit( + service, + gssflags=kerberos.GSS_C_MUTUAL_FLAG, + user=user, + domain=domain, + password=password, + ) + else: + result, ctx = kerberos.authGSSClientInit(service, gssflags=kerberos.GSS_C_MUTUAL_FLAG) + + if result != kerberos.AUTH_GSS_COMPLETE: + raise OperationFailure("Kerberos context failed to initialize.") + + try: + # pykerberos uses a weird mix of exceptions and return values + # to indicate errors. + # 0 == continue, 1 == complete, -1 == error + # Only authGSSClientStep can return 0. + if kerberos.authGSSClientStep(ctx, "") != 0: + raise OperationFailure("Unknown kerberos failure in step function.") + + # Start a SASL conversation with mongod/s + # Note: pykerberos deals with base64 encoded byte strings. + # Since mongo accepts base64 strings as the payload we don't + # have to use bson.binary.Binary. + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslStart": 1, + "mechanism": "GSSAPI", + "payload": payload, + "autoAuthorize": 1, + } + response = conn.command("$external", cmd) + + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + result = kerberos.authGSSClientStep(ctx, str(response["payload"])) + if result == -1: + raise OperationFailure("Unknown kerberos failure in step function.") + + payload = kerberos.authGSSClientResponse(ctx) or "" + + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + response = conn.command("$external", cmd) + + if result == kerberos.AUTH_GSS_COMPLETE: + break + else: + raise OperationFailure("Kerberos authentication failed to complete.") + + # Once the security context is established actually authenticate. + # See RFC 4752, Section 3.1, last two paragraphs. 
+ if kerberos.authGSSClientUnwrap(ctx, str(response["payload"])) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Unwrap step.") + + if kerberos.authGSSClientWrap(ctx, kerberos.authGSSClientResponse(ctx), username) != 1: + raise OperationFailure("Unknown kerberos failure during GSS_Wrap step.") + + payload = kerberos.authGSSClientResponse(ctx) + cmd = { + "saslContinue": 1, + "conversationId": response["conversationId"], + "payload": payload, + } + conn.command("$external", cmd) + + finally: + kerberos.authGSSClientClean(ctx) + + except kerberos.KrbError as exc: + raise OperationFailure(str(exc)) from None + + +def _authenticate_plain(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using SASL PLAIN (RFC 4616)""" + source = credentials.source + username = credentials.username + password = credentials.password + payload = (f"\x00{username}\x00{password}").encode() + cmd = { + "saslStart": 1, + "mechanism": "PLAIN", + "payload": Binary(payload), + "autoAuthorize": 1, + } + conn.command(source, cmd) + + +def _authenticate_x509(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-X509.""" + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + # MONGODB-X509 is done after the speculative auth step. + return + + cmd = _X509Context(credentials, conn.address).speculate_command() + conn.command("$external", cmd) + + +def _authenticate_default(credentials: MongoCredential, conn: Connection) -> None: + if conn.max_wire_version >= 7: + if conn.negotiated_mechs: + mechs = conn.negotiated_mechs + else: + source = credentials.source + cmd = conn.hello_cmd() + cmd["saslSupportedMechs"] = source + "." + credentials.username + mechs = (conn.command(source, cmd, publish_events=False)).get("saslSupportedMechs", []) + if "SCRAM-SHA-256" in mechs: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-256") + else: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + else: + return _authenticate_scram(credentials, conn, "SCRAM-SHA-1") + + +_AUTH_MAP: Mapping[str, Callable[..., None]] = { + "GSSAPI": _authenticate_gssapi, + "MONGODB-X509": _authenticate_x509, + "MONGODB-AWS": _authenticate_aws, + "MONGODB-OIDC": _authenticate_oidc, # type:ignore[dict-item] + "PLAIN": _authenticate_plain, + "SCRAM-SHA-1": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_authenticate_scram, mechanism="SCRAM-SHA-256"), + "DEFAULT": _authenticate_default, +} + + +class _AuthContext: + def __init__(self, credentials: MongoCredential, address: tuple[str, int]) -> None: + self.credentials = credentials + self.speculative_authenticate: Optional[Mapping[str, Any]] = None + self.address = address + + @staticmethod + def from_credentials( + creds: MongoCredential, address: tuple[str, int] + ) -> Optional[_AuthContext]: + spec_cls = _SPECULATIVE_AUTH_MAP.get(creds.mechanism) + if spec_cls: + return cast(_AuthContext, spec_cls(creds, address)) + return None + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + raise NotImplementedError + + def parse_response(self, hello: Hello[Mapping[str, Any]]) -> None: + self.speculative_authenticate = hello.speculative_authenticate + + def speculate_succeeded(self) -> bool: + return bool(self.speculative_authenticate) + + +class _ScramContext(_AuthContext): + def __init__( + self, credentials: MongoCredential, address: tuple[str, int], mechanism: str + ) -> None: + super().__init__(credentials, address) + self.scram_data: 
Optional[tuple[bytes, bytes]] = None + self.mechanism = mechanism + + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + nonce, first_bare, cmd = _authenticate_scram_start(self.credentials, self.mechanism) + # The 'db' field is included only on the speculative command. + cmd["db"] = self.credentials.source + # Save for later use. + self.scram_data = (nonce, first_bare) + return cmd + + +class _X509Context(_AuthContext): + def speculate_command(self) -> MutableMapping[str, Any]: + cmd = {"authenticate": 1, "mechanism": "MONGODB-X509"} + if self.credentials.username is not None: + cmd["user"] = self.credentials.username + return cmd + + +class _OIDCContext(_AuthContext): + def speculate_command(self) -> Optional[MutableMapping[str, Any]]: + authenticator = _get_authenticator(self.credentials, self.address) + cmd = authenticator.get_spec_auth_cmd() + if cmd is None: + return None + cmd["db"] = self.credentials.source + return cmd + + +_SPECULATIVE_AUTH_MAP: Mapping[str, Any] = { + "MONGODB-X509": _X509Context, + "SCRAM-SHA-1": functools.partial(_ScramContext, mechanism="SCRAM-SHA-1"), + "SCRAM-SHA-256": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), + "MONGODB-OIDC": _OIDCContext, + "DEFAULT": functools.partial(_ScramContext, mechanism="SCRAM-SHA-256"), +} + + +def authenticate( + credentials: MongoCredential, conn: Connection, reauthenticate: bool = False +) -> None: + """Authenticate connection.""" + mechanism = credentials.mechanism + auth_func = _AUTH_MAP[mechanism] + if mechanism == "MONGODB-OIDC": + _authenticate_oidc(credentials, conn, reauthenticate) + else: + auth_func(credentials, conn) diff --git a/pymongo/synchronous/auth_aws.py b/pymongo/synchronous/auth_aws.py new file mode 100644 index 0000000000..c7ea47886f --- /dev/null +++ b/pymongo/synchronous/auth_aws.py @@ -0,0 +1,100 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""MONGODB-AWS Authentication helpers.""" +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Mapping, Type + +import bson +from bson.binary import Binary +from pymongo.errors import ConfigurationError, OperationFailure + +if TYPE_CHECKING: + from bson.typings import _ReadableBuffer + from pymongo.auth_shared import MongoCredential + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +def _authenticate_aws(credentials: MongoCredential, conn: Connection) -> None: + """Authenticate using MONGODB-AWS.""" + try: + import pymongo_auth_aws # type:ignore[import] + except ImportError as e: + raise ConfigurationError( + "MONGODB-AWS authentication requires pymongo-auth-aws: " + "install with: python -m pip install 'pymongo[aws]'" + ) from e + # Delayed import. 
+ from pymongo_auth_aws.auth import ( # type:ignore[import] + set_cached_credentials, + set_use_cached_credentials, + ) + + set_use_cached_credentials(True) + + if conn.max_wire_version < 9: + raise ConfigurationError("MONGODB-AWS authentication requires MongoDB version 4.4 or later") + + class AwsSaslContext(pymongo_auth_aws.AwsSaslContext): # type: ignore + # Dependency injection: + def binary_type(self) -> Type[Binary]: + """Return the bson.binary.Binary type.""" + return Binary + + def bson_encode(self, doc: Mapping[str, Any]) -> bytes: + """Encode a dictionary to BSON.""" + return bson.encode(doc) + + def bson_decode(self, data: _ReadableBuffer) -> Mapping[str, Any]: + """Decode BSON to a dictionary.""" + return bson.decode(data) + + try: + ctx = AwsSaslContext( + pymongo_auth_aws.AwsCredential( + credentials.username, + credentials.password, + credentials.mechanism_properties.aws_session_token, + ) + ) + client_payload = ctx.step(None) + client_first = {"saslStart": 1, "mechanism": "MONGODB-AWS", "payload": client_payload} + server_first = conn.command("$external", client_first) + res = server_first + # Limit how many times we loop to catch protocol / library issues + for _ in range(10): + client_payload = ctx.step(res["payload"]) + cmd = { + "saslContinue": 1, + "conversationId": server_first["conversationId"], + "payload": client_payload, + } + res = conn.command("$external", cmd) + if res["done"]: + # SASL complete. + break + except pymongo_auth_aws.PyMongoAuthAwsError as exc: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + # Convert to OperationFailure and include pymongo-auth-aws version. + raise OperationFailure( + f"{exc} (pymongo-auth-aws version {pymongo_auth_aws.__version__})" + ) from None + except Exception: + # Clear the cached credentials if we hit a failure in auth. + set_cached_credentials(None) + raise diff --git a/pymongo/synchronous/auth_oidc.py b/pymongo/synchronous/auth_oidc.py new file mode 100644 index 0000000000..583ee39f67 --- /dev/null +++ b/pymongo/synchronous/auth_oidc.py @@ -0,0 +1,303 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
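
_authenticate_aws above drives the MONGODB-AWS SASL conversation (a saslStart followed by up to ten saslContinue rounds). On the application side it is selected with the authMechanism option; a hedged sketch with placeholder credentials (pymongo-auth-aws must be installed, as the ConfigurationError above notes):

    from pymongo import MongoClient

    # Credentials may also come from the usual AWS environment variables or
    # instance/container metadata instead of being passed explicitly.
    client = MongoClient(
        "mongodb+srv://cluster.example.net",  # placeholder host
        username="<AWS_ACCESS_KEY_ID>",       # placeholder
        password="<AWS_SECRET_ACCESS_KEY>",   # placeholder
        authMechanism="MONGODB-AWS",
    )
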
+ +"""MONGODB-OIDC Authentication helpers.""" +from __future__ import annotations + +import asyncio +import threading +import time +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, Mapping, MutableMapping, Optional, Union + +import bson +from bson.binary import Binary +from pymongo._csot import remaining +from pymongo.auth_oidc_shared import ( + CALLBACK_VERSION, + HUMAN_CALLBACK_TIMEOUT_SECONDS, + MACHINE_CALLBACK_TIMEOUT_SECONDS, + TIME_BETWEEN_CALLS_SECONDS, + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + OIDCIdPInfo, + _OIDCProperties, +) +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.helpers_shared import _AUTHENTICATION_FAILURE_CODE +from pymongo.lock import Lock, _create_lock + +if TYPE_CHECKING: + from pymongo.auth_shared import MongoCredential + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +def _get_authenticator( + credentials: MongoCredential, address: tuple[str, int] +) -> _OIDCAuthenticator: + if credentials.cache.data: + return credentials.cache.data + + # Extract values. + principal_name = credentials.username + properties = credentials.mechanism_properties + + # Validate that the address is allowed. + if properties.human_callback is not None: + found = False + allowed_hosts = properties.allowed_hosts + for patt in allowed_hosts: + if patt == address[0]: + found = True + elif patt.startswith("*.") and address[0].endswith(patt[1:]): + found = True + if not found: + raise ConfigurationError( + f"Refusing to connect to {address[0]}, which is not in authOIDCAllowedHosts: {allowed_hosts}" + ) + + # Get or create the cache data. + credentials.cache.data = _OIDCAuthenticator(username=principal_name, properties=properties) + return credentials.cache.data + + +@dataclass +class _OIDCAuthenticator: + username: str + properties: _OIDCProperties + refresh_token: Optional[str] = field(default=None) + access_token: Optional[str] = field(default=None) + idp_info: Optional[OIDCIdPInfo] = field(default=None) + token_gen_id: int = field(default=0) + if not _IS_SYNC: + lock: Lock = field(default_factory=_create_lock) # type: ignore[assignment] + else: + lock: threading.Lock = field(default_factory=_create_lock) # type: ignore[assignment, no-redef] + + last_call_time: float = field(default=0) + + def reauthenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + """Handle a reauthenticate from the server.""" + # Invalidate the token for the connection. + self._invalidate(conn) + # Call the appropriate auth logic for the callback type. + if self.properties.callback: + return self._authenticate_machine(conn) + return self._authenticate_human(conn) + + def authenticate(self, conn: Connection) -> Optional[Mapping[str, Any]]: + """Handle an initial authenticate request.""" + # First handle speculative auth. + # If it succeeded, we are done. + ctx = conn.auth_ctx + if ctx and ctx.speculate_succeeded(): + resp = ctx.speculative_authenticate + if resp and resp["done"]: + conn.oidc_token_gen_id = self.token_gen_id + return resp + + # If spec auth failed, call the appropriate auth logic for the callback type. + # We cannot assume that the token is invalid, because a proxy may have been + # involved that stripped the speculative auth information. 
+ if self.properties.callback: + return self._authenticate_machine(conn) + return self._authenticate_human(conn) + + def get_spec_auth_cmd(self) -> Optional[MutableMapping[str, Any]]: + """Get the appropriate speculative auth command.""" + if not self.access_token: + return None + return self._get_start_command({"jwt": self.access_token}) + + def _authenticate_machine(self, conn: Connection) -> Mapping[str, Any]: + # If there is a cached access token, try to authenticate with it. If + # authentication fails with error code 18, invalidate the access token, + # fetch a new access token, and try to authenticate again. If authentication + # fails for any other reason, raise the error to the user. + if self.access_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + return self._authenticate_machine(conn) + raise + return self._sasl_start_jwt(conn) + + def _authenticate_human(self, conn: Connection) -> Optional[Mapping[str, Any]]: + # If we have a cached access token, try a JwtStepRequest. If + # authentication fails with error code 18, invalidate the access token, + # and try to authenticate again. If authentication fails for any other + # reason, raise the error to the user. + if self.access_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + return self._authenticate_human(conn) + raise + + # If we have a cached refresh token, try a JwtStepRequest with that. + # If authentication fails with error code 18, invalidate the access and + # refresh tokens, and try to authenticate again. If authentication fails for + # any other reason, raise the error to the user. + if self.refresh_token: + try: + return self._sasl_start_jwt(conn) + except OperationFailure as e: + if self._is_auth_error(e): + self.refresh_token = None + return self._authenticate_human(conn) + raise + + # Start a new Two-Step SASL conversation. + # Run a PrincipalStepRequest to get the IdpInfo. + cmd = self._get_start_command(None) + start_resp = self._run_command(conn, cmd) + # Attempt to authenticate with a JwtStepRequest. + return self._sasl_continue_jwt(conn, start_resp) + + def _get_access_token(self) -> Optional[str]: + properties = self.properties + cb: Union[None, OIDCCallback] + resp: OIDCCallbackResult + + is_human = properties.human_callback is not None + if is_human and self.idp_info is None: + return None + + if properties.callback: + cb = properties.callback + if properties.human_callback: + cb = properties.human_callback + + prev_token = self.access_token + if prev_token: + return prev_token + + if cb is None and not prev_token: + return None + + if not prev_token and cb is not None: + with self.lock: # type: ignore[attr-defined] + # See if the token was changed while we were waiting for the + # lock. + new_token = self.access_token + if new_token != prev_token: + return new_token + + # Ensure that we are waiting a min time between callback invocations.
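+ # This throttle keeps concurrent authentication attempts from invoking the callback in a tight loop (or re-prompting a human user).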
+ delta = time.time() - self.last_call_time + if delta < TIME_BETWEEN_CALLS_SECONDS: + time.sleep(TIME_BETWEEN_CALLS_SECONDS - delta) + self.last_call_time = time.time() + + if is_human: + timeout = HUMAN_CALLBACK_TIMEOUT_SECONDS + assert self.idp_info is not None + else: + timeout = int(remaining() or MACHINE_CALLBACK_TIMEOUT_SECONDS) + context = OIDCCallbackContext( + timeout_seconds=timeout, + version=CALLBACK_VERSION, + refresh_token=self.refresh_token, + idp_info=self.idp_info, + username=self.properties.username, + ) + if not _IS_SYNC: + resp = asyncio.get_running_loop().run_in_executor(None, cb.fetch, context) # type: ignore[assignment] + else: + resp = cb.fetch(context) + if not isinstance(resp, OIDCCallbackResult): + raise ValueError( + f"Callback result must be of type OIDCCallbackResult, not {type(resp)}" + ) + self.refresh_token = resp.refresh_token + self.access_token = resp.access_token + self.token_gen_id += 1 + + return self.access_token + + def _run_command(self, conn: Connection, cmd: MutableMapping[str, Any]) -> Mapping[str, Any]: + try: + return conn.command("$external", cmd, no_reauth=True) # type: ignore[call-arg] + except OperationFailure as e: + if self._is_auth_error(e): + self._invalidate(conn) + raise + + def _is_auth_error(self, err: Exception) -> bool: + if not isinstance(err, OperationFailure): + return False + return err.code == _AUTHENTICATION_FAILURE_CODE + + def _invalidate(self, conn: Connection) -> None: + # Ignore the invalidation if a token gen id is given and is less than our + # current token gen id. + token_gen_id = conn.oidc_token_gen_id or 0 + if token_gen_id is not None and token_gen_id < self.token_gen_id: + return + self.access_token = None + + def _sasl_continue_jwt( + self, conn: Connection, start_resp: Mapping[str, Any] + ) -> Mapping[str, Any]: + self.access_token = None + self.refresh_token = None + start_payload: dict[str, Any] = bson.decode(start_resp["payload"]) + if "issuer" in start_payload: + self.idp_info = OIDCIdPInfo(**start_payload) + access_token = self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_continue_command({"jwt": access_token}, start_resp) + return self._run_command(conn, cmd) + + def _sasl_start_jwt(self, conn: Connection) -> Mapping[str, Any]: + access_token = self._get_access_token() + conn.oidc_token_gen_id = self.token_gen_id + cmd = self._get_start_command({"jwt": access_token}) + return self._run_command(conn, cmd) + + def _get_start_command(self, payload: Optional[Mapping[str, Any]]) -> MutableMapping[str, Any]: + if payload is None: + principal_name = self.username + if principal_name: + payload = {"n": principal_name} + else: + payload = {} + bin_payload = Binary(bson.encode(payload)) + return {"saslStart": 1, "mechanism": "MONGODB-OIDC", "payload": bin_payload} + + def _get_continue_command( + self, payload: Mapping[str, Any], start_resp: Mapping[str, Any] + ) -> MutableMapping[str, Any]: + bin_payload = Binary(bson.encode(payload)) + return { + "saslContinue": 1, + "payload": bin_payload, + "conversationId": start_resp["conversationId"], + } + + +def _authenticate_oidc( + credentials: MongoCredential, conn: Connection, reauthenticate: bool +) -> Optional[Mapping[str, Any]]: + """Authenticate using MONGODB-OIDC.""" + authenticator = _get_authenticator(credentials, conn.address) + if reauthenticate: + return authenticator.reauthenticate(conn) + else: + return authenticator.authenticate(conn) diff --git a/pymongo/synchronous/bulk.py b/pymongo/synchronous/bulk.py new file 
mode 100644 index 0000000000..22d6a7a76a --- /dev/null +++ b/pymongo/synchronous/bulk.py @@ -0,0 +1,751 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The bulk write operations interface. + +.. versionadded:: 2.7 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Iterator, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.bulk_shared import ( + _COMMANDS, + _DELETE_ALL, + _merge_command, + _raise_bulk_write_error, + _Run, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + InvalidOperation, + NotPrimaryError, + OperationFailure, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _DELETE, + _INSERT, + _UPDATE, + _BulkWriteContext, + _convert_exception, + _convert_write_result, + _EncryptedBulkWriteContext, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.client_session import ClientSession, _validate_session_write_concern +from pymongo.synchronous.helpers import _handle_reauth +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + from pymongo.typings import _DocumentOut, _DocumentType, _Pipeline + +_IS_SYNC = True + + +class _Bulk: + """The private guts of the bulk write API.""" + + def __init__( + self, + collection: Collection[_DocumentType], + ordered: bool, + bypass_document_validation: Optional[bool], + comment: Optional[str] = None, + let: Optional[Any] = None, + ) -> None: + """Initialize a _Bulk instance.""" + self.collection = collection.with_options( + codec_options=collection.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + ) + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.comment: Optional[str] = comment + self.ordered = ordered + self.ops: list[tuple[int, Mapping[str, Any]]] = [] + self.executed = False + self.bypass_doc_val = bypass_document_validation + self.uses_collation = False + self.uses_array_filters = False + self.uses_hint_update = False + self.uses_hint_delete = False + self.uses_sort = False + self.is_retryable = True + self.retrying = False + self.started_retryable_write = False + # Extra state so that we know where to pick up on a retry attempt. 
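+ # current_run is the batch in flight; next_run is prefetched one step ahead so _execute_command can tell when the final write concern applies.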
+ self.current_run = None + self.next_run = None + self.is_encrypted = False + + @property + def bulk_ctx_class(self) -> Type[_BulkWriteContext]: + encrypter = self.collection.database.client._encrypter + if encrypter and not encrypter._bypass_auto_encryption: + self.is_encrypted = True + return _EncryptedBulkWriteContext + else: + self.is_encrypted = False + return _BulkWriteContext + + def add_insert(self, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() + self.ops.append((_INSERT, document)) + + def add_update( + self, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool, + upsert: Optional[bool], + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" + validate_ok_for_update(update) + cmd: dict[str, Any] = {"q": selector, "u": update, "multi": multi} + if upsert is not None: + cmd["upsert"] = upsert + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if array_filters is not None: + self.uses_array_filters = True + cmd["arrayFilters"] = array_filters + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort + if multi: + # A bulk_write containing an update_many is not retryable. + self.is_retryable = False + self.ops.append((_UPDATE, cmd)) + + def add_replace( + self, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: Optional[bool], + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create a replace document and add it to the list of ops.""" + validate_ok_for_replace(replacement) + cmd: dict[str, Any] = {"q": selector, "u": replacement} + if upsert is not None: + cmd["upsert"] = upsert + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if hint is not None: + self.uses_hint_update = True + cmd["hint"] = hint + if sort is not None: + self.uses_sort = True + cmd["sort"] = sort + self.ops.append((_UPDATE, cmd)) + + def add_delete( + self, + selector: Mapping[str, Any], + limit: int, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a delete document and add it to the list of ops.""" + cmd: dict[str, Any] = {"q": selector, "limit": limit} + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if hint is not None: + self.uses_hint_delete = True + cmd["hint"] = hint + if limit == _DELETE_ALL: + # A bulk_write containing a delete_many is not retryable. + self.is_retryable = False + self.ops.append((_DELETE, cmd)) + + def gen_ordered(self) -> Iterator[Optional[_Run]]: + """Generate batches of operations, batched by type of + operation, in the order **provided**. 
+ """ + run = None + for idx, (op_type, operation) in enumerate(self.ops): + if run is None: + run = _Run(op_type) + elif run.op_type != op_type: + yield run + run = _Run(op_type) + run.add(idx, operation) + yield run + + def gen_unordered(self) -> Iterator[_Run]: + """Generate batches of operations, batched by type of + operation, in arbitrary order. + """ + operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)] + for idx, (op_type, operation) in enumerate(self.ops): + operations[op_type].add(idx, operation) + + for run in operations: + if run.ops: + yield run + + @_handle_reauth + def write_command( + self, + bwc: _BulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + docs: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> dict[str, Any]: + """A proxy for SocketInfo.write_command that handles event publishing.""" + cmd[bwc.field] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, docs) + try: + reply = bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + client._process_response(reply, bwc.session) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Process the response from the server. 
+ if isinstance(exc, (NotPrimaryError, OperationFailure)): + client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + raise + return reply # type: ignore[return-value] + + def unack_write( + self, + bwc: _BulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + max_doc_size: int, + docs: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> Optional[Mapping[str, Any]]: + """A proxy for Connection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, docs) + try: + result = bwc.conn.unack_write(msg, max_doc_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. + reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + raise + return result # type: ignore[return-value] + + def _execute_batch_unack( + self, + bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], + cmd: dict[str, Any], + ops: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> list[Mapping[str, Any]]: + if self.is_encrypted: + _, batched_cmd, to_send = bwc.batch_command(cmd, ops) + bwc.conn.command( # type: ignore[misc] + bwc.db_name, + batched_cmd, # type: ignore[arg-type] + write_concern=WriteConcern(w=0), + session=bwc.session, # type: ignore[arg-type] + client=client, # type: ignore[arg-type] + ) + 
else: + request_id, msg, to_send = bwc.batch_command(cmd, ops) + # Though this isn't strictly a "legacy" write, the helper + # handles publishing commands and sending our message + # without receiving a result. Send 0 for max_doc_size + # to disable size checking. Size checking is handled while + # the documents are encoded to BSON. + self.unack_write(bwc, cmd, request_id, msg, 0, to_send, client) # type: ignore[arg-type] + + return to_send + + def _execute_batch( + self, + bwc: Union[_BulkWriteContext, _EncryptedBulkWriteContext], + cmd: dict[str, Any], + ops: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]]]: + if self.is_encrypted: + _, batched_cmd, to_send = bwc.batch_command(cmd, ops) + result = bwc.conn.command( # type: ignore[misc] + bwc.db_name, + batched_cmd, # type: ignore[arg-type] + codec_options=bwc.codec, + session=bwc.session, # type: ignore[arg-type] + client=client, # type: ignore[arg-type] + ) + else: + request_id, msg, to_send = bwc.batch_command(cmd, ops) + result = self.write_command(bwc, cmd, request_id, msg, to_send, client) # type: ignore[arg-type] + + return result, to_send # type: ignore[return-value] + + def _execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + conn: Connection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + db_name = self.collection.database.name + client = self.collection.database.client + listeners = client._event_listeners + + if not self.current_run: + self.current_run = next(generator) + self.next_run = None + run = self.current_run + + # Connection.command validates the session, but we use + # Connection.write_command + conn.validate_session(client, session) + last_run = False + + while run: + if not self.retrying: + self.next_run = next(generator, None) + if self.next_run is None: + last_run = True + + cmd_name = _COMMANDS[run.op_type] + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, + session, + run.op_type, + self.collection.codec_options, + ) + + while run.idx_offset < len(run.ops): + # If this is the last possible operation, use the + # final write concern. + if last_run and (len(run.ops) - run.idx_offset) == 1: + write_concern = final_write_concern or write_concern + + cmd = {cmd_name: self.collection.name, "ordered": self.ordered} + if self.comment: + cmd["comment"] = self.comment + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.let is not None and run.op_type in (_DELETE, _UPDATE): + cmd["let"] = self.let + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(client, cmd) + ops = islice(run.ops, run.idx_offset, None) + + # Run as many ops as possible in one command. + if write_concern.acknowledged: + result, to_send = self._execute_batch(bwc, cmd, ops, client) + + # Retryable writeConcernErrors halt the execution of this run. 
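+ # A deep copy of the accumulated result is raised so that a retried attempt never observes a partially merged state.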
+ wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(run, full, run.idx_offset, result) + _raise_bulk_write_error(full) + + _merge_command(run, full_result, run.idx_offset, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + if self.ordered and "writeErrors" in result: + break + else: + to_send = self._execute_batch_unack(bwc, cmd, ops, client) + + run.idx_offset += len(to_send) + + # We're supposed to continue if errors are + # at the write concern level (e.g. wtimeout) + if self.ordered and full_result["writeErrors"]: + break + # Reset our state + self.current_run = run = self.next_run + + def execute_command( + self, + generator: Iterator[Any], + write_concern: WriteConcern, + session: Optional[ClientSession], + operation: str, + ) -> dict[str, Any]: + """Execute using write commands.""" + # nModified is only reported for write commands, not legacy ops. + full_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + op_id = _randint() + + def retryable_bulk( + session: Optional[ClientSession], conn: Connection, retryable: bool + ) -> None: + self._execute_command( + generator, + write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + client = self.collection.database.client + _ = client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, # type: ignore[arg-type] + operation_id=op_id, + ) + + if full_result["writeErrors"] or full_result["writeConcernErrors"]: + _raise_bulk_write_error(full_result) + return full_result + + def execute_op_msg_no_results(self, conn: Connection, generator: Iterator[Any]) -> None: + """Execute write commands with OP_MSG and w=0 writeConcern, unordered.""" + db_name = self.collection.database.name + client = self.collection.database.client + listeners = client._event_listeners + op_id = _randint() + + if not self.current_run: + self.current_run = next(generator) + run = self.current_run + + while run: + cmd_name = _COMMANDS[run.op_type] + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, + None, + run.op_type, + self.collection.codec_options, + ) + + while run.idx_offset < len(run.ops): + cmd = { + cmd_name: self.collection.name, + "ordered": False, + "writeConcern": {"w": 0}, + } + conn.add_server_api(cmd) + ops = islice(run.ops, run.idx_offset, None) + # Run as many ops as possible. + to_send = self._execute_batch_unack(bwc, cmd, ops, client) + run.idx_offset += len(to_send) + self.current_run = run = next(generator, None) + + def execute_command_no_results( + self, + conn: Connection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute write commands with OP_MSG and w=0 WriteConcern, ordered.""" + full_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + # Ordered bulk writes have to be acknowledged so that we stop + # processing at the first error, even when the application + # specified unacknowledged writeConcern. 
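+ # Internally we therefore run with an acknowledged write concern and only apply the caller's w=0 concern to the final command.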
+ initial_write_concern = WriteConcern() + op_id = _randint() + try: + self._execute_command( + generator, + initial_write_concern, + None, + conn, + op_id, + False, + full_result, + write_concern, + ) + except OperationFailure: + pass + + def execute_no_results( + self, + conn: Connection, + generator: Iterator[Any], + write_concern: WriteConcern, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Guard against unsupported unacknowledged writes. + unack = write_concern and not write_concern.acknowledged + if unack and self.uses_hint_delete and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if unack and self.uses_hint_update and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if unack and self.uses_sort and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + # Cannot have both unacknowledged writes and bypass document validation. + if self.bypass_doc_val: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + if self.ordered: + return self.execute_command_no_results(conn, generator, write_concern) + return self.execute_op_msg_no_results(conn, generator) + + def execute( + self, + write_concern: WriteConcern, + session: Optional[ClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + write_concern = write_concern or self.collection.write_concern + session = _validate_session_write_concern(session, write_concern) + + if self.ordered: + generator = self.gen_ordered() + else: + generator = self.gen_unordered() + + client = self.collection.database.client + if not write_concern.acknowledged: + with client._conn_for_writes(session, operation) as connection: + self.execute_no_results(connection, generator, write_concern) + return None + else: + return self.execute_command(generator, write_concern, session, operation) diff --git a/pymongo/synchronous/change_stream.py b/pymongo/synchronous/change_stream.py new file mode 100644 index 0000000000..7e34d7b848 --- /dev/null +++ b/pymongo/synchronous/change_stream.py @@ -0,0 +1,494 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
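Before the change stream module, a quick note on the _Bulk machinery above: it backs the public Collection.bulk_write API. A minimal usage sketch follows; the database and collection names are illustrative placeholders.

from pymongo import DeleteMany, InsertOne, MongoClient, UpdateOne

client = MongoClient()
coll = client.test.events  # Illustrative database and collection names.
result = coll.bulk_write(
    [
        InsertOne({"status": "new"}),
        UpdateOne({"status": "new"}, {"$set": {"status": "seen"}}),
        DeleteMany({"status": "stale"}),  # Marks the batch non-retryable.
    ],
    ordered=True,
)
print(result.inserted_count, result.modified_count, result.deleted_count)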
+ +"""Watch changes on a collection, a database, or the entire cluster.""" +from __future__ import annotations + +import copy +from typing import TYPE_CHECKING, Any, Generic, Mapping, Optional, Type, Union + +from bson import CodecOptions, _bson_to_dict +from bson.raw_bson import RawBSONDocument +from bson.timestamp import Timestamp +from pymongo import _csot, common +from pymongo.collation import validate_collation_or_none +from pymongo.errors import ( + ConnectionFailure, + CursorNotFound, + InvalidOperation, + OperationFailure, + PyMongoError, +) +from pymongo.operations import _Op +from pymongo.synchronous.aggregation import ( + _AggregationCommand, + _CollectionAggregationCommand, + _DatabaseAggregationCommand, +) +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.typings import _CollationIn, _DocumentType, _Pipeline + +_IS_SYNC = True + +# The change streams spec considers the following server errors from the +# getMore command non-resumable. All other getMore errors are resumable. +_RESUMABLE_GETMORE_ERRORS = frozenset( + [ + 6, # HostUnreachable + 7, # HostNotFound + 89, # NetworkTimeout + 91, # ShutdownInProgress + 189, # PrimarySteppedDown + 262, # ExceededTimeLimit + 9001, # SocketException + 10107, # NotWritablePrimary + 11600, # InterruptedAtShutdown + 11602, # InterruptedDueToReplStateChange + 13435, # NotPrimaryNoSecondaryOk + 13436, # NotPrimaryOrSecondary + 63, # StaleShardVersion + 150, # StaleEpoch + 13388, # StaleConfig + 234, # RetryChangeStream + 133, # FailedToSatisfyReadPreference + ] +) + + +if TYPE_CHECKING: + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.database import Database + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + + +def _resumable(exc: PyMongoError) -> bool: + """Return True if given a resumable change stream error.""" + if isinstance(exc, (ConnectionFailure, CursorNotFound)): + return True + if isinstance(exc, OperationFailure): + if exc._max_wire_version is None: + return False + return ( + exc._max_wire_version >= 9 and exc.has_error_label("ResumableChangeStreamError") + ) or (exc._max_wire_version < 9 and exc.code in _RESUMABLE_GETMORE_ERRORS) + return False + + +class ChangeStream(Generic[_DocumentType]): + """The internal abstract base class for change stream cursors. + + Should not be called directly by application developers. Use + :meth:`pymongo.collection.Collection.watch`, + :meth:`pymongo.database.Database.watch`, or + :meth:`pymongo.mongo_client.MongoClient.watch` instead. + + .. versionadded:: 3.6 + .. seealso:: The MongoDB documentation on `changeStreams `_. 
+ """ + + def __init__( + self, + target: Union[ + MongoClient[_DocumentType], + Database[_DocumentType], + Collection[_DocumentType], + ], + pipeline: Optional[_Pipeline], + full_document: Optional[str], + resume_after: Optional[Mapping[str, Any]], + max_await_time_ms: Optional[int], + batch_size: Optional[int], + collation: Optional[_CollationIn], + start_at_operation_time: Optional[Timestamp], + session: Optional[ClientSession], + start_after: Optional[Mapping[str, Any]], + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> None: + if pipeline is None: + pipeline = [] + pipeline = common.validate_list("pipeline", pipeline) + common.validate_string_or_none("full_document", full_document) + validate_collation_or_none(collation) + common.validate_non_negative_integer_or_none("batchSize", batch_size) + + self._decode_custom = False + self._orig_codec_options: CodecOptions[_DocumentType] = target.codec_options + if target.codec_options.type_registry._decoder_map: + self._decode_custom = True + # Keep the type registry so that we support encoding custom types + # in the pipeline. + self._target = target.with_options( # type: ignore + codec_options=target.codec_options.with_options(document_class=RawBSONDocument) + ) + else: + self._target = target + + self._pipeline = copy.deepcopy(pipeline) + self._full_document = full_document + self._full_document_before_change = full_document_before_change + self._uses_start_after = start_after is not None + self._uses_resume_after = resume_after is not None + self._resume_token = copy.deepcopy(start_after or resume_after) + self._max_await_time_ms = max_await_time_ms + self._batch_size = batch_size + self._collation = collation + self._start_at_operation_time = start_at_operation_time + self._session = session + self._comment = comment + self._closed = False + self._timeout = self._target._timeout + self._show_expanded_events = show_expanded_events + + def _initialize_cursor(self) -> None: + # Initialize cursor. + self._cursor = self._create_cursor() + + @property + def _aggregation_command_class(self) -> Type[_AggregationCommand]: + """The aggregation command class to be used.""" + raise NotImplementedError + + @property + def _client(self) -> MongoClient: # type: ignore[type-arg] + """The client against which the aggregation commands for + this ChangeStream will be run. 
+ """ + raise NotImplementedError + + def _change_stream_options(self) -> dict[str, Any]: + """Return the options dict for the $changeStream pipeline stage.""" + options: dict[str, Any] = {} + if self._full_document is not None: + options["fullDocument"] = self._full_document + + if self._full_document_before_change is not None: + options["fullDocumentBeforeChange"] = self._full_document_before_change + + resume_token = self.resume_token + if resume_token is not None: + if self._uses_start_after: + options["startAfter"] = resume_token + else: + options["resumeAfter"] = resume_token + + elif self._start_at_operation_time is not None: + options["startAtOperationTime"] = self._start_at_operation_time + + if self._show_expanded_events: + options["showExpandedEvents"] = self._show_expanded_events + + return options + + def _command_options(self) -> dict[str, Any]: + """Return the options dict for the aggregation command.""" + options = {} + if self._max_await_time_ms is not None: + options["maxAwaitTimeMS"] = self._max_await_time_ms + if self._batch_size is not None: + options["batchSize"] = self._batch_size + return options + + def _aggregation_pipeline(self) -> list[dict[str, Any]]: + """Return the full aggregation pipeline for this ChangeStream.""" + options = self._change_stream_options() + full_pipeline: list[dict[str, Any]] = [{"$changeStream": options}] + full_pipeline.extend(self._pipeline) + return full_pipeline + + def _process_result(self, result: Mapping[str, Any], conn: Connection) -> None: + """Callback that caches the postBatchResumeToken or + startAtOperationTime from a changeStream aggregate command response + containing an empty batch of change documents. + + This is implemented as a callback because we need access to the wire + version in order to determine whether to cache this value. + """ + if not result["cursor"]["firstBatch"]: + if "postBatchResumeToken" in result["cursor"]: + self._resume_token = result["cursor"]["postBatchResumeToken"] + elif ( + self._start_at_operation_time is None + and self._uses_resume_after is False + and self._uses_start_after is False + and conn.max_wire_version >= 7 + ): + self._start_at_operation_time = result.get("operationTime") + # PYTHON-2181: informative error on missing operationTime. + if self._start_at_operation_time is None: + raise OperationFailure( + "Expected field 'operationTime' missing from command " + f"response : {result!r}" + ) + + def _run_aggregation_cmd(self, session: Optional[ClientSession]) -> CommandCursor: # type: ignore[type-arg] + """Run the full aggregation pipeline for this ChangeStream and return + the corresponding CommandCursor. 
+ """ + cmd = self._aggregation_command_class( + self._target, + CommandCursor, + self._aggregation_pipeline(), + self._command_options(), + result_processor=self._process_result, + comment=self._comment, + ) + return self._client._retryable_read( + cmd.get_cursor, + self._target._read_preference_for(session), + session, + operation=_Op.AGGREGATE, + ) + + def _create_cursor(self) -> CommandCursor: # type: ignore[type-arg] + with self._client._tmp_session(self._session) as s: + return self._run_aggregation_cmd(session=s) + + def _resume(self) -> None: + """Reestablish this change stream after a resumable error.""" + try: + self._cursor.close() + except PyMongoError: + pass + self._cursor = self._create_cursor() + + def close(self) -> None: + """Close this ChangeStream.""" + self._closed = True + self._cursor.close() + + def __iter__(self) -> ChangeStream[_DocumentType]: + return self + + @property + def resume_token(self) -> Optional[Mapping[str, Any]]: + """The cached resume token that will be used to resume after the most + recently returned change. + + .. versionadded:: 3.9 + """ + return copy.deepcopy(self._resume_token) + + @_csot.apply + def next(self) -> _DocumentType: + """Advance the cursor. + + This method blocks until the next change document is returned or an + unrecoverable error is raised. This method is used when iterating over + all changes in the cursor. For example:: + + try: + resume_token = None + pipeline = [{'$match': {'operationType': 'insert'}}] + with db.collection.watch(pipeline) as stream: + for insert_change in stream: + print(insert_change) + resume_token = stream.resume_token + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + if resume_token is None: + # There is no usable resume token because there was a + # failure during ChangeStream initialization. + logging.error('...') + else: + # Use the interrupted ChangeStream's resume token to create + # a new ChangeStream. The new stream will continue from the + # last seen insert change without missing any events. + with db.collection.watch( + pipeline, resume_after=resume_token) as stream: + for insert_change in stream: + print(insert_change) + + Raises :exc:`StopIteration` if this ChangeStream is closed. + """ + while self.alive: + doc = self.try_next() + if doc is not None: + return doc + + raise StopIteration + + __next__ = next + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + .. note:: Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration` and :meth:`try_next` can return ``None``. + + .. versionadded:: 3.8 + """ + return not self._closed + + @_csot.apply + def try_next(self) -> Optional[_DocumentType]: + """Advance the cursor without blocking indefinitely. + + This method returns the next change document without waiting + indefinitely for the next change. For example:: + + with db.collection.watch() as stream: + while stream.alive: + change = stream.try_next() + # Note that the ChangeStream's resume token may be updated + # even when no changes are returned. + print("Current resume token: %r" % (stream.resume_token,)) + if change is not None: + print("Change document: %r" % (change,)) + continue + # We end up here when there are no recent changes. + # Sleep for a while before trying again to avoid flooding + # the server with getMore requests when no changes are + # available. 
+ time.sleep(10) + + If no change document is cached locally then this method runs a single + getMore command. If the getMore yields any documents, the next + document is returned, otherwise, if the getMore returns no documents + (because there have been no changes) then ``None`` is returned. + + :return: The next change document or ``None`` when no document is available + after running a single getMore or when the cursor is closed. + + .. versionadded:: 3.8 + """ + if not self._closed and not self._cursor.alive: + self._resume() + + # Attempt to get the next change with at most one getMore and at most + # one resume attempt. + try: + try: + change = self._cursor._try_next(True) + except PyMongoError as exc: + if not _resumable(exc): + raise + self._resume() + change = self._cursor._try_next(False) + except PyMongoError as exc: + # Close the stream after a fatal error. + if not _resumable(exc) and not exc.timeout: + self.close() + raise + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + self.close() + raise + + # Check if the cursor was invalidated. + if not self._cursor.alive: + self._closed = True + + # If no changes are available. + if change is None: + # We have either iterated over all documents in the cursor, + # OR the most-recently returned batch is empty. In either case, + # update the cached resume token with the postBatchResumeToken if + # one was returned. We also clear the startAtOperationTime. + if self._cursor._post_batch_resume_token is not None: + self._resume_token = self._cursor._post_batch_resume_token + self._start_at_operation_time = None + return change + + # Else, changes are available. + try: + resume_token = change["_id"] + except KeyError: + self.close() + raise InvalidOperation( + "Cannot provide resume functionality when the resume token is missing." + ) from None + + # If this is the last change document from the current batch, cache the + # postBatchResumeToken. + if not self._cursor._has_next() and self._cursor._post_batch_resume_token: + resume_token = self._cursor._post_batch_resume_token + + # Hereafter, don't use startAfter; instead use resumeAfter. + self._uses_start_after = False + self._uses_resume_after = True + + # Cache the resume token and clear startAtOperationTime. + self._resume_token = resume_token + self._start_at_operation_time = None + + if self._decode_custom: + return _bson_to_dict(change.raw, self._orig_codec_options) + return change + + def __enter__(self) -> ChangeStream[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + +class CollectionChangeStream(ChangeStream[_DocumentType]): + """A change stream that watches changes on a single collection. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.collection.Collection.watch` instead. + + .. versionadded:: 3.7 + """ + + _target: Collection[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_CollectionAggregationCommand]: + return _CollectionAggregationCommand + + @property + def _client(self) -> MongoClient[_DocumentType]: + return self._target.database.client + + +class DatabaseChangeStream(ChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in a database. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.database.Database.watch` instead. + + .. 
versionadded:: 3.7 + """ + + _target: Database[_DocumentType] + + @property + def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: + return _DatabaseAggregationCommand + + @property + def _client(self) -> MongoClient[_DocumentType]: + return self._target.client + + +class ClusterChangeStream(DatabaseChangeStream[_DocumentType]): + """A change stream that watches changes on all collections in the cluster. + + Should not be called directly by application developers. Use + helper method :meth:`pymongo.mongo_client.MongoClient.watch` instead. + + .. versionadded:: 3.7 + """ + + def _change_stream_options(self) -> dict[str, Any]: + options = super()._change_stream_options() + options["allChangesForCluster"] = True + return options diff --git a/pymongo/synchronous/client_bulk.py b/pymongo/synchronous/client_bulk.py new file mode 100644 index 0000000000..a606d028e1 --- /dev/null +++ b/pymongo/synchronous/client_bulk.py @@ -0,0 +1,754 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The client-level bulk write operations interface. + +.. versionadded:: 4.9 +""" +from __future__ import annotations + +import copy +import datetime +import logging +from collections.abc import MutableMapping +from itertools import islice +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + Optional, + Type, + Union, +) + +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from pymongo import _csot, common +from pymongo.synchronous.client_session import ClientSession, _validate_session_write_concern +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.helpers import _handle_reauth + +if TYPE_CHECKING: + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection +from pymongo._client_bulk_shared import ( + _merge_command, + _throw_client_bulk_write_exception, +) +from pymongo.common import ( + validate_is_document_type, + validate_ok_for_replace, + validate_ok_for_update, +) +from pymongo.errors import ( + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + WaitQueueTimeoutError, +) +from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import ( + _ClientBulkWriteContext, + _convert_client_bulk_exception, + _convert_exception, + _convert_write_result, + _randint, +) +from pymongo.read_preferences import ReadPreference +from pymongo.results import ( + ClientBulkWriteResult, + DeleteResult, + InsertOneResult, + UpdateResult, +) +from pymongo.typings import _DocumentOut, _Pipeline +from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class _ClientBulk: + """The private guts of the client-level bulk write API.""" + + def __init__( + self, + client: MongoClient[Any], + 
write_concern: WriteConcern, + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + comment: Optional[str] = None, + let: Optional[Any] = None, + verbose_results: bool = False, + ) -> None: + """Initialize a _ClientBulk instance.""" + self.client = client + self.write_concern = write_concern + self.let = let + if self.let is not None: + common.validate_is_document_type("let", self.let) + self.ordered = ordered + self.bypass_doc_val = bypass_document_validation + self.comment = comment + self.verbose_results = verbose_results + self.ops: list[tuple[str, Mapping[str, Any]]] = [] + self.namespaces: list[str] = [] + self.idx_offset: int = 0 + self.total_ops: int = 0 + self.executed = False + self.uses_collation = False + self.uses_array_filters = False + self.is_retryable = self.client.options.retry_writes + self.retrying = False + self.started_retryable_write = False + + @property + def bulk_ctx_class(self) -> Type[_ClientBulkWriteContext]: + return _ClientBulkWriteContext + + def add_insert(self, namespace: str, document: _DocumentOut) -> None: + """Add an insert document to the list of ops.""" + validate_is_document_type("document", document) + # Generate ObjectId client side. + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() + cmd = {"insert": -1, "document": document} + self.ops.append(("insert", cmd)) + self.namespaces.append(namespace) + self.total_ops += 1 + + def add_update( + self, + namespace: str, + selector: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + multi: bool, + upsert: Optional[bool] = None, + collation: Optional[Mapping[str, Any]] = None, + array_filters: Optional[list[Mapping[str, Any]]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create an update document and add it to the list of ops.""" + validate_ok_for_update(update) + cmd = { + "update": -1, + "filter": selector, + "updateMods": update, + "multi": multi, + } + if upsert is not None: + cmd["upsert"] = upsert + if array_filters is not None: + self.uses_array_filters = True + cmd["arrayFilters"] = array_filters + if hint is not None: + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if sort is not None: + cmd["sort"] = sort + if multi: + # A bulk_write containing an update_many is not retryable. 
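+ # (The retryable writes spec excludes multi-document updates and deletes.)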
+ self.is_retryable = False + self.ops.append(("update", cmd)) + self.namespaces.append(namespace) + self.total_ops += 1 + + def add_replace( + self, + namespace: str, + selector: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: Optional[bool] = None, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + sort: Optional[Mapping[str, Any]] = None, + ) -> None: + """Create a replace document and add it to the list of ops.""" + validate_ok_for_replace(replacement) + cmd = { + "update": -1, + "filter": selector, + "updateMods": replacement, + "multi": False, + } + if upsert is not None: + cmd["upsert"] = upsert + if hint is not None: + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if sort is not None: + cmd["sort"] = sort + self.ops.append(("replace", cmd)) + self.namespaces.append(namespace) + self.total_ops += 1 + + def add_delete( + self, + namespace: str, + selector: Mapping[str, Any], + multi: bool, + collation: Optional[Mapping[str, Any]] = None, + hint: Union[str, dict[str, Any], None] = None, + ) -> None: + """Create a delete document and add it to the list of ops.""" + cmd = {"delete": -1, "filter": selector, "multi": multi} + if hint is not None: + cmd["hint"] = hint + if collation is not None: + self.uses_collation = True + cmd["collation"] = collation + if multi: + # A bulk_write containing a delete_many is not retryable. + self.is_retryable = False + self.ops.append(("delete", cmd)) + self.namespaces.append(namespace) + self.total_ops += 1 + + @_handle_reauth + def write_command( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: Union[bytes, dict[str, Any]], + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> dict[str, Any]: + """A proxy for Connection.write_command that handles event publishing.""" + cmd["ops"] = op_docs + cmd["nsInfo"] = ns_docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._start(cmd, request_id, op_docs, ns_docs) + try: + reply = bwc.conn.write_command(request_id, msg, bwc.codec) # type: ignore[misc, arg-type] + duration = datetime.datetime.now() - bwc.start_time + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) # type: ignore[arg-type] + # Process the response from the server.
+ self.client._process_response(reply, bwc.session) # type: ignore[arg-type] + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + + if bwc.publish: + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + # Process the response from the server. + if isinstance(exc, OperationFailure): + self.client._process_response(exc.details, bwc.session) # type: ignore[arg-type] + else: + self.client._process_response({}, bwc.session) # type: ignore[arg-type] + return reply # type: ignore[return-value] + + def unack_write( + self, + bwc: _ClientBulkWriteContext, + cmd: MutableMapping[str, Any], + request_id: int, + msg: bytes, + op_docs: list[Mapping[str, Any]], + ns_docs: list[Mapping[str, Any]], + client: MongoClient[Any], + ) -> Optional[Mapping[str, Any]]: + """A proxy for Connection.unack_write that handles event publishing.""" + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + cmd = bwc._start(cmd, request_id, op_docs, ns_docs) + try: + result = bwc.conn.unack_write(msg, bwc.max_bson_size) # type: ignore[func-returns-value, misc, override] + duration = datetime.datetime.now() - bwc.start_time + if result is not None: + reply = _convert_write_result(bwc.name, cmd, result) # type: ignore[arg-type] + else: + # Comply with APM spec. 
+ reply = {"ok": 1} + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=reply, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + ) + if bwc.publish: + bwc._succeed(request_id, reply, duration) + except Exception as exc: + duration = datetime.datetime.now() - bwc.start_time + if isinstance(exc, OperationFailure): + failure: _DocumentOut = _convert_write_result(bwc.name, cmd, exc.details) # type: ignore[arg-type] + elif isinstance(exc, NotPrimaryError): + failure = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=bwc.db_name, + requestId=request_id, + operationId=request_id, + driverConnectionId=bwc.conn.id, + serverConnectionId=bwc.conn.server_connection_id, + serverHost=bwc.conn.address[0], + serverPort=bwc.conn.address[1], + serviceId=bwc.conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if bwc.publish: + assert bwc.start_time is not None + bwc._fail(request_id, failure, duration) + # Top-level error will be embedded in ClientBulkWriteException. + reply = {"error": exc} + return reply + + def _execute_batch_unack( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (unack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) + self.unack_write(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + return to_send_ops, to_send_ns + + def _execute_batch( + self, + bwc: _ClientBulkWriteContext, + cmd: dict[str, Any], + ops: list[tuple[str, Mapping[str, Any]]], + namespaces: list[str], + ) -> tuple[dict[str, Any], list[Mapping[str, Any]], list[Mapping[str, Any]]]: + """Executes a batch of bulkWrite server commands (ack).""" + request_id, msg, to_send_ops, to_send_ns = bwc.batch_command(cmd, ops, namespaces) + result = self.write_command(bwc, cmd, request_id, msg, to_send_ops, to_send_ns, self.client) # type: ignore[arg-type] + return result, to_send_ops, to_send_ns # type: ignore[return-value] + + def _process_results_cursor( + self, + full_result: MutableMapping[str, Any], + result: MutableMapping[str, Any], + conn: Connection, + session: Optional[ClientSession], + ) -> None: + """Internal helper for processing the server reply command cursor.""" + if result.get("cursor"): + if session: + session._leave_alive = True + coll = Collection( + database=Database(self.client, "admin"), + name="$cmd.bulkWrite", + ) + cmd_cursor = CommandCursor( + coll, + result["cursor"], + conn.address, + session=session, + comment=self.comment, + ) + cmd_cursor._maybe_pin_connection(conn) + + # Iterate the cursor to get individual write results. 
+ try: + for doc in cmd_cursor: + original_index = doc["idx"] + self.idx_offset + op_type, op = self.ops[original_index] + + if not doc["ok"]: + result["writeErrors"].append(doc) + if self.ordered: + return + + # Record individual write result. + if doc["ok"] and self.verbose_results: + if op_type == "insert": + inserted_id = op["document"]["_id"] + res = InsertOneResult(inserted_id, acknowledged=True) # type: ignore[assignment] + if op_type in ["update", "replace"]: + op_type = "update" + res = UpdateResult(doc, acknowledged=True, in_client_bulk=True) # type: ignore[assignment] + if op_type == "delete": + res = DeleteResult(doc, acknowledged=True) # type: ignore[assignment] + full_result[f"{op_type}Results"][original_index] = res + except Exception as exc: + # Attempt to close the cursor, then raise top-level error. + if cmd_cursor.alive: + cmd_cursor.close() + result["error"] = _convert_client_bulk_exception(exc) + + def _execute_command( + self, + write_concern: WriteConcern, + session: Optional[ClientSession], + conn: Connection, + op_id: int, + retryable: bool, + full_result: MutableMapping[str, Any], + final_write_concern: Optional[WriteConcern] = None, + ) -> None: + """Internal helper for executing batches of bulkWrite commands.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + + # Connection.command validates the session, but we use + # Connection.write_command + conn.validate_session(self.client, session) + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + session, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # If this is the last possible batch, use the + # final write concern. + if self.total_ops - self.idx_offset <= bwc.max_write_batch_size: + write_concern = final_write_concern or write_concern + + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = not self.verbose_results + cmd["ordered"] = self.ordered # type: ignore[assignment] + not_in_transaction = session and not session.in_transaction + if not_in_transaction or not session: + _csot.apply_write_concern(cmd, write_concern) + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + if session: + # Start a new retryable write unless one was already + # started for this command. + if retryable and not self.started_retryable_write: + session._start_retryable_write() + self.started_retryable_write = True + session._apply_to(cmd, retryable, ReadPreference.PRIMARY, conn) + conn.send_cluster_time(cmd, session, self.client) + conn.add_server_api(cmd) + # CSOT: apply timeout before encoding the command. + conn.apply_timeout(self.client, cmd) + ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) + + # Run as many ops as possible in one server command. + if write_concern.acknowledged: + raw_result, to_send_ops, _ = self._execute_batch(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + result = raw_result + + # Top-level server/network error. 
+ if result.get("error"): + error = result["error"] + retryable_top_level_error = ( + hasattr(error, "details") + and isinstance(error.details, dict) + and error.details.get("code", 0) in _RETRYABLE_ERROR_CODES + ) + retryable_network_error = isinstance( + error, ConnectionFailure + ) and not isinstance(error, (NotPrimaryError, WaitQueueTimeoutError)) + + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + if retryable and (retryable_top_level_error or retryable_network_error): + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + else: + _merge_command(self.ops, self.idx_offset, full_result, result) + _throw_client_bulk_write_exception(full_result, self.verbose_results) + + result["error"] = None + result["writeErrors"] = [] + if result.get("nErrors", 0) < len(to_send_ops): + full_result["anySuccessful"] = True + + # Top-level command error. + if not result["ok"]: + result["error"] = raw_result + _merge_command(self.ops, self.idx_offset, full_result, result) + break + + if retryable: + # Retryable writeConcernErrors halt the execution of this batch. + wce = result.get("writeConcernError", {}) + if wce.get("code", 0) in _RETRYABLE_ERROR_CODES: + # Synthesize the full bulk result without modifying the + # current one because this write operation may be retried. + full = copy.deepcopy(full_result) + _merge_command(self.ops, self.idx_offset, full, result) + _throw_client_bulk_write_exception(full, self.verbose_results) + + # Process the server reply as a command cursor. + self._process_results_cursor(full_result, result, conn, session) + + # Merge this batch's results with the full results. + _merge_command(self.ops, self.idx_offset, full_result, result) + + # We're no longer in a retry once a command succeeds. + self.retrying = False + self.started_retryable_write = False + + else: + to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + # We halt execution if we hit a top-level error, + # or an individual error in an ordered bulk write. + if full_result["error"] or (self.ordered and full_result["writeErrors"]): + break + + def execute_command( + self, + session: Optional[ClientSession], + operation: str, + ) -> MutableMapping[str, Any]: + """Execute commands with w=1 WriteConcern.""" + full_result: MutableMapping[str, Any] = { + "anySuccessful": False, + "error": None, + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 0, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nDeleted": 0, + "insertResults": {}, + "updateResults": {}, + "deleteResults": {}, + } + op_id = _randint() + + def retryable_bulk( + session: Optional[ClientSession], + conn: Connection, + retryable: bool, + ) -> None: + if conn.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." 
+ ) + self._execute_command( + self.write_concern, + session, + conn, + op_id, + retryable, + full_result, + ) + + self.client._retryable_write( + self.is_retryable, + retryable_bulk, + session, + operation, + bulk=self, + operation_id=op_id, + ) + + if full_result["error"] or full_result["writeErrors"] or full_result["writeConcernErrors"]: + _throw_client_bulk_write_exception(full_result, self.verbose_results) + return full_result + + def execute_command_unack( + self, + conn: Connection, + ) -> None: + """Execute commands with OP_MSG and w=0 writeConcern. Always unordered.""" + db_name = "admin" + cmd_name = "bulkWrite" + listeners = self.client._event_listeners + op_id = _randint() + + bwc = self.bulk_ctx_class( + db_name, + cmd_name, + conn, + op_id, + listeners, # type: ignore[arg-type] + None, + self.client.codec_options, + ) + + while self.idx_offset < self.total_ops: + # Construct the server command, specifying the relevant options. + cmd = {"bulkWrite": 1} + cmd["errorsOnly"] = True + cmd["ordered"] = False + if self.bypass_doc_val is not None: + cmd["bypassDocumentValidation"] = self.bypass_doc_val + cmd["writeConcern"] = {"w": 0} # type: ignore[assignment] + if self.comment: + cmd["comment"] = self.comment # type: ignore[assignment] + if self.let: + cmd["let"] = self.let + + conn.add_server_api(cmd) + ops = islice(self.ops, self.idx_offset, None) + namespaces = islice(self.namespaces, self.idx_offset, None) + + # Run as many ops as possible in one server command. + to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops, namespaces) # type: ignore[arg-type] + + self.idx_offset += len(to_send_ops) + + def execute_no_results( + self, + conn: Connection, + ) -> None: + """Execute all operations, returning no results (w=0).""" + if self.uses_collation: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + if self.uses_array_filters: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + # Cannot have both unacknowledged writes and bypass document validation. + if self.bypass_doc_val is not None: + raise OperationFailure( + "Cannot set bypass_document_validation with unacknowledged write concern" + ) + + return self.execute_command_unack(conn) + + def execute( + self, + session: Optional[ClientSession], + operation: str, + ) -> Any: + """Execute operations.""" + if not self.ops: + raise InvalidOperation("No operations to execute") + if self.executed: + raise InvalidOperation("Bulk operations can only be executed once.") + self.executed = True + session = _validate_session_write_concern(session, self.write_concern) + + if not self.write_concern.acknowledged: + with self.client._conn_for_writes(session, operation) as connection: + if connection.max_wire_version < 25: + raise InvalidOperation( + "MongoClient.bulk_write requires MongoDB server version 8.0+." + ) + self.execute_no_results(connection) + return ClientBulkWriteResult(None, False, False) # type: ignore[arg-type] + + result = self.execute_command(session, operation) + return ClientBulkWriteResult( + result, + self.write_concern.acknowledged, + self.verbose_results, + ) diff --git a/pymongo/synchronous/client_session.py b/pymongo/synchronous/client_session.py new file mode 100644 index 0000000000..9b547dc946 --- /dev/null +++ b/pymongo/synchronous/client_session.py @@ -0,0 +1,1184 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Logical sessions for ordering sequential operations.
+
+.. versionadded:: 3.6
+
+Causally Consistent Reads
+=========================
+
+.. code-block:: python
+
+    with client.start_session(causal_consistency=True) as session:
+        collection = client.db.collection
+        collection.update_one({"_id": 1}, {"$set": {"x": 10}}, session=session)
+        secondary_c = collection.with_options(read_preference=ReadPreference.SECONDARY)
+
+        # A secondary read waits for replication of the write.
+        secondary_c.find_one({"_id": 1}, session=session)
+
+If `causal_consistency` is True (the default), read operations that use
+the session are causally ordered after previous read and write operations.
+Using a causally consistent session, an application can read its own writes
+and is guaranteed monotonic reads, even when reading from replica set
+secondaries.
+
+.. seealso:: The MongoDB documentation on `causal-consistency `_.
+
+.. _transactions-ref:
+
+Transactions
+============
+
+.. versionadded:: 3.7
+
+MongoDB 4.0 adds support for transactions on replica set primaries. A
+transaction is associated with a :class:`ClientSession`. To start a transaction
+on a session, use :meth:`ClientSession.start_transaction` in a with-statement.
+Then, execute an operation within the transaction by passing the session to the
+operation:
+
+.. code-block:: python
+
+    orders = client.db.orders
+    inventory = client.db.inventory
+    with client.start_session() as session:
+        with session.start_transaction():
+            orders.insert_one({"sku": "abc123", "qty": 100}, session=session)
+            inventory.update_one(
+                {"sku": "abc123", "qty": {"$gte": 100}},
+                {"$inc": {"qty": -100}},
+                session=session,
+            )
+
+Upon normal completion of the ``with session.start_transaction()`` block,
+:meth:`ClientSession.commit_transaction` is called automatically. If the
+block exits with an exception, :meth:`ClientSession.abort_transaction` is
+called automatically.
+
+In general, multi-document transactions only support read/write (CRUD)
+operations on existing collections. However, MongoDB 4.4 adds support for
+creating collections and indexes with some limitations, including an
+insert operation that would result in the creation of a new collection.
+For a complete description of all the supported and unsupported operations
+see the `MongoDB server's documentation for transactions
+`_.
+
+A session may only have a single active transaction at a time, but multiple
+transactions on the same session can be executed in sequence.
+
+Sharded Transactions
+^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 3.9
+
+PyMongo 3.9 adds support for transactions on sharded clusters running MongoDB
+>=4.2. Sharded transactions have the same API as replica set transactions.
+When running a transaction against a sharded cluster, the session is
+pinned to the mongos server selected for the first operation in the
+transaction. All subsequent operations that are part of the same transaction
+are routed to the same mongos server. When the transaction is completed, by
+running either commitTransaction or abortTransaction, the session is unpinned.
+
+.. seealso:: The MongoDB documentation on `transactions `_.
+
+.. _snapshot-reads-ref:
+
+Snapshot Reads
+==============
+
+.. versionadded:: 3.12
+
+MongoDB 5.0 adds support for snapshot reads. Snapshot reads are requested by
+passing the ``snapshot`` option to
+:meth:`~pymongo.mongo_client.MongoClient.start_session`.
+If ``snapshot`` is True, all read operations that use this session read data
+from the same snapshot timestamp. The server chooses the latest
+majority-committed snapshot timestamp when executing the first read operation
+using the session. Subsequent reads on this session read from the same
+snapshot timestamp. Snapshot reads are also supported when reading from
+replica set secondaries.
+
+.. code-block:: python
+
+    # Each read using this session reads data from the same point in time.
+    with client.start_session(snapshot=True) as session:
+        order = orders.find_one({"sku": "abc123"}, session=session)
+        inventory_doc = inventory.find_one({"sku": "abc123"}, session=session)
+
+Snapshot Reads Limitations
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Snapshot read sessions are incompatible with ``causal_consistency=True``.
+Only the following read operations are supported in a snapshot read session:
+
+- :meth:`~pymongo.collection.Collection.find`
+- :meth:`~pymongo.collection.Collection.find_one`
+- :meth:`~pymongo.collection.Collection.aggregate`
+- :meth:`~pymongo.collection.Collection.count_documents`
+- :meth:`~pymongo.collection.Collection.distinct` (on unsharded collections)
+
+Classes
+=======
+"""
+
+from __future__ import annotations
+
+import collections
+import time
+import uuid
+from collections.abc import Mapping as _Mapping
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    ContextManager,
+    Mapping,
+    MutableMapping,
+    NoReturn,
+    Optional,
+    Type,
+    TypeVar,
+)
+
+from bson.binary import Binary
+from bson.int64 import Int64
+from bson.timestamp import Timestamp
+from pymongo import _csot
+from pymongo.errors import (
+    ConfigurationError,
+    ConnectionFailure,
+    InvalidOperation,
+    OperationFailure,
+    PyMongoError,
+    WTimeoutError,
+)
+from pymongo.helpers_shared import _RETRYABLE_ERROR_CODES
+from pymongo.read_concern import ReadConcern
+from pymongo.read_preferences import ReadPreference, _ServerMode
+from pymongo.server_type import SERVER_TYPE
+from pymongo.synchronous.cursor import _ConnectionManager
+from pymongo.write_concern import WriteConcern
+
+if TYPE_CHECKING:
+    from types import TracebackType
+
+    from pymongo.synchronous.pool import Connection
+    from pymongo.synchronous.server import Server
+    from pymongo.typings import ClusterTime, _Address
+
+_IS_SYNC = True
+
+
+class SessionOptions:
+    """Options for a new :class:`ClientSession`.
+
+    :param causal_consistency: If True, read operations are causally
+        ordered within the session. Defaults to True when the ``snapshot``
+        option is ``False``.
+    :param default_transaction_options: The default
+        TransactionOptions to use for transactions started on this session.
+    :param snapshot: If True, then all reads performed using this
+        session will read from the same snapshot. This option is incompatible
+        with ``causal_consistency=True``. Defaults to ``False``.
+
+    .. versionchanged:: 3.12
+        Added the ``snapshot`` parameter.
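+
+    For example, requesting ``snapshot=True`` implicitly disables causal
+    consistency:
+
+    .. code-block:: python
+
+        opts = SessionOptions(snapshot=True)
+        assert opts.snapshot is True
+        assert opts.causal_consistency is False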
+ """ + + def __init__( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> None: + if snapshot: + if causal_consistency: + raise ConfigurationError("snapshot reads do not support causal_consistency=True") + causal_consistency = False + elif causal_consistency is None: + causal_consistency = True + self._causal_consistency = causal_consistency + if default_transaction_options is not None: + if not isinstance(default_transaction_options, TransactionOptions): + raise TypeError( + "default_transaction_options must be an instance of " + "pymongo.client_session.TransactionOptions, not: {!r}".format( + default_transaction_options + ) + ) + self._default_transaction_options = default_transaction_options + self._snapshot = snapshot + + @property + def causal_consistency(self) -> bool: + """Whether causal consistency is configured.""" + return self._causal_consistency + + @property + def default_transaction_options(self) -> Optional[TransactionOptions]: + """The default TransactionOptions to use for transactions started on + this session. + + .. versionadded:: 3.7 + """ + return self._default_transaction_options + + @property + def snapshot(self) -> Optional[bool]: + """Whether snapshot reads are configured. + + .. versionadded:: 3.12 + """ + return self._snapshot + + +class TransactionOptions: + """Options for :meth:`ClientSession.start_transaction`. + + :param read_concern: The + :class:`~pymongo.read_concern.ReadConcern` to use for this transaction. + If ``None`` (the default) the :attr:`read_preference` of + the :class:`MongoClient` is used. + :param write_concern: The + :class:`~pymongo.write_concern.WriteConcern` to use for this + transaction. If ``None`` (the default) the :attr:`read_preference` of + the :class:`MongoClient` is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. Transactions which read must use + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param max_commit_time_ms: The maximum amount of time to allow a + single commitTransaction command to run. This option is an alias for + maxTimeMS option on the commitTransaction command. If ``None`` (the + default) maxTimeMS is not used. + + .. versionchanged:: 3.9 + Added the ``max_commit_time_ms`` option. + + .. 
versionadded:: 3.7 + """ + + def __init__( + self, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> None: + self._read_concern = read_concern + self._write_concern = write_concern + self._read_preference = read_preference + self._max_commit_time_ms = max_commit_time_ms + if read_concern is not None: + if not isinstance(read_concern, ReadConcern): + raise TypeError( + "read_concern must be an instance of " + f"pymongo.read_concern.ReadConcern, not: {read_concern!r}" + ) + if write_concern is not None: + if not isinstance(write_concern, WriteConcern): + raise TypeError( + "write_concern must be an instance of " + f"pymongo.write_concern.WriteConcern, not: {write_concern!r}" + ) + if not write_concern.acknowledged: + raise ConfigurationError( + "transactions do not support unacknowledged write concern" + f": {write_concern!r}" + ) + if read_preference is not None: + if not isinstance(read_preference, _ServerMode): + raise TypeError( + f"{read_preference!r} is not valid for read_preference. See " + "pymongo.read_preferences for valid " + "options." + ) + if max_commit_time_ms is not None: + if not isinstance(max_commit_time_ms, int): + raise TypeError( + f"max_commit_time_ms must be an integer or None, not {type(max_commit_time_ms)}" + ) + + @property + def read_concern(self) -> Optional[ReadConcern]: + """This transaction's :class:`~pymongo.read_concern.ReadConcern`.""" + return self._read_concern + + @property + def write_concern(self) -> Optional[WriteConcern]: + """This transaction's :class:`~pymongo.write_concern.WriteConcern`.""" + return self._write_concern + + @property + def read_preference(self) -> Optional[_ServerMode]: + """This transaction's :class:`~pymongo.read_preferences.ReadPreference`.""" + return self._read_preference + + @property + def max_commit_time_ms(self) -> Optional[int]: + """The maxTimeMS to use when running a commitTransaction command. + + .. versionadded:: 3.9 + """ + return self._max_commit_time_ms + + +def _validate_session_write_concern( + session: Optional[ClientSession], write_concern: Optional[WriteConcern] +) -> Optional[ClientSession]: + """Validate that an explicit session is not used with an unack'ed write. + + Returns the session to use for the next operation. + """ + if session: + if write_concern is not None and not write_concern.acknowledged: + # For unacknowledged writes without an explicit session, + # drivers SHOULD NOT use an implicit session. If a driver + # creates an implicit session for unacknowledged writes + # without an explicit session, the driver MUST NOT send the + # session ID. 
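+            # Hence an implicit session is silently discarded here, while an
+            # explicit session paired with w:0 is a configuration error.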
+ if session._implicit: + return None + else: + raise ConfigurationError( + "Explicit sessions are incompatible with " + f"unacknowledged write concern: {write_concern!r}" + ) + return session + + +class _TransactionContext: + """Internal transaction context manager for start_transaction.""" + + def __init__(self, session: ClientSession): + self.__session = session + + def __enter__(self) -> _TransactionContext: + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + if self.__session.in_transaction: + if exc_val is None: + self.__session.commit_transaction() + else: + self.__session.abort_transaction() + + +class _TxnState: + NONE = 1 + STARTING = 2 + IN_PROGRESS = 3 + COMMITTED = 4 + COMMITTED_EMPTY = 5 + ABORTED = 6 + + +class _Transaction: + """Internal class to hold transaction information in a ClientSession.""" + + def __init__(self, opts: Optional[TransactionOptions], client: MongoClient[Any]): + self.opts = opts + self.state = _TxnState.NONE + self.sharded = False + self.pinned_address: Optional[_Address] = None + self.conn_mgr: Optional[_ConnectionManager] = None + self.recovery_token = None + self.attempt = 0 + self.client = client + + def active(self) -> bool: + return self.state in (_TxnState.STARTING, _TxnState.IN_PROGRESS) + + def starting(self) -> bool: + return self.state == _TxnState.STARTING + + @property + def pinned_conn(self) -> Optional[Connection]: + if self.active() and self.conn_mgr: + return self.conn_mgr.conn + return None + + def pin(self, server: Server, conn: Connection) -> None: + self.sharded = True + self.pinned_address = server.description.address + if server.description.server_type == SERVER_TYPE.LoadBalancer: + conn.pin_txn() + self.conn_mgr = _ConnectionManager(conn, False) + + def unpin(self) -> None: + self.pinned_address = None + if self.conn_mgr: + self.conn_mgr.close() + self.conn_mgr = None + + def reset(self) -> None: + self.unpin() + self.state = _TxnState.NONE + self.sharded = False + self.recovery_token = None + self.attempt = 0 + + def __del__(self) -> None: + if self.conn_mgr: + # Reuse the cursor closing machinery to return the socket to the + # pool soon. + self.client._close_cursor_soon(0, None, self.conn_mgr) + self.conn_mgr = None + + +def _reraise_with_unknown_commit(exc: Any) -> NoReturn: + """Re-raise an exception with the UnknownTransactionCommitResult label.""" + exc._add_error_label("UnknownTransactionCommitResult") + raise + + +def _max_time_expired_error(exc: PyMongoError) -> bool: + """Return true if exc is a MaxTimeMSExpired error.""" + return isinstance(exc, OperationFailure) and exc.code == 50 + + +# From the transactions spec, all the retryable writes errors plus +# WriteConcernTimeout. +_UNKNOWN_COMMIT_ERROR_CODES: frozenset = _RETRYABLE_ERROR_CODES | frozenset( # type: ignore[type-arg] + [ + 64, # WriteConcernTimeout + 50, # MaxTimeMSExpired + ] +) + +# From the Convenient API for Transactions spec, with_transaction must +# halt retries after 120 seconds. +# This limit is non-configurable and was chosen to be twice the 60 second +# default value of MongoDB's `transactionLifetimeLimitSeconds` parameter. 
+_WITH_TRANSACTION_RETRY_TIME_LIMIT = 120
+
+
+def _within_time_limit(start_time: float) -> bool:
+    """Are we within the with_transaction retry limit?"""
+    return time.monotonic() - start_time < _WITH_TRANSACTION_RETRY_TIME_LIMIT
+
+
+_T = TypeVar("_T")
+
+if TYPE_CHECKING:
+    from pymongo.synchronous.mongo_client import MongoClient
+
+
+class ClientSession:
+    """A session for ordering sequential operations.
+
+    :class:`ClientSession` instances are **not thread-safe or fork-safe**.
+    They can only be used by one thread or process at a time. A single
+    :class:`ClientSession` cannot be used to run multiple operations
+    concurrently.
+
+    Should not be initialized directly by application developers - to create a
+    :class:`ClientSession`, call
+    :meth:`~pymongo.mongo_client.MongoClient.start_session`.
+    """
+
+    def __init__(
+        self,
+        client: MongoClient[Any],
+        server_session: Any,
+        options: SessionOptions,
+        implicit: bool,
+    ) -> None:
+        # A MongoClient, a _ServerSession, a SessionOptions, and an implicit flag.
+        self._client: MongoClient[Any] = client
+        self._server_session = server_session
+        self._options = options
+        self._cluster_time: Optional[Mapping[str, Any]] = None
+        self._operation_time: Optional[Timestamp] = None
+        self._snapshot_time = None
+        # Is this an implicitly created session?
+        self._implicit = implicit
+        self._transaction = _Transaction(None, client)
+        # Is this session attached to a cursor?
+        self._attached_to_cursor = False
+        # Should we leave the session alive when the cursor is closed?
+        self._leave_alive = False
+
+    def end_session(self) -> None:
+        """Finish this session. If a transaction has started, abort it.
+
+        It is an error to use the session after the session has ended.
+        """
+        self._end_session(lock=True)
+
+    def _end_session(self, lock: bool) -> None:
+        if self._server_session is not None:
+            try:
+                if self.in_transaction:
+                    self.abort_transaction()
+                # It's possible we're still pinned here if the transaction
+                # is in the committed state when the session is discarded.
+                self._unpin()
+            finally:
+                self._client._return_server_session(self._server_session)
+                self._server_session = None
+
+    def _end_implicit_session(self) -> None:
+        # Implicit sessions can't be part of transactions or pinned connections
+        if not self._leave_alive and self._server_session is not None:
+            self._client._return_server_session(self._server_session)
+            self._server_session = None
+
+    def _check_ended(self) -> None:
+        if self._server_session is None:
+            raise InvalidOperation("Cannot use ended session")
+
+    def __enter__(self) -> ClientSession:
+        return self
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        self._end_session(lock=True)
+
+    @property
+    def client(self) -> MongoClient[Any]:
+        """The :class:`~pymongo.mongo_client.MongoClient` this session was
+        created from.
+ """ + return self._client + + @property + def options(self) -> SessionOptions: + """The :class:`SessionOptions` this session was created with.""" + return self._options + + @property + def session_id(self) -> Mapping[str, Any]: + """A BSON document, the opaque server session identifier.""" + self._check_ended() + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.session_id + + @property + def _transaction_id(self) -> Int64: + """The current transaction id for the underlying server session.""" + self._materialize(self._client.topology_description.logical_session_timeout_minutes) + return self._server_session.transaction_id + + @property + def cluster_time(self) -> Optional[ClusterTime]: + """The cluster time returned by the last operation executed + in this session. + """ + return self._cluster_time + + @property + def operation_time(self) -> Optional[Timestamp]: + """The operation time returned by the last operation executed + in this session. + """ + return self._operation_time + + def _inherit_option(self, name: str, val: _T) -> _T: + """Return the inherited TransactionOption value.""" + if val: + return val + txn_opts = self.options.default_transaction_options + parent_val = txn_opts and getattr(txn_opts, name) + if parent_val: + return parent_val + return getattr(self.client, name) + + def with_transaction( + self, + callback: Callable[[ClientSession], _T], + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + read_preference: Optional[_ServerMode] = None, + max_commit_time_ms: Optional[int] = None, + ) -> _T: + """Execute a callback in a transaction. + + This method starts a transaction on this session, executes ``callback`` + once, and then commits the transaction. For example:: + + def callback(session): + orders = session.client.db.orders + inventory = session.client.db.inventory + orders.insert_one({"sku": "abc123", "qty": 100}, session=session) + inventory.update_one({"sku": "abc123", "qty": {"$gte": 100}}, + {"$inc": {"qty": -100}}, session=session) + + with client.start_session() as session: + session.with_transaction(callback) + + To pass arbitrary arguments to the ``callback``, wrap your callable + with a ``lambda`` like this:: + + def callback(session, custom_arg, custom_kwarg=None): + # Transaction operations... + + with client.start_session() as session: + session.with_transaction( + lambda s: callback(s, "custom_arg", custom_kwarg=1)) + + In the event of an exception, ``with_transaction`` may retry the commit + or the entire transaction, therefore ``callback`` may be invoked + multiple times by a single call to ``with_transaction``. Developers + should be mindful of this possibility when writing a ``callback`` that + modifies application state or has any other side-effects. + Note that even when the ``callback`` is invoked multiple times, + ``with_transaction`` ensures that the transaction will be committed + at-most-once on the server. + + The ``callback`` should not attempt to start new transactions, but + should simply run operations meant to be contained within a + transaction. The ``callback`` should also not commit the transaction; + this is handled automatically by ``with_transaction``. If the + ``callback`` does commit or abort the transaction without error, + however, ``with_transaction`` will return without taking further + action. + + :class:`ClientSession` instances are **not thread-safe or fork-safe**. 
+        Consequently, the ``callback`` must not attempt to execute multiple
+        operations concurrently.
+
+        When ``callback`` raises an exception, ``with_transaction``
+        automatically aborts the current transaction. When ``callback`` or
+        :meth:`~ClientSession.commit_transaction` raises an exception that
+        includes the ``"TransientTransactionError"`` error label,
+        ``with_transaction`` starts a new transaction and re-executes
+        the ``callback``.
+
+        The ``callback`` MUST NOT silently handle command errors
+        without allowing such errors to propagate. Command errors may abort the
+        transaction on the server, and an attempt to commit the transaction will
+        be rejected with a ``NoSuchTransaction`` error. For more information see
+        the `transactions specification`_.
+
+        When :meth:`~ClientSession.commit_transaction` raises an exception with
+        the ``"UnknownTransactionCommitResult"`` error label,
+        ``with_transaction`` retries the commit until the result of the
+        transaction is known.
+
+        This method will cease retrying after 120 seconds have elapsed. This
+        timeout is not configurable and any exception raised by the
+        ``callback`` or by :meth:`ClientSession.commit_transaction` after the
+        timeout is reached will be re-raised. Applications that desire a
+        different timeout duration should not use this method.
+
+        :param callback: The callable ``callback`` to run inside a transaction.
+            The callable must accept a single argument, this session. Note that
+            under certain error conditions the callback may be run multiple
+            times.
+        :param read_concern: The
+            :class:`~pymongo.read_concern.ReadConcern` to use for this
+            transaction.
+        :param write_concern: The
+            :class:`~pymongo.write_concern.WriteConcern` to use for this
+            transaction.
+        :param read_preference: The read preference to use for this
+            transaction. If ``None`` (the default) the :attr:`read_preference`
+            of this :class:`MongoClient` is used. See
+            :mod:`~pymongo.read_preferences` for options.
+
+        :return: The return value of the ``callback``.
+
+        .. versionadded:: 3.9
+
+        .. _transactions specification:
+            https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/transactions-convenient-api.md#handling-errors-inside-the-callback
+        """
+        start_time = time.monotonic()
+        while True:
+            self.start_transaction(read_concern, write_concern, read_preference, max_commit_time_ms)
+            try:
+                ret = callback(self)
+            # Catch KeyboardInterrupt, CancelledError, etc. and clean up.
+            except BaseException as exc:
+                if self.in_transaction:
+                    self.abort_transaction()
+                if (
+                    isinstance(exc, PyMongoError)
+                    and exc.has_error_label("TransientTransactionError")
+                    and _within_time_limit(start_time)
+                ):
+                    # Retry the entire transaction.
+                    continue
+                raise
+
+            if not self.in_transaction:
+                # Assume callback intentionally ended the transaction.
+                return ret
+
+            while True:
+                try:
+                    self.commit_transaction()
+                except PyMongoError as exc:
+                    if (
+                        exc.has_error_label("UnknownTransactionCommitResult")
+                        and _within_time_limit(start_time)
+                        and not _max_time_expired_error(exc)
+                    ):
+                        # Retry the commit.
+                        continue
+
+                    if exc.has_error_label("TransientTransactionError") and _within_time_limit(
+                        start_time
+                    ):
+                        # Retry the entire transaction.
+                        break
+                    raise
+
+                # Commit succeeded.
+                return ret
+
+    def start_transaction(
+        self,
+        read_concern: Optional[ReadConcern] = None,
+        write_concern: Optional[WriteConcern] = None,
+        read_preference: Optional[_ServerMode] = None,
+        max_commit_time_ms: Optional[int] = None,
+    ) -> ContextManager[Any]:
+        """Start a multi-statement transaction.
+
+        Takes the same arguments as :class:`TransactionOptions`.
+
+        .. versionchanged:: 3.9
+            Added the ``max_commit_time_ms`` option.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+
+        if self.options.snapshot:
+            raise InvalidOperation("Transactions are not supported in snapshot sessions")
+
+        if self.in_transaction:
+            raise InvalidOperation("Transaction already in progress")
+
+        read_concern = self._inherit_option("read_concern", read_concern)
+        write_concern = self._inherit_option("write_concern", write_concern)
+        read_preference = self._inherit_option("read_preference", read_preference)
+        if max_commit_time_ms is None:
+            opts = self.options.default_transaction_options
+            if opts:
+                max_commit_time_ms = opts.max_commit_time_ms
+
+        self._transaction.opts = TransactionOptions(
+            read_concern, write_concern, read_preference, max_commit_time_ms
+        )
+        self._transaction.reset()
+        self._transaction.state = _TxnState.STARTING
+        self._start_retryable_write()
+        return _TransactionContext(self)
+
+    def commit_transaction(self) -> None:
+        """Commit a multi-statement transaction.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+        state = self._transaction.state
+        if state is _TxnState.NONE:
+            raise InvalidOperation("No transaction started")
+        elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY):
+            # Server transaction was never started, no need to send a command.
+            self._transaction.state = _TxnState.COMMITTED_EMPTY
+            return
+        elif state is _TxnState.ABORTED:
+            raise InvalidOperation("Cannot call commitTransaction after calling abortTransaction")
+        elif state is _TxnState.COMMITTED:
+            # We're explicitly retrying the commit, so move the state back to
+            # "in progress" so that in_transaction returns True.
+            self._transaction.state = _TxnState.IN_PROGRESS
+
+        try:
+            self._finish_transaction_with_retry("commitTransaction")
+        except ConnectionFailure as exc:
+            # We do not know if the commit was successfully applied on the
+            # server or if it satisfied the provided write concern, so set the
+            # unknown commit error label.
+            exc._remove_error_label("TransientTransactionError")
+            _reraise_with_unknown_commit(exc)
+        except WTimeoutError as exc:
+            # We do not know if the commit has satisfied the provided write
+            # concern, so add the unknown commit error label.
+            _reraise_with_unknown_commit(exc)
+        except OperationFailure as exc:
+            if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES:
+                # The server reports errorLabels in this case.
+                raise
+            # We do not know if the commit was successfully applied on the
+            # server or if it satisfied the provided write concern, so set the
+            # unknown commit error label.
+            _reraise_with_unknown_commit(exc)
+        finally:
+            self._transaction.state = _TxnState.COMMITTED
+
+    def abort_transaction(self) -> None:
+        """Abort a multi-statement transaction.
+
+        .. versionadded:: 3.7
+        """
+        self._check_ended()
+
+        state = self._transaction.state
+        if state is _TxnState.NONE:
+            raise InvalidOperation("No transaction started")
+        elif state is _TxnState.STARTING:
+            # Server transaction was never started, no need to send a command.
+            self._transaction.state = _TxnState.ABORTED
+            return
+        elif state is _TxnState.ABORTED:
+            raise InvalidOperation("Cannot call abortTransaction twice")
+        elif state in (_TxnState.COMMITTED, _TxnState.COMMITTED_EMPTY):
+            raise InvalidOperation("Cannot call abortTransaction after calling commitTransaction")
+
+        try:
+            self._finish_transaction_with_retry("abortTransaction")
+        except (OperationFailure, ConnectionFailure):
+            # The transactions spec says to ignore abortTransaction errors.
+            pass
+        finally:
+            self._transaction.state = _TxnState.ABORTED
+            self._unpin()
+
+    def _finish_transaction_with_retry(self, command_name: str) -> dict[str, Any]:
+        """Run commit or abort with one retry after any retryable error.
+
+        :param command_name: Either "commitTransaction" or "abortTransaction".
+        """
+
+        def func(
+            _session: Optional[ClientSession], conn: Connection, _retryable: bool
+        ) -> dict[str, Any]:
+            return self._finish_transaction(conn, command_name)
+
+        return self._client._retry_internal(
+            func, self, None, retryable=True, operation=command_name
+        )
+
+    def _finish_transaction(self, conn: Connection, command_name: str) -> dict[str, Any]:
+        self._transaction.attempt += 1
+        opts = self._transaction.opts
+        assert opts
+        wc = opts.write_concern
+        cmd = {command_name: 1}
+        if command_name == "commitTransaction":
+            if opts.max_commit_time_ms and _csot.get_timeout() is None:
+                cmd["maxTimeMS"] = opts.max_commit_time_ms
+
+            # Transaction spec says that after the initial commit attempt,
+            # subsequent commitTransaction commands should be upgraded to use
+            # w:"majority" and set a default value of 10 seconds for wtimeout.
+            if self._transaction.attempt > 1:
+                assert wc
+                wc_doc = wc.document
+                wc_doc["w"] = "majority"
+                wc_doc.setdefault("wtimeout", 10000)
+                wc = WriteConcern(**wc_doc)
+
+        if self._transaction.recovery_token:
+            cmd["recoveryToken"] = self._transaction.recovery_token
+
+        return self._client.admin._command(
+            conn, cmd, session=self, write_concern=wc, parse_write_concern_error=True
+        )
+
+    def _advance_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None:
+        """Internal cluster time helper."""
+        if self._cluster_time is None:
+            self._cluster_time = cluster_time
+        elif cluster_time is not None:
+            if cluster_time["clusterTime"] > self._cluster_time["clusterTime"]:
+                self._cluster_time = cluster_time
+
+    def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None:
+        """Update the cluster time for this session.
+
+        :param cluster_time: The
+            :data:`~pymongo.client_session.ClientSession.cluster_time` from
+            another `ClientSession` instance.
+        """
+        if not isinstance(cluster_time, _Mapping):
+            raise TypeError(
+                f"cluster_time must be an instance of collections.abc.Mapping, not {type(cluster_time)}"
+            )
+        if not isinstance(cluster_time.get("clusterTime"), Timestamp):
+            raise ValueError("Invalid cluster_time")
+        self._advance_cluster_time(cluster_time)
+
+    def _advance_operation_time(self, operation_time: Optional[Timestamp]) -> None:
+        """Internal operation time helper."""
+        if self._operation_time is None:
+            self._operation_time = operation_time
+        elif operation_time is not None:
+            if operation_time > self._operation_time:
+                self._operation_time = operation_time
+
+    def advance_operation_time(self, operation_time: Timestamp) -> None:
+        """Update the operation time for this session.
+
+        :param operation_time: The
+            :data:`~pymongo.client_session.ClientSession.operation_time` from
+            another `ClientSession` instance.
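+
+        For example, where ``s1`` and ``s2`` are two sessions from the same
+        client and ``s1.operation_time`` is not ``None``::
+
+            s2.advance_operation_time(s1.operation_time)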
+ """ + if not isinstance(operation_time, Timestamp): + raise TypeError( + f"operation_time must be an instance of bson.timestamp.Timestamp, not {type(operation_time)}" + ) + self._advance_operation_time(operation_time) + + def _process_response(self, reply: Mapping[str, Any]) -> None: + """Process a response to a command that was run with this session.""" + self._advance_cluster_time(reply.get("$clusterTime")) + self._advance_operation_time(reply.get("operationTime")) + if self._options.snapshot and self._snapshot_time is None: + if "cursor" in reply: + ct = reply["cursor"].get("atClusterTime") + else: + ct = reply.get("atClusterTime") + self._snapshot_time = ct + if self.in_transaction and self._transaction.sharded: + recovery_token = reply.get("recoveryToken") + if recovery_token: + self._transaction.recovery_token = recovery_token + + @property + def has_ended(self) -> bool: + """True if this session is finished.""" + return self._server_session is None + + @property + def in_transaction(self) -> bool: + """True if this session has an active multi-statement transaction. + + .. versionadded:: 3.10 + """ + return self._transaction.active() + + @property + def _starting_transaction(self) -> bool: + """True if this session is starting a multi-statement transaction.""" + return self._transaction.starting() + + @property + def _pinned_address(self) -> Optional[_Address]: + """The mongos address this transaction was created on.""" + if self._transaction.active(): + return self._transaction.pinned_address + return None + + @property + def _pinned_connection(self) -> Optional[Connection]: + """The connection this transaction was started on.""" + return self._transaction.pinned_conn + + def _pin(self, server: Server, conn: Connection) -> None: + """Pin this session to the given Server or to the given connection.""" + self._transaction.pin(server, conn) + + def _unpin(self) -> None: + """Unpin this session from any pinned Server.""" + self._transaction.unpin() + + def _txn_read_preference(self) -> Optional[_ServerMode]: + """Return read preference of this transaction or None.""" + if self.in_transaction: + assert self._transaction.opts + return self._transaction.opts.read_preference + return None + + def _materialize(self, logical_session_timeout_minutes: Optional[int] = None) -> None: + if isinstance(self._server_session, _EmptyServerSession): + old = self._server_session + self._server_session = self._client._topology.get_server_session( + logical_session_timeout_minutes + ) + if old.started_retryable_write: + self._server_session.inc_transaction_id() + + def _apply_to( + self, + command: MutableMapping[str, Any], + is_retryable: bool, + read_preference: _ServerMode, + conn: Connection, + ) -> None: + if not conn.supports_sessions: + if not self._implicit: + raise ConfigurationError("Sessions are not supported by this MongoDB deployment") + return + self._check_ended() + self._materialize(conn.logical_session_timeout_minutes) + if self.options.snapshot: + self._update_read_concern(command, conn) + + self._server_session.last_use = time.monotonic() + command["lsid"] = self._server_session.session_id + + if is_retryable: + command["txnNumber"] = self._server_session.transaction_id + return + + if self.in_transaction: + if read_preference != ReadPreference.PRIMARY: + raise InvalidOperation( + f"read preference in a transaction must be primary, not: {read_preference!r}" + ) + + if self._transaction.state == _TxnState.STARTING: + # First command begins a new transaction. 
+ self._transaction.state = _TxnState.IN_PROGRESS + command["startTransaction"] = True + + assert self._transaction.opts + if self._transaction.opts.read_concern: + rc = self._transaction.opts.read_concern.document + if rc: + command["readConcern"] = rc + self._update_read_concern(command, conn) + + command["txnNumber"] = self._server_session.transaction_id + command["autocommit"] = False + + def _start_retryable_write(self) -> None: + self._check_ended() + self._server_session.inc_transaction_id() + + def _update_read_concern(self, cmd: MutableMapping[str, Any], conn: Connection) -> None: + if self.options.causal_consistency and self.operation_time is not None: + cmd.setdefault("readConcern", {})["afterClusterTime"] = self.operation_time + if self.options.snapshot: + if conn.max_wire_version < 13: + raise ConfigurationError("Snapshot reads require MongoDB 5.0 or later") + rc = cmd.setdefault("readConcern", {}) + rc["level"] = "snapshot" + if self._snapshot_time is not None: + rc["atClusterTime"] = self._snapshot_time + + def __copy__(self) -> NoReturn: + raise TypeError("A ClientSession cannot be copied, create a new session instead") + + +class _EmptyServerSession: + __slots__ = "dirty", "started_retryable_write" + + def __init__(self) -> None: + self.dirty = False + self.started_retryable_write = False + + def mark_dirty(self) -> None: + self.dirty = True + + def inc_transaction_id(self) -> None: + self.started_retryable_write = True + + +class _ServerSession: + def __init__(self, generation: int): + # Ensure id is type 4, regardless of CodecOptions.uuid_representation. + self.session_id = {"id": Binary(uuid.uuid4().bytes, 4)} + self.last_use = time.monotonic() + self._transaction_id = 0 + self.dirty = False + self.generation = generation + + def mark_dirty(self) -> None: + """Mark this session as dirty. + + A server session is marked dirty when a command fails with a network + error. Dirty sessions are later discarded from the server session pool. + """ + self.dirty = True + + def timed_out(self, session_timeout_minutes: Optional[int]) -> bool: + if session_timeout_minutes is None: + return False + + idle_seconds = time.monotonic() - self.last_use + + # Timed out if we have less than a minute to live. + return idle_seconds > (session_timeout_minutes - 1) * 60 + + @property + def transaction_id(self) -> Int64: + """Positive 64-bit integer.""" + return Int64(self._transaction_id) + + def inc_transaction_id(self) -> None: + self._transaction_id += 1 + + +class _ServerSessionPool(collections.deque): # type: ignore[type-arg] + """Pool of _ServerSession objects. + + This class is thread-safe. + """ + + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + self.generation = 0 + + def reset(self) -> None: + self.generation += 1 + self.clear() + + def pop_all(self) -> list[_ServerSession]: + ids = [] + while True: + try: + ids.append(self.pop().session_id) + except IndexError: + break + return ids + + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: + # Although the Driver Sessions Spec says we only clear stale sessions + # in return_server_session, PyMongo can't take a lock when returning + # sessions from a __del__ method (like in Cursor.__die), so it can't + # clear stale sessions there. In case many sessions were returned via + # __del__, check for stale sessions here too. + self._clear_stale(session_timeout_minutes) + + # The most recently used sessions are on the left. 
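+        # Pop from the left so the most recently used session is reused first;
+        # idle sessions collect on the right and are dropped once timed out.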
+ while True: + try: + s = self.popleft() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + return s + + return _ServerSession(self.generation) + + def return_server_session(self, server_session: _ServerSession) -> None: + # Discard sessions from an old pool to avoid duplicate sessions in the + # child process after a fork. + if server_session.generation == self.generation and not server_session.dirty: + self.appendleft(server_session) + + def _clear_stale(self, session_timeout_minutes: Optional[int]) -> None: + # Clear stale sessions. The least recently used are on the right. + while True: + try: + s = self.pop() + except IndexError: + break + if not s.timed_out(session_timeout_minutes): + self.append(s) + # The remaining sessions also haven't timed out. + break diff --git a/pymongo/synchronous/collection.py b/pymongo/synchronous/collection.py new file mode 100644 index 0000000000..4e5f7d08fb --- /dev/null +++ b/pymongo/synchronous/collection.py @@ -0,0 +1,3639 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection level utilities for Mongo.""" +from __future__ import annotations + +import warnings +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Generic, + Iterable, + Iterator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from bson.timestamp import Timestamp +from pymongo import ASCENDING, _csot, common, helpers_shared, message +from pymongo.collation import validate_collation_or_none +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.errors import ( + ConfigurationError, + InvalidName, + InvalidOperation, + OperationFailure, +) +from pymongo.helpers_shared import _check_write_command_response +from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + InsertOne, + ReplaceOne, + SearchIndexModel, + UpdateMany, + UpdateOne, + _IndexKeyHint, + _IndexList, + _Op, +) +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) +from pymongo.synchronous.aggregation import ( + _CollectionAggregationCommand, + _CollectionRawAggregationCommand, +) +from pymongo.synchronous.bulk import _Bulk +from pymongo.synchronous.change_stream import CollectionChangeStream +from pymongo.synchronous.command_cursor import ( + CommandCursor, + RawBatchCommandCursor, +) +from pymongo.synchronous.cursor import ( + Cursor, + RawBatchCursor, +) +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline +from pymongo.write_concern 
import DEFAULT_WRITE_CONCERN, WriteConcern, validate_boolean + +_IS_SYNC = True + +T = TypeVar("T") + +_FIND_AND_MODIFY_DOC_FIELDS = {"value": 1} + + +_WriteOp = Union[ + InsertOne[_DocumentType], + DeleteOne, + DeleteMany, + ReplaceOne[_DocumentType], + UpdateOne, + UpdateMany, +] + + +class ReturnDocument: + """An enum used with + :meth:`~pymongo.collection.Collection.find_one_and_replace` and + :meth:`~pymongo.collection.Collection.find_one_and_update`. + """ + + BEFORE = False + """Return the original document before it was updated/replaced, or + ``None`` if no document matches the query. + """ + AFTER = True + """Return the updated/replaced or inserted document.""" + + +if TYPE_CHECKING: + import bson + from pymongo.collation import Collation + from pymongo.read_concern import ReadConcern + from pymongo.synchronous.aggregation import _AggregationCommand + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.database import Database + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + + +class Collection(common.BaseObject, Generic[_DocumentType]): + """A Mongo collection.""" + + def __init__( + self, + database: Database[_DocumentType], + name: str, + create: Optional[bool] = False, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> None: + """Get / create a Mongo collection. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if `name` is + not a valid collection name. Any additional keyword arguments will be used + as options passed to the create command. See + :meth:`~pymongo.database.Database.create_collection` for valid + options. + + If `create` is ``True``, `collation` is specified, or any additional + keyword arguments are present, a ``create`` command will be + sent, using ``session`` if specified. Otherwise, a ``create`` command + will not be sent and the collection will be created implicitly on first + use. The optional ``session`` argument is *only* used for the ``create`` + command, it is not associated with the collection afterward. + + :param database: the database to get a collection from + :param name: the name of the collection to get + :param create: If ``True``, force collection + creation even without options being set. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) database.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) database.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) database.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) database.read_concern is used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. If a collation is provided, + it will be passed to the create collection command. + :param session: A + :class:`~pymongo.client_session.ClientSession` that is used with + the create collection command. + :param kwargs: Additional keyword arguments will + be passed as options for the create collection command. + + .. 
versionchanged:: 4.2 + Added the ``clusteredIndex`` and ``encryptedFields`` parameters. + + .. versionchanged:: 4.0 + Removed the reindex, map_reduce, inline_map_reduce, + parallel_scan, initialize_unordered_bulk_op, + initialize_ordered_bulk_op, group, count, insert, save, + update, remove, find_and_modify, and ensure_index methods. See the + :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + Removed the uuid_subtype attribute. + :class:`~pymongo.collection.Collection` no longer returns an + instance of :class:`~pymongo.collection.Collection` for attribute + names with leading underscores. You must use dict-style lookups + instead:: + + collection['__my_collection__'] + + Not: + + collection.__my_collection__ + + .. seealso:: The MongoDB documentation on `collections `_. + """ + super().__init__( + codec_options or database.codec_options, + read_preference or database.read_preference, + write_concern or database.write_concern, + read_concern or database.read_concern, + ) + if not isinstance(name, str): + raise TypeError(f"name must be an instance of str, not {type(name)}") + from pymongo.synchronous.database import Database + + if not isinstance(database, Database): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "Database" for cls in type(database).__mro__): + raise TypeError(f"Database required but given {type(database).__name__}") + + if not name or ".." in name: + raise InvalidName("collection names cannot be empty") + if "$" in name and not (name.startswith(("oplog.$main", "$cmd"))): + raise InvalidName("collection names must not contain '$': %r" % name) + if name[0] == "." or name[-1] == ".": + raise InvalidName("collection names must not start or end with '.': %r" % name) + if "\x00" in name: + raise InvalidName("collection names must not contain the null character") + + self._database: Database[_DocumentType] = database + self._name = name + self._full_name = f"{self._database.name}.{self._name}" + self._write_response_codec_options = self.codec_options._replace( + unicode_decode_error_handler="replace", document_class=dict + ) + self._timeout = database.client.options.timeout + + if create or kwargs: + if _IS_SYNC: + warnings.warn( + "The `create` and `kwargs` arguments to Collection are deprecated and will be removed in PyMongo 5.0", + DeprecationWarning, + stacklevel=2, + ) + self._create(kwargs, session) # type: ignore[unused-coroutine] + else: + raise ValueError("Collection does not support the `create` or `kwargs` arguments.") + + def __getattr__(self, name: str) -> Collection[_DocumentType]: + """Get a sub-collection of this collection by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + full_name = f"{self._name}.{name}" + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {full_name}" + f" collection, use database['{full_name}']." 
+ ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> Collection[_DocumentType]: + return Collection( + self._database, + f"{self._name}.{name}", + False, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._database!r}, {self._name!r})" + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Collection): + return self._database == other.database and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._database, self._name)) + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: collection is not None" + ) + + @property + def full_name(self) -> str: + """The full name of this :class:`Collection`. + + The full name is of the form `database_name.collection_name`. + """ + return self._full_name + + @property + def name(self) -> str: + """The name of this :class:`Collection`.""" + return self._name + + @property + def database(self) -> Database[_DocumentType]: + """The :class:`~pymongo.database.Database` that this + :class:`Collection` is a part of. + """ + return self._database + + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Collection[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Collection[_DocumentTypeArg]: + ... + + def with_options( + self, + codec_options: Optional[bson.CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType] | Collection[_DocumentTypeArg]: + """Get a clone of this collection changing the specified settings. + + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY) + >>> coll1.read_preference + Primary() + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Collection` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Collection` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Collection` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Collection` + is used. 
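+ + For example, a clone configured for majority-acknowledged writes can be obtained without changing this collection's own settings; a minimal sketch (``coll_majority`` is a hypothetical name for the clone): + + .. code-block:: python + + from pymongo.write_concern import WriteConcern + + # coll_majority is a hypothetical name; coll1 is the collection above. + coll_majority = coll1.with_options( + write_concern=WriteConcern(w="majority") + )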
+ """ + return Collection( + self._database, + self._name, + False, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def _write_concern_for_cmd( + self, cmd: Mapping[str, Any], session: Optional[ClientSession] + ) -> WriteConcern: + raw_wc = cmd.get("writeConcern") + if raw_wc is not None: + return WriteConcern(**raw_wc) + else: + return self._write_concern_for(session) + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'Collection' object is not iterable") + + next = __next__ + + def __call__(self, *args: Any, **kwargs: Any) -> NoReturn: + """This is only here so that some API misusages are easier to debug.""" + if "." not in self._name: + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you " + "meant to call the '%s' method on a 'Database' " + "object it is failing because no such method " + "exists." % self._name + ) + raise TypeError( + f"'{type(self).__name__}' object is not callable. If you meant to " + f"call the '%s' method on a '{type(self).__name__}' object it is " + "failing because no such method exists." % self._name.split(".")[-1] + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> CollectionChangeStream[_DocumentType]: + """Watch changes on this collection. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.CollectionChangeStream` cursor which + iterates over changes on this collection. + + .. code-block:: python + + with db.collection.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.CollectionChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.CollectionChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with db.coll.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + .. note:: Using this helper method is preferred to directly calling + :meth:`~pymongo.collection.Collection.aggregate` with a + ``$changeStream`` stage, for the purpose of supporting + resumability. + + .. warning:: This Collection's :attr:`read_concern` must be + ``ReadConcern("majority")`` in order to use the ``$changeStream`` + stage. 
+ + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. + :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.CollectionChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionchanged:: 3.7 + Added the ``start_at_operation_time`` parameter. + + .. versionadded:: 3.6 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. 
_change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = CollectionChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events, + ) + + change_stream._initialize_cursor() + return change_stream + + def _conn_for_writes( + self, session: Optional[ClientSession], operation: str + ) -> ContextManager[Connection]: + return self._database.client._conn_for_writes(session, operation) + + def _command( + self, + conn: Connection, + command: MutableMapping[str, Any], + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions[Mapping[str, Any]]] = None, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + collation: Optional[_CollationIn] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + user_fields: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal command helper. + + :param conn: A Connection instance. + :param command: The command itself, as a :class:`~bson.son.SON` instance. + :param read_preference: (optional) The read preference to use. + :param codec_options: (optional) An instance of + :class:`~bson.codec_options.CodecOptions`. + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern: (optional) An instance of + :class:`~pymongo.read_concern.ReadConcern`. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. + :param collation: (optional) An instance of + :class:`~pymongo.collation.Collation`. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param retryable_write: True if this command is a retryable + write. + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + + :return: The result document. + """ + with self._database.client._tmp_session(session) as s: + return conn.command( + self._database.name, + command, + read_preference or self._read_preference_for(session), + codec_options or self.codec_options, + check, + allowable_errors, + read_concern=read_concern, + write_concern=write_concern, + parse_write_concern_error=True, + collation=collation, + session=s, + client=self._database.client, + retryable_write=retryable_write, + user_fields=user_fields, + ) + + def _create_helper( + self, + name: str, + options: MutableMapping[str, Any], + collation: Optional[_CollationIn], + session: Optional[ClientSession], + encrypted_fields: Optional[Mapping[str, Any]] = None, + qev2_required: bool = False, + ) -> None: + """Sends a create command with the given options.""" + cmd: dict[str, Any] = {"create": name} + if encrypted_fields: + cmd["encryptedFields"] = encrypted_fields + + if options: + if "size" in options: + options["size"] = float(options["size"]) + cmd.update(options) + with self._conn_for_writes(session, operation=_Op.CREATE) as conn: + if qev2_required and conn.max_wire_version < 21: + raise ConfigurationError( + "Driver support of Queryable Encryption is incompatible with server. " + "Upgrade server to use Queryable Encryption. 
" + f"Got maxWireVersion {conn.max_wire_version} but need maxWireVersion >= 21 (MongoDB >=7.0)" + ) + + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=self._write_concern_for(session), + collation=collation, + session=session, + ) + + def _create( + self, + options: MutableMapping[str, Any], + session: Optional[ClientSession], + ) -> None: + collation = validate_collation_or_none(options.pop("collation", None)) + encrypted_fields = options.pop("encryptedFields", None) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + opts = {"clusteredIndex": {"key": {"_id": 1}, "unique": True}} + self._create_helper( + _esc_coll_name(encrypted_fields, self._name), + opts, + None, + session, + qev2_required=True, + ) + self._create_helper(_ecoc_coll_name(encrypted_fields, self._name), opts, None, session) + self._create_helper( + self._name, options, collation, session, encrypted_fields=encrypted_fields + ) + self.create_index([("__safeContent__", ASCENDING)], session) + else: + self._create_helper(self._name, options, collation, session) + + @_csot.apply + def bulk_write( + self, + requests: Sequence[_WriteOp[_DocumentType]], + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + let: Optional[Mapping[str, Any]] = None, + ) -> BulkWriteResult: + """Send a batch of write operations to the server. + + Requests are passed as a list of write operation instances ( + :class:`~pymongo.operations.InsertOne`, + :class:`~pymongo.operations.UpdateOne`, + :class:`~pymongo.operations.UpdateMany`, + :class:`~pymongo.operations.ReplaceOne`, + :class:`~pymongo.operations.DeleteOne`, or + :class:`~pymongo.operations.DeleteMany`). + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')} + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + >>> # DeleteMany, UpdateOne, and UpdateMany are also available. + ... + >>> from pymongo import InsertOne, DeleteOne, ReplaceOne + >>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}), + ... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)] + >>> result = db.test.bulk_write(requests) + >>> result.inserted_count + 1 + >>> result.deleted_count + 1 + >>> result.modified_count + 0 + >>> result.upserted_ids + {2: ObjectId('54f62ee28891e756a6e1abd5')} + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')} + {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')} + {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')} + + :param requests: A list of write operations (see examples above). + :param ordered: If ``True`` (the default) requests will be + performed on the server serially, in the order provided. If an error + occurs all remaining operations are aborted. If ``False`` requests + will be performed on the server in arbitrary order, possibly in + parallel, and all operations will be attempted. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. 
Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + + :return: An instance of :class:`~pymongo.results.BulkWriteResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. versionadded:: 3.0 + """ + common.validate_list("requests", requests) + + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment, let=let) + for request in requests: + try: + request._add_to_bulk(blk) + except AttributeError: + raise TypeError(f"{request!r} is not a valid request") from None + + write_concern = self._write_concern_for(session) + bulk_api_result = blk.execute(write_concern, session, _Op.INSERT) + if bulk_api_result is not None: + return BulkWriteResult(bulk_api_result, True) + return BulkWriteResult({}, False) + + def _insert_one( + self, + doc: Mapping[str, Any], + ordered: bool, + write_concern: WriteConcern, + op_id: Optional[int], + bypass_doc_val: Optional[bool], + session: Optional[ClientSession], + comment: Optional[Any] = None, + ) -> Any: + """Internal helper for inserting a single document.""" + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + command = {"insert": self.name, "ordered": ordered, "documents": [doc]} + if comment is not None: + command["comment"] = comment + + def _insert_command( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> None: + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val + + result = conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + + _check_write_command_response(result) + + self._database.client._retryable_write( + acknowledged, _insert_command, session, operation=_Op.INSERT + ) + + if not isinstance(doc, RawBSONDocument): + return doc.get("_id") + return None + + def insert_one( + self, + document: Union[_DocumentType, RawBSONDocument], + bypass_document_validation: Optional[bool] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertOneResult: + """Insert a single document. + + >>> db.test.count_documents({'x': 1}) + 0 + >>> result = db.test.insert_one({'x': 1}) + >>> result.inserted_id + ObjectId('54f112defba522406c9cc208') + >>> db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')} + + :param document: The document to insert. Must be a mutable mapping + type. If the document does not have an _id field one will be + added automatically. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.InsertOneResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. 
versionchanged:: 3.2 + Added bypass_document_validation support + + .. versionadded:: 3.0 + """ + common.validate_is_document_type("document", document) + if not (isinstance(document, RawBSONDocument) or "_id" in document): + document["_id"] = ObjectId() # type: ignore[index] + + write_concern = self._write_concern_for(session) + return InsertOneResult( + self._insert_one( + document, + ordered=True, + write_concern=write_concern, + op_id=None, + bypass_doc_val=bypass_document_validation, + session=session, + comment=comment, + ), + write_concern.acknowledged, + ) + + @_csot.apply + def insert_many( + self, + documents: Iterable[Union[_DocumentType, RawBSONDocument]], + ordered: bool = True, + bypass_document_validation: Optional[bool] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> InsertManyResult: + """Insert an iterable of documents. + + >>> db.test.count_documents({}) + 0 + >>> result = db.test.insert_many([{'x': i} for i in range(2)]) + >>> result.inserted_ids + [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')] + >>> db.test.count_documents({}) + 2 + + :param documents: An iterable of documents to insert. + :param ordered: If ``True`` (the default) documents will be + inserted on the server serially, in the order provided. If an error + occurs all remaining inserts are aborted. If ``False``, documents + will be inserted on the server in arbitrary order, possibly in + parallel, and all document inserts will be attempted. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: An instance of :class:`~pymongo.results.InsertManyResult`. + + .. seealso:: `Writes and ids `_ + + .. note:: `bypass_document_validation` requires server version + **>= 3.2** + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.2 + Added bypass_document_validation support + + .. 
versionadded:: 3.0 + """ + if ( + not isinstance(documents, abc.Iterable) + or isinstance(documents, abc.Mapping) + or not documents + ): + raise TypeError("documents must be a non-empty list") + inserted_ids: list[ObjectId] = [] + + def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: + """A generator that validates documents and handles _ids.""" + for document in documents: + common.validate_is_document_type("document", document) + if not isinstance(document, RawBSONDocument): + if "_id" not in document: + document["_id"] = ObjectId() # type: ignore[index] + inserted_ids.append(document["_id"]) + yield (message._INSERT, document) + + write_concern = self._write_concern_for(session) + blk = _Bulk(self, ordered, bypass_document_validation, comment=comment) + blk.ops = list(gen()) + blk.execute(write_concern, session, _Op.INSERT) + return InsertManyResult(inserted_ids, write_concern.acknowledged) + + def _update( + self, + conn: Connection, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + validate_boolean("upsert", upsert) + collation = validate_collation_or_none(collation) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + update_doc: dict[str, Any] = { + "q": criteria, + "u": document, + "multi": multi, + "upsert": upsert, + } + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + update_doc["collation"] = collation + if array_filters is not None: + if not acknowledged: + raise ConfigurationError("arrayFilters is unsupported for unacknowledged writes.") + else: + update_doc["arrayFilters"] = array_filters + if hint is not None: + if not acknowledged and conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on unacknowledged update commands." + ) + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + update_doc["hint"] = hint + if sort is not None: + if not acknowledged and conn.max_wire_version < 25: + raise ConfigurationError( + "Must be connected to MongoDB 8.0+ to use sort on unacknowledged update commands." + ) + common.validate_is_mapping("sort", sort) + update_doc["sort"] = sort + + command = {"update": self.name, "ordered": ordered, "updates": [update_doc]} + if let is not None: + common.validate_is_mapping("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + # Update command. + if bypass_doc_val is not None: + command["bypassDocumentValidation"] = bypass_doc_val + + # The command result has to be published for APM unmodified + # so we make a shallow copy here before adding updatedExisting. 
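+ # The copy is shallow by design: the code below only adds or rewrites + # top-level keys (updatedExisting, upserted), so nested documents remain + # shared with the unmodified result published to APM.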
+ result = ( + conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + ).copy() + _check_write_command_response(result) + # Add the updatedExisting field for compatibility. + if result.get("n") and "upserted" not in result: + result["updatedExisting"] = True + else: + result["updatedExisting"] = False + # MongoDB >= 2.6.0 returns the upsert _id in an array + # element. Break it out for backward compatibility. + if "upserted" in result: + result["upserted"] = result["upserted"][0]["_id"] + + if not acknowledged: + return None + return result + + def _update_retryable( + self, + criteria: Mapping[str, Any], + document: Union[Mapping[str, Any], _Pipeline], + operation: str, + upsert: bool = False, + multi: bool = False, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + bypass_doc_val: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Optional[Mapping[str, Any]]: + """Internal update / replace helper.""" + + def _update( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Optional[Mapping[str, Any]]: + return self._update( + conn, + criteria, + document, + upsert=upsert, + multi=multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + bypass_doc_val=bypass_doc_val, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + sort=sort, + comment=comment, + ) + + return self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _update, + session, + operation, + ) + + def replace_one( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + upsert: bool = False, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Replace a single document matching the filter. + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + >>> result = db.test.replace_one({'x': 1}, {'y': 1}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')} + + The *upsert* option can be used to insert a new document if a matching + document does not exist. + + >>> result = db.test.replace_one({'x': 1}, {'x': 1}, True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('54f11e5c8891e756a6e1abd4') + >>> db.test.find_one({'x': 1}) + {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')} + + :param filter: A query that matches the document to replace. + :param replacement: The new document. + :param upsert: If ``True``, perform an insert if no documents + match the filter. 
+ :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.11 + Added ``sort`` parameter. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_replace(replacement) + if let is not None: + common.validate_is_mapping("let", let) + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + replacement, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + hint=hint, + session=session, + let=let, + sort=sort, + comment=comment, + ), + write_concern.acknowledged, + ) + + def update_one( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + sort: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update a single document matching the filter. + + >>> for doc in db.test.find(): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 1 + >>> result.modified_count + 1 + >>> for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + If ``upsert=True`` and no documents match the filter, create a + new document based on the filter criteria and update modifications. + + >>> result = db.test.update_one({'x': -10}, {'$inc': {'x': 3}}, upsert=True) + >>> result.matched_count + 0 + >>> result.modified_count + 0 + >>> result.upserted_id + ObjectId('626a678eeaa80587d4bb3fb7') + >>> db.test.find_one(result.upserted_id) + {'_id': ObjectId('626a678eeaa80587d4bb3fb7'), 'x': -7} + + :param filter: A query that matches the document to update. 
+ :param update: The modifications to apply. + :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: (optional) If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param sort: Specify which document the operation updates if the query matches + multiple documents. The first document matched by the sort order will be updated. + This option is only supported on MongoDB 8.0 and above. + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.11 + Added ``sort`` parameter. + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Added ``bypass_document_validation`` support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + sort=sort, + comment=comment, + ), + write_concern.acknowledged, + ) + + def update_many( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + upsert: bool = False, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + bypass_document_validation: Optional[bool] = None, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> UpdateResult: + """Update one or more documents that match the filter. + + >>> for doc in db.test.find(): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}}) + >>> result.matched_count + 3 + >>> result.modified_count + 3 + >>> for doc in db.test.find(): + ... print(doc) + ... + {'x': 4, '_id': 0} + {'x': 4, '_id': 1} + {'x': 4, '_id': 2} + + :param filter: A query that matches the documents to update. + :param update: The modifications to apply. 
+ :param upsert: If ``True``, perform an insert if no documents + match the filter. + :param bypass_document_validation: If ``True``, allows the + write to opt-out of document level validation. Default is + ``False``. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.2 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.UpdateResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the `update`. + .. versionchanged:: 3.6 + Added ``array_filters`` and ``session`` parameters. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionchanged:: 3.2 + Added bypass_document_validation support. + + .. versionadded:: 3.0 + """ + common.validate_is_mapping("filter", filter) + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + + write_concern = self._write_concern_for(session) + return UpdateResult( + self._update_retryable( + filter, + update, + _Op.UPDATE, + upsert, + multi=True, + write_concern=write_concern, + bypass_doc_val=bypass_document_validation, + collation=collation, + array_filters=array_filters, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def drop( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> None: + """Alias for :meth:`~pymongo.database.Database.drop_collection`. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. + + The following two calls are equivalent: + + >>> db.foo.drop() + >>> db.drop_collection("foo") + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.7 + :meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. 
+ """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + dbo.drop_collection( + self._name, session=session, comment=comment, encrypted_fields=encrypted_fields + ) + + def _delete( + self, + conn: Connection, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + retryable_write: bool = False, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + common.validate_is_mapping("filter", criteria) + write_concern = write_concern or self.write_concern + acknowledged = write_concern.acknowledged + delete_doc = {"q": criteria, "limit": int(not multi)} + collation = validate_collation_or_none(collation) + if collation is not None: + if not acknowledged: + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + else: + delete_doc["collation"] = collation + if hint is not None: + if not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged delete commands." + ) + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + delete_doc["hint"] = hint + command = {"delete": self.name, "ordered": ordered, "deletes": [delete_doc]} + + if let is not None: + common.validate_is_document_type("let", let) + command["let"] = let + + if comment is not None: + command["comment"] = comment + + # Delete command. + result = conn.command( + self._database.name, + command, + write_concern=write_concern, + codec_options=self._write_response_codec_options, + session=session, + client=self._database.client, + retryable_write=retryable_write, + ) + _check_write_command_response(result) + return result + + def _delete_retryable( + self, + criteria: Mapping[str, Any], + multi: bool, + write_concern: Optional[WriteConcern] = None, + op_id: Optional[int] = None, + ordered: bool = True, + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> Mapping[str, Any]: + """Internal delete helper.""" + + def _delete( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Mapping[str, Any]: + return self._delete( + conn, + criteria, + multi, + write_concern=write_concern, + op_id=op_id, + ordered=ordered, + collation=collation, + hint=hint, + session=session, + retryable_write=retryable_write, + let=let, + comment=comment, + ) + + return self._database.client._retryable_write( + (write_concern or self.write_concern).acknowledged and not multi, + _delete, + session, + operation=_Op.DELETE, + ) + + def delete_one( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete a single document matching the filter. 
+ + >>> db.test.count_documents({'x': 1}) + 3 + >>> result = db.test.delete_one({'x': 1}) + >>> result.deleted_count + 1 + >>> db.test.count_documents({'x': 1}) + 2 + + :param filter: A query that matches the document to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + write_concern = self._write_concern_for(session) + return DeleteResult( + self._delete_retryable( + filter, + False, + write_concern=write_concern, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def delete_many( + self, + filter: Mapping[str, Any], + collation: Optional[_CollationIn] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + ) -> DeleteResult: + """Delete one or more documents matching the filter. + + >>> db.test.count_documents({'x': 1}) + 3 + >>> result = db.test.delete_many({'x': 1}) + >>> result.deleted_count + 3 + >>> db.test.count_documents({'x': 1}) + 0 + + :param filter: A query that matches the documents to delete. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + + :return: - An instance of :class:`~pymongo.results.DeleteResult`. + + .. versionchanged:: 4.1 + Added ``let`` parameter. + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the `collation` option. + .. 
versionadded:: 3.0 + """ + write_concern = self._write_concern_for(session) + return DeleteResult( + self._delete_retryable( + filter, + True, + write_concern=write_concern, + collation=collation, + hint=hint, + session=session, + let=let, + comment=comment, + ), + write_concern.acknowledged, + ) + + def find_one( + self, filter: Optional[Any] = None, *args: Any, **kwargs: Any + ) -> Optional[_DocumentType]: + """Get a single document from the database. + + All arguments to :meth:`find` are also valid arguments for + :meth:`find_one`, although any `limit` argument will be + ignored. Returns a single document, or ``None`` if no matching + document is found. + + The :meth:`find_one` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + :param filter: a dictionary specifying + the query to be performed OR any other type to be used as + the value for a query for ``"_id"``. + + :param args: any additional positional arguments + are the same as the arguments to :meth:`find`. + + :param kwargs: any additional keyword arguments + are the same as the arguments to :meth:`find`. + + .. code-block:: python + + >>> collection.find_one(max_time_ms=100) + + """ + if filter is not None and not isinstance(filter, abc.Mapping): + filter = {"_id": filter} + cursor = self.find(filter, *args, **kwargs) + for result in cursor.limit(-1): + return result + return None + + def find(self, *args: Any, **kwargs: Any) -> Cursor[_DocumentType]: + """Query the database. + + The `filter` argument is a query document that all results + must match. For example: + + >>> db.test.find({"hello": "world"}) + + only matches documents that have a key "hello" with value + "world". Matches can have other keys *in addition* to + "hello". The `projection` argument is used to specify a subset + of fields that should be included in the result documents. By + limiting results to a certain subset of fields you can cut + down on network traffic and decoding time. + + Raises :class:`TypeError` if any of the arguments are of + improper type. Returns an instance of + :class:`~pymongo.cursor.Cursor` corresponding to this query. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.find() as cursor: + for doc in cursor: + print(doc) + + The :meth:`find` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + :param filter: A query document that selects which documents + to include in the result set. Can be an empty document to include + all documents. + :param projection: a list of field names that should be + returned in the result set or a dict specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a dict to exclude fields from + the result (e.g. projection={'_id': False}). + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param skip: the number of documents to omit (from + the start of the result set) when returning the results + :param limit: the maximum number of results to + return. A limit of 0 (the default) is equivalent to setting no + limit. 
+ :param no_cursor_timeout: if False (the default), any + returned cursor is closed by the server after 10 minutes of + inactivity. If set to True, the returned cursor will never + time out on the server. Care should be taken to ensure that + cursors with no_cursor_timeout turned on are properly closed. + :param cursor_type: the type of cursor to return. The valid + options are defined by :class:`~pymongo.cursor.CursorType`: + + - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of + this find call will return a standard cursor over the result set. + - :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this + find call will be a tailable cursor - tailable cursors are only + for use with capped collections. They are not closed when the + last data is retrieved but are kept open and the cursor location + marks the final document position. If more data is received + iteration of the cursor will continue from the last document + received. For details, see the `tailable cursor documentation + `_. + - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result + of this find call will be a tailable cursor with the await flag + set. The server will wait for a few seconds after returning the + full result set so that it can capture and return additional data + added during the query. + - :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this + find call will be an exhaust cursor. MongoDB will stream batched + results to the client without waiting for the client to request + each batch, reducing latency. See notes on compatibility below. + + :param sort: a list of (key, direction) pairs + specifying the sort order for this query. See + :meth:`~pymongo.cursor.Cursor.sort` for details. + :param allow_partial_results: if True, mongos will return + partial results if some shards are down instead of returning an + error. + :param oplog_replay: **DEPRECATED** - if True, set the + oplogReplay query flag. Default: False. + :param batch_size: Limits the number of documents returned in + a single batch. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param return_key: If True, return only the index keys in + each document. + :param show_record_id: If True, adds a field ``$recordId`` in + each document with the storage engine's internal record identifier. + :param snapshot: **DEPRECATED** - If True, prevents the + cursor from returning a document more than once because of an + intervening write operation. + :param hint: An index, in the same format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the + proper index to use for the query. + :param max_time_ms: Specifies a time limit for a query + operation. If the specified time is exceeded, the operation will be + aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass + this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor. + :param max_scan: **DEPRECATED** - The maximum number of + documents to scan. Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.max_scan` on the cursor. + :param min: A list of field, limit pairs specifying the + inclusive lower bound for all keys of a specific index in order. + Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.min` on the cursor. ``hint`` must + also be passed to ensure the query utilizes the correct index. 
+ :param max: A list of field, limit pairs specifying the + exclusive upper bound for all keys of a specific index in order. + Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.max` on the cursor. ``hint`` must + also be passed to ensure the query utilizes the correct index. + :param comment: A string to attach to the query to help + interpret and trace the operation in the server logs and in profile + data. Pass this as an alternative to calling + :meth:`~pymongo.cursor.Cursor.comment` on the cursor. + :param allow_disk_use: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. The option has no effect if + MongoDB can satisfy the specified sort using an index, or if the + blocking sort requires less memory than the 100 MiB limit. This + option is only supported on MongoDB 4.4 and above. + + .. note:: There are a number of caveats to using + :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type: + + - The `limit` option can not be used with an exhaust cursor. + + - Exhaust cursors are not supported by mongos and can not be + used with a sharded cluster. + + - A :class:`~pymongo.cursor.Cursor` instance created with the + :attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an + exclusive :class:`~socket.socket` connection to MongoDB. If the + :class:`~pymongo.cursor.Cursor` is discarded without being + completely iterated the underlying :class:`~socket.socket` + connection will be closed and discarded without being returned to + the connection pool. + + .. versionchanged:: 4.0 + Removed the ``modifiers`` option. + Empty projections (eg {} or []) are passed to the server as-is, + rather than the previous behavior which substituted in a + projection of ``{"_id": 1}``. This means that an empty projection + will now return the entire document, not just the ``"_id"`` field. + + .. versionchanged:: 3.11 + Added the ``allow_disk_use`` option. + Deprecated the ``oplog_replay`` option. Support for this option is + deprecated in MongoDB 4.4. The query engine now automatically + optimizes queries against the oplog without requiring this + option to be set. + + .. versionchanged:: 3.7 + Deprecated the ``snapshot`` option, which is deprecated in MongoDB + 3.6 and removed in MongoDB 4.0. + Deprecated the ``max_scan`` option. Support for this option is + deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit + server-side execution time. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.5 + Added the options ``return_key``, ``show_record_id``, ``snapshot``, + ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and + ``comment``. + Deprecated the ``modifiers`` option. + + .. versionchanged:: 3.4 + Added support for the ``collation`` option. + + .. versionchanged:: 3.0 + Changed the parameter names ``spec``, ``fields``, ``timeout``, and + ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``, + and ``allow_partial_results`` respectively. + Added the ``cursor_type``, ``oplog_replay``, and ``modifiers`` + options. + Removed the ``network_timeout``, ``read_preference``, ``tag_sets``, + ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``, + ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and + slave_okay parameters. + Removed ``compile_re`` option: PyMongo now always + represents BSON regular expressions as :class:`~bson.regex.Regex` + objects. 
Use :meth:`~bson.regex.Regex.try_compile` to attempt to + convert from a BSON regular expression to a Python regular + expression object. + Soft deprecated the ``manipulate`` option. + + .. seealso:: The MongoDB documentation on `find `_. + """ + return Cursor(self, *args, **kwargs) + + def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: + """Query the database and retrieve batches of raw BSON. + + Similar to the :meth:`find` method but returns a + :class:`~pymongo.cursor.RawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = db.test.find_raw_batches() + >>> for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: find_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Instead of ignoring the user-specified read concern, this method + now sends it to the server when connected to MongoDB 3.6+. + + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. + if self._database.client._encrypter: + raise InvalidOperation("find_raw_batches does not support auto encryption") + return RawBatchCursor(self, *args, **kwargs) + + def _count_cmd( + self, + session: Optional[ClientSession], + conn: Connection, + read_preference: Optional[_ServerMode], + cmd: dict[str, Any], + collation: Optional[Collation], + ) -> int: + """Internal count command helper.""" + res = self._command( + conn, + cmd, + read_preference=read_preference, + codec_options=self._write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + return int(res["n"]) + + def _aggregate_one_result( + self, + conn: Connection, + read_preference: Optional[_ServerMode], + cmd: dict[str, Any], + collation: Optional[_CollationIn], + session: Optional[ClientSession], + ) -> Optional[Mapping[str, Any]]: + """Internal helper to run an aggregate that returns a single result.""" + result = self._command( + conn, + cmd, + read_preference, + allowable_errors=[26], # Ignore NamespaceNotFound. + codec_options=self._write_response_codec_options, + read_concern=self.read_concern, + collation=collation, + session=session, + ) + # cursor will not be present for NamespaceNotFound errors. + if "cursor" not in result: + return None + batch = result["cursor"]["firstBatch"] + return batch[0] if batch else None + + def estimated_document_count(self, comment: Optional[Any] = None, **kwargs: Any) -> int: + """Get an estimate of the number of documents in this collection using + collection metadata. + + The :meth:`estimated_document_count` method is **not** supported in a + transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. + + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + .. versionchanged:: 4.2 + This method now always uses the `count`_ command. Due to an oversight in versions + 5.0.0-5.0.8 of MongoDB, the count command was not included in V1 of the + `versioned API `_. 
Users of the Stable API with estimated_document_count are + recommended to upgrade their server version to 5.0.9+ or set + :attr:`pymongo.server_api.ServerApi.strict` to ``False`` to avoid encountering errors. + + .. versionadded:: 3.7 + .. _count: https://mongodb.com/docs/manual/reference/command/count/ + """ + if "session" in kwargs: + raise ConfigurationError("estimated_document_count does not support sessions") + if comment is not None: + kwargs["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"count": self._name} + cmd.update(kwargs) + return self._count_cmd(session, conn, read_preference, cmd, collation=None) + + return self._retryable_non_cursor_read(_cmd, None, operation=_Op.COUNT) + + def count_documents( + self, + filter: Mapping[str, Any], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> int: + """Count the number of documents in this collection. + + .. note:: For a fast count of the total documents in a collection see + :meth:`estimated_document_count`. + + The :meth:`count_documents` method is supported in a transaction. + + All optional parameters should be passed as keyword arguments + to this method. Valid options include: + + - `skip` (int): The number of matching documents to skip before + returning results. + - `limit` (int): The maximum number of documents to count. Must be + a positive integer. If not provided, no limit is imposed. + - `maxTimeMS` (int): The maximum amount of time to allow this + operation to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `hint` (string or list of tuples): The index to use. Specify either + the index name as a string or the index specification as a list of + tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). + + The :meth:`count_documents` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + .. note:: When migrating from :meth:`count` to :meth:`count_documents` + the following query operators must be replaced: + + +-------------+-------------------------------------+ + | Operator | Replacement | + +=============+=====================================+ + | $where | `$expr`_ | + +-------------+-------------------------------------+ + | $near | `$geoWithin`_ with `$center`_ | + +-------------+-------------------------------------+ + | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | + +-------------+-------------------------------------+ + + :param filter: A query document that selects which documents + to count in the collection. Can be an empty document to count all + documents. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: See list of options above. + + + .. versionadded:: 3.7 + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$geoWithin: https://mongodb.com/docs/manual/reference/operator/query/geoWithin/ + .. _$center: https://mongodb.com/docs/manual/reference/operator/query/center/ + .. 
_$centerSphere: https://mongodb.com/docs/manual/reference/operator/query/centerSphere/ + """ + pipeline = [{"$match": filter}] + if "skip" in kwargs: + pipeline.append({"$skip": kwargs.pop("skip")}) + if "limit" in kwargs: + pipeline.append({"$limit": kwargs.pop("limit")}) + if comment is not None: + kwargs["comment"] = comment + pipeline.append({"$group": {"_id": 1, "n": {"$sum": 1}}}) + if "hint" in kwargs and not isinstance(kwargs["hint"], str): + kwargs["hint"] = helpers_shared._index_document(kwargs["hint"]) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> int: + cmd: dict[str, Any] = {"aggregate": self._name, "pipeline": pipeline, "cursor": {}} + cmd.update(kwargs) + result = self._aggregate_one_result(conn, read_preference, cmd, collation, session) + if not result: + return 0 + return result["n"] + + return self._retryable_non_cursor_read(_cmd, session, _Op.COUNT) + + def _retryable_non_cursor_read( + self, + func: Callable[ + [Optional[ClientSession], Server, Connection, Optional[_ServerMode]], + T, + ], + session: Optional[ClientSession], + operation: str, + ) -> T: + """Non-cursor read helper to handle implicit session creation.""" + client = self._database.client + with client._tmp_session(session) as s: + return client._retryable_read(func, self._read_preference_for(s), s, operation) + + def create_indexes( + self, + indexes: Sequence[IndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create one or more indexes on this collection. + + >>> from pymongo import IndexModel, ASCENDING, DESCENDING + >>> index1 = IndexModel([("hello", DESCENDING), + ... ("world", ASCENDING)], name="hello_world") + >>> index2 = IndexModel([("goodbye", DESCENDING)]) + >>> db.test.create_indexes([index1, index2]) + ["hello_world", "goodbye_-1"] + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + + + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + .. versionadded:: 3.0 + + .. _createIndexes: https://mongodb.com/docs/manual/reference/command/createIndexes/ + """ + common.validate_list("indexes", indexes) + if comment is not None: + kwargs["comment"] = comment + return self._create_indexes(indexes, session, **kwargs) + + @_csot.apply + def _create_indexes( + self, indexes: Sequence[IndexModel], session: Optional[ClientSession], **kwargs: Any + ) -> list[str]: + """Internal createIndexes helper. + + :param indexes: A list of :class:`~pymongo.operations.IndexModel` + instances. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param kwargs: optional arguments to the createIndexes + command (like maxTimeMS) can be passed as keyword arguments. 
+ """ + names = [] + with self._conn_for_writes(session, operation=_Op.CREATE_INDEXES) as conn: + supports_quorum = conn.max_wire_version >= 9 + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in indexes: + if not isinstance(index, IndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.IndexModel" + ) + document = index.document + names.append(document["name"]) + yield document + + cmd = {"createIndexes": self.name, "indexes": list(gen_indexes())} + cmd.update(kwargs) + if "commitQuorum" in kwargs and not supports_quorum: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use the " + "commitQuorum option for createIndexes" + ) + + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + write_concern=self._write_concern_for(session), + session=session, + ) + return names + + def create_index( + self, + keys: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> str: + """Creates an index on this collection. + + Takes either a single key or a list containing (key, direction) pairs + or keys. If no direction is given, :data:`~pymongo.ASCENDING` will + be assumed. + The key(s) must be an instance of :class:`str` and the direction(s) must + be one of (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`, + :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`, + :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`). + + To create a single key ascending index on the key ``'mike'`` we just + use a string argument:: + + >>> my_collection.create_index("mike") + + For a compound index on ``'mike'`` descending and ``'eliot'`` + ascending we need to use a list of tuples:: + + >>> my_collection.create_index([("mike", pymongo.DESCENDING), + ... "eliot"]) + + All optional index creation parameters should be passed as + keyword arguments to this method. For example:: + + >>> my_collection.create_index([("mike", pymongo.DESCENDING)], + ... background=True) + + Valid options include, but are not limited to: + + - `name`: custom name to use for this index - if none is + given, a name will be generated. + - `unique`: if ``True``, creates a uniqueness constraint on the + index. + - `background`: if ``True``, this index should be created in the + background. + - `sparse`: if ``True``, omit from the index any documents that lack + the indexed field. + - `bucketSize`: for use with geoHaystack indexes. + Number of documents to group together within a certain proximity + to a given longitude and latitude. + - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` + index. + - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` + index. + - `expireAfterSeconds`: Used to create an expiring (TTL) + collection. MongoDB will automatically delete documents from + this collection after the specified number of seconds. The indexed + field must be a UTC datetime or the data will not expire. See the + sketch after this list for an example. + - `partialFilterExpression`: A document that specifies a filter for + a partial index. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `wildcardProjection`: Allows users to include or exclude specific + field paths from a `wildcard index`_ using the {"$**" : 1} key + pattern. Requires MongoDB >= 4.2. + - `hidden`: if ``True``, this index will be hidden from the query + planner and will not be evaluated as part of query plan + selection. Requires MongoDB >= 4.4.
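+ + As an illustrative sketch, a TTL index that expires documents one + hour after the time stored in a hypothetical ``"created_at"`` field:: + + >>> my_collection.create_index("created_at", expireAfterSeconds=3600) + 'created_at_1'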
+ + See the MongoDB documentation for a full list of supported options by + server version. + + .. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The + option is silently ignored by the server and unique index builds + using the option will fail if a duplicate value is detected. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + :param keys: a single key or a list of (key, direction) + pairs specifying the index to create + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: any additional index creation + options (see the above list) should be passed as keyword + arguments. + + .. versionchanged:: 4.4 + Allow passing a list containing (key, direction) pairs + or keys for the ``keys`` parameter. + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.11 + Added the ``hidden`` option. + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for passing maxTimeMS + in kwargs. + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. Support the `collation` option. + .. versionchanged:: 3.2 + Added partialFilterExpression to support partial indexes. + .. versionchanged:: 3.0 + Renamed `key_or_list` to `keys`. Removed the `cache_for` option. + :meth:`create_index` no longer caches index names. Removed support + for the drop_dups and bucket_size aliases. + + .. seealso:: The MongoDB documentation on `indexes `_. + + .. _wildcard index: https://dochub.mongodb.org/core/index-wildcard/ + """ + cmd_options = {} + if "maxTimeMS" in kwargs: + cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS") + if comment is not None: + cmd_options["comment"] = comment + index = IndexModel(keys, **kwargs) + return (self._create_indexes([index], session, **cmd_options))[0] + + def drop_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Drops all indexes on this collection. + + Can be used on non-existent collections or collections with no indexes. + Raises OperationFailure on an error. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the dropIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + """ + if comment is not None: + kwargs["comment"] = comment + self._drop_index("*", session=session, **kwargs) + + @_csot.apply + def drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Drops the specified index on this collection. + + Can be used on non-existent collections or collections with no + indexes. Raises OperationFailure on an error (e.g. trying to + drop an index that does not exist).
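+ + For example, a minimal sketch:: + + >>> db.test.create_index("x") + 'x_1' + >>> db.test.drop_index("x_1")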
`index_or_name` + can be either an index name (as returned by `create_index`), + or an index specifier (as passed to `create_index`). An index + specifier should be a list of (key, direction) pairs. Raises + TypeError if index is not an instance of (str, list). + + .. warning:: + + If a custom name was used on index creation (by + passing the `name` parameter to :meth:`create_index`) the index + **must** be dropped by name. + + :param index_or_name: index (or name of index) to drop + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the dropIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. Added support for arbitrary keyword + arguments. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + self._drop_index(index_or_name, session, comment, **kwargs) + + @_csot.apply + def _drop_index( + self, + index_or_name: _IndexKeyHint, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + name = index_or_name + if isinstance(index_or_name, list): + name = helpers_shared._gen_index_name(index_or_name) + + if not isinstance(name, str): + raise TypeError(f"index_or_name must be an instance of str or list, not {type(name)}") + + cmd = {"dropIndexes": self._name, "index": name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session, operation=_Op.DROP_INDEXES) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + session=session, + ) + + def list_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the index documents for this collection. + + >>> for index in db.test.list_indexes(): + ... print(index) + ... + SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')]) + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.list_indexes() as cursor: + for index in cursor: + print(index) + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. 
versionadded:: 3.0 + """ + return self._list_indexes(session, comment) + + def _list_indexes( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> CommandCursor[MutableMapping[str, Any]]: + codec_options: CodecOptions[Mapping[str, Any]] = CodecOptions(SON) + coll = cast( + Collection[MutableMapping[str, Any]], + self.with_options(codec_options=codec_options, read_preference=ReadPreference.PRIMARY), + ) + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[MutableMapping[str, Any]]: + cmd = {"listIndexes": self._name, "cursor": {}} + if comment is not None: + cmd["comment"] = comment + + try: + cursor = ( + self._command(conn, cmd, read_preference, codec_options, session=session) + )["cursor"] + except OperationFailure as exc: + # Ignore NamespaceNotFound errors to match the behavior + # of reading from *.system.indexes. + if exc.code != 26: + raise + cursor = {"id": 0, "firstBatch": []} + cmd_cursor = CommandCursor( + coll, + cursor, + conn.address, + session=session, + comment=cmd.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + with self._database.client._tmp_session(session) as s: + return self._database.client._retryable_read( + _cmd, read_pref, s, operation=_Op.LIST_INDEXES + ) + + def index_information( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get information on this collection's indexes. + + Returns a dictionary where the keys are index names (as + returned by create_index()) and the values are dictionaries + containing information about each index. The dictionary is + guaranteed to contain at least a single key, ``"key"`` which + is a list of (key, direction) pairs specifying the index (as + passed to create_index()). It will also contain any other + metadata about the indexes, except for the ``"ns"`` and + ``"name"`` keys, which are cleaned. Example output might look + like this: + + >>> db.test.create_index("x", unique=True) + 'x_1' + >>> db.test.index_information() + {'_id_': {'key': [('_id', 1)]}, + 'x_1': {'unique': True, 'key': [('x', 1)]}} + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + cursor = self._list_indexes(session=session, comment=comment) + info = {} + for index in cursor: + index["key"] = list(index["key"].items()) + index = dict(index) # noqa: PLW2901 + info[index.pop("name")] = index + return info + + def list_search_indexes( + self, + name: Optional[str] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[Mapping[str, Any]]: + """Return a cursor over search indexes for the current collection. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. 
+ To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.list_search_indexes() as cursor: + for index in cursor: + print(index) + + :param name: If given, the name of the index to search + for. Only indexes with matching index names will be returned. + If not given, all search indexes for the current collection + will be returned. + :param session: a :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if name is None: + pipeline: _Pipeline = [{"$listSearchIndexes": {}}] + else: + pipeline = [{"$listSearchIndexes": {"name": name}}] + + coll = self.with_options( + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + read_concern=DEFAULT_READ_CONCERN, + ) + cmd = _CollectionAggregationCommand( + coll, + CommandCursor, + pipeline, + kwargs, + comment=comment, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session), # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.LIST_SEARCH_INDEX, + ) + + def create_search_index( + self, + model: Union[Mapping[str, Any], SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Any = None, + **kwargs: Any, + ) -> str: + """Create a single search index for the current collection. + + :param model: The model for the new search index. + It can be given as a :class:`~pymongo.operations.SearchIndexModel` + instance or a dictionary with a model "definition" and optional + "name". + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: The name of the new search index. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + if not isinstance(model, SearchIndexModel): + model = SearchIndexModel(**model) + return (self._create_search_indexes([model], session, comment, **kwargs))[0] + + def create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Create multiple search indexes for the current collection. + + :param models: A list of :class:`~pymongo.operations.SearchIndexModel` instances. + :param session: a :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the createSearchIndexes + command (like maxTimeMS) can be passed as keyword arguments. + + :return: A list of the newly created search index names. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. 
versionadded:: 4.5 + """ + return self._create_search_indexes(models, session, comment, **kwargs) + + def _create_search_indexes( + self, + models: list[SearchIndexModel], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + + def gen_indexes() -> Iterator[Mapping[str, Any]]: + for index in models: + if not isinstance(index, SearchIndexModel): + raise TypeError( + f"{index!r} is not an instance of pymongo.operations.SearchIndexModel" + ) + yield index.document + + cmd = {"createSearchIndexes": self.name, "indexes": list(gen_indexes())} + cmd.update(kwargs) + + with self._conn_for_writes(session, operation=_Op.CREATE_SEARCH_INDEXES) as conn: + resp = self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + return [index["name"] for index in resp["indexesCreated"]] + + def drop_search_index( + self, + name: str, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Delete a search index by index name. + + :param name: The name of the search index to be deleted. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the dropSearchIndex + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = {"dropSearchIndex": self._name, "name": name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session, operation=_Op.DROP_SEARCH_INDEXES) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + def update_search_index( + self, + name: str, + definition: Mapping[str, Any], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> None: + """Update a search index by replacing the existing index definition with the provided definition. + + :param name: The name of the search index to be updated. + :param definition: The new search index definition. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: optional arguments to the updateSearchIndex + command (like maxTimeMS) can be passed as keyword arguments. + + .. note:: requires a MongoDB server version 7.0+ Atlas cluster. + + .. versionadded:: 4.5 + """ + cmd = {"updateSearchIndex": self._name, "name": name, "definition": definition} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + with self._conn_for_writes(session, operation=_Op.UPDATE_SEARCH_INDEX) as conn: + self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + allowable_errors=["ns not found", 26], + codec_options=_UNICODE_REPLACE_CODEC_OPTIONS, + ) + + def options( + self, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + ) -> MutableMapping[str, Any]: + """Get the options set on this collection. + + Returns a dictionary of options and their values - see + :meth:`~pymongo.database.Database.create_collection` for more + information on the possible options. Returns an empty + dictionary if the collection has not been created yet.
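+ + For example, a minimal sketch, assuming a capped collection was + created with ``db.create_collection("capped_coll", capped=True, size=4096)``:: + + >>> db.capped_coll.options() + {'capped': True, 'size': 4096}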
+ + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + dbo = self._database.client.get_database( + self._database.name, + self.codec_options, + self.read_preference, + self.write_concern, + self.read_concern, + ) + cursor = dbo.list_collections(session=session, filter={"name": self._name}, comment=comment) + + result = None + for doc in cursor: + result = doc + break + + if not result: + return {} + + options = result.get("options", {}) + assert options is not None + if "create" in options: + del options["create"] + + return options + + @_csot.apply + def _aggregate( + self, + aggregation_command: Type[_AggregationCommand], + pipeline: _Pipeline, + cursor_class: Type[CommandCursor],  # type: ignore[type-arg] + session: Optional[ClientSession], + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: + if comment is not None: + kwargs["comment"] = comment + cmd = aggregation_command( + self, + cursor_class, + pipeline, + kwargs, + let, + user_fields={"cursor": {"firstBatch": 1}}, + ) + + return self._database.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(session),  # type: ignore[arg-type] + session, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + def aggregate( + self, + pipeline: _Pipeline, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: + """Perform an aggregation using the aggregation framework on this + collection. + + The :meth:`aggregate` method obeys the :attr:`read_preference` of this + :class:`Collection`, except when ``$out`` or ``$merge`` are used on + MongoDB <5.0, in which case + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` is used. + + .. note:: This method does not support the 'explain' option. Please + use `PyMongoExplain `_ + instead. An example is included in the `aggregation example `_ + documentation. + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with collection.aggregate(pipeline) as cursor: + for doc in cursor: + print(doc) + + :param pipeline: a list of aggregation pipeline stages + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method.
Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. + - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `bypassDocumentValidation` (bool): If ``True``, allows the write to opt-out of document level validation. + + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + Added ``let`` parameter. + Support $merge and $out executing on secondaries according to the + collection's :attr:`read_preference`. + .. versionchanged:: 4.0 + Removed the ``useCursor`` option. + .. versionchanged:: 3.9 + Apply this collection's read concern to pipelines containing the + `$out` stage when connected to MongoDB >= 4.2. + Added support for the ``$merge`` pipeline stage. + Aggregations that write always use read preference + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + .. versionchanged:: 3.6 + Added the `session` parameter. Added the `maxAwaitTimeMS` option. + Deprecated the `useCursor` option. + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. Support the `collation` option. + .. versionchanged:: 3.0 + The :meth:`aggregate` method always returns a CommandCursor. The + pipeline argument must be a list. + + .. seealso:: `Aggregation `_ + + .. _aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + with self._database.client._tmp_session(session) as s: + return self._aggregate( + _CollectionAggregationCommand, + pipeline, + CommandCursor, + session=s, + let=let, + comment=comment, + **kwargs, + ) + + def aggregate_raw_batches( + self, + pipeline: _Pipeline, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> RawBatchCursor[_DocumentType]: + """Perform an aggregation and retrieve batches of raw BSON. + + Similar to the :meth:`aggregate` method but returns a + :class:`~pymongo.cursor.RawBatchCursor`. + + This example demonstrates how to work with raw batches, but in practice + raw batches should be passed to an external library that can decode + BSON into another data type, rather than used with PyMongo's + :mod:`bson` module. + + >>> import bson + >>> cursor = db.test.aggregate_raw_batches([ + ... {'$project': {'x': {'$multiply': [2, '$x']}}}]) + >>> for batch in cursor: + ... print(bson.decode_all(batch)) + + .. note:: aggregate_raw_batches does not support auto encryption. + + .. versionchanged:: 3.12 + Added session support. + + .. versionadded:: 3.6 + """ + # OP_MSG is required to support encryption. 
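+        # Raw batches are returned without BSON decoding, so the driver cannot
+        # transparently decrypt them; refuse to run with auto encryption enabled
+        # rather than hand back still-encrypted payloads.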
+ if self._database.client._encrypter: + raise InvalidOperation("aggregate_raw_batches does not support auto encryption") + if comment is not None: + kwargs["comment"] = comment + with self._database.client._tmp_session(session) as s: + return cast( + RawBatchCursor[_DocumentType], + self._aggregate( + _CollectionRawAggregationCommand, + pipeline, + RawBatchCommandCursor, + session=s, + **kwargs, + ), + ) + + @_csot.apply + def rename( + self, + new_name: str, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> MutableMapping[str, Any]: + """Rename this collection. + + If operating in auth mode, client must be authorized as an + admin to perform this operation. Raises :class:`TypeError` if + `new_name` is not an instance of :class:`str`. + Raises :class:`~pymongo.errors.InvalidName` + if `new_name` is not a valid collection name. + + :param new_name: new name for this collection + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional arguments to the rename command + may be passed as keyword arguments to this helper method + (i.e. ``dropTarget=True``) + + .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of + this collection is automatically applied to this operation. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this collection's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + if not isinstance(new_name, str): + raise TypeError(f"new_name must be an instance of str, not {type(new_name)}") + + if not new_name or ".." in new_name: + raise InvalidName("collection names cannot be empty") + if new_name[0] == "." or new_name[-1] == ".": + raise InvalidName("collection names must not start or end with '.'") + if "$" in new_name and not new_name.startswith("oplog.$main"): + raise InvalidName("collection names must not contain '$'") + + new_name = f"{self._database.name}.{new_name}" + cmd = {"renameCollection": self._full_name, "to": new_name} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + write_concern = self._write_concern_for_cmd(cmd, session) + + with self._conn_for_writes(session, operation=_Op.RENAME) as conn: + with self._database.client._tmp_session(session) as s: + return conn.command( + "admin", + cmd, + write_concern=write_concern, + parse_write_concern_error=True, + session=s, + client=self._database.client, + ) + + def distinct( + self, + key: str, + filter: Optional[Mapping[str, Any]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + hint: Optional[_IndexKeyHint] = None, + **kwargs: Any, + ) -> list[Any]: + """Get a list of distinct values for `key` among all documents + in this collection. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + All optional distinct parameters should be passed as keyword arguments + to this method. Valid options include: + + - `maxTimeMS` (int): The maximum amount of time to allow the count + command to run, in milliseconds. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + + The :meth:`distinct` method obeys the :attr:`read_preference` of + this :class:`Collection`. + + :param key: name of the field for which we want to get the distinct + values + :param filter: A query document that specifies the documents + from which to retrieve the distinct values. 
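+ For example, a minimal sketch (field names and results are + illustrative only):: + + >>> db.test.distinct("x", {"y": {"$gt": 0}}) + [1, 2, 3]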
+ :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``). + :param kwargs: See list of options above. + + .. versionchanged:: 4.12 + Added ``hint`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Support the `collation` option. + + """ + if not isinstance(key, str): + raise TypeError(f"key must be an instance of str, not {type(key)}") + if filter is not None: + if "query" in kwargs: + raise ConfigurationError("can't pass both filter and query") + kwargs["query"] = filter + collation = validate_collation_or_none(kwargs.pop("collation", None)) + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: Optional[_ServerMode], + ) -> list: # type: ignore[type-arg] + cmd = {"distinct": self._name, "key": key} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + if hint is not None: + cmd["hint"] = hint # type: ignore[assignment] + return ( + self._command( + conn, + cmd, + read_preference=read_preference, + read_concern=self.read_concern, + collation=collation, + session=session, + user_fields={"values": 1}, + ) + )["values"] + + return self._retryable_non_cursor_read(_cmd, session, operation=_Op.DISTINCT) + + def _find_and_modify( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]], + sort: Optional[_IndexList], + upsert: Optional[bool] = None, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + **kwargs: Any, + ) -> Any: + """Internal findAndModify helper.""" + common.validate_is_mapping("filter", filter) + if not isinstance(return_document, bool): + raise ValueError( + f"return_document must be ReturnDocument.BEFORE or ReturnDocument.AFTER, not {type(return_document)}" + ) + collation = validate_collation_or_none(kwargs.pop("collation", None)) + if hint is not None: + if not isinstance(hint, str): + hint = helpers_shared._index_document(hint) + write_concern = self._write_concern_for_cmd(kwargs, session) + + def _find_and_modify_helper( + session: Optional[ClientSession], conn: Connection, retryable_write: bool + ) -> Any: + cmd = {"findAndModify": self._name, "query": filter, "new": return_document} + if let is not None: + common.validate_is_mapping("let", let) + cmd["let"] = let + cmd.update(kwargs) + if projection is not None: + cmd["fields"] = helpers_shared._fields_list_to_dict(projection, "projection") + if sort is not None: + cmd["sort"] = helpers_shared._index_document(sort) + if upsert is not None: + validate_boolean("upsert", upsert) + cmd["upsert"] = upsert + acknowledged = write_concern.acknowledged + if array_filters is not None: + if not acknowledged: + raise ConfigurationError( + "arrayFilters is unsupported for unacknowledged writes." 
+ ) + cmd["arrayFilters"] = list(array_filters) + if hint is not None: + if conn.max_wire_version < 8: + raise ConfigurationError( + "Must be connected to MongoDB 4.2+ to use hint on find and modify commands." + ) + elif not acknowledged and conn.max_wire_version < 9: + raise ConfigurationError( + "Must be connected to MongoDB 4.4+ to use hint on unacknowledged find and modify commands." + ) + cmd["hint"] = hint + out = self._command( + conn, + cmd, + read_preference=ReadPreference.PRIMARY, + write_concern=write_concern, + collation=collation, + session=session, + retryable_write=retryable_write, + user_fields=_FIND_AND_MODIFY_DOC_FIELDS, + ) + _check_write_command_response(out) + + return out.get("value") + + return self._database.client._retryable_write( + write_concern.acknowledged, + _find_and_modify_helper, + session, + operation=_Op.FIND_AND_MODIFY, + ) + + def find_one_and_delete( + self, + filter: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and deletes it, returning the document. + + >>> db.test.count_documents({'x': 1}) + 2 + >>> db.test.find_one_and_delete({'x': 1}) + {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')} + >>> db.test.count_documents({'x': 1}) + 1 + + If multiple documents match *filter*, a *sort* can be applied. + + >>> for doc in db.test.find({'x': 1}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> db.test.find_one_and_delete( + ... {'x': 1}, sort=[('_id', pymongo.DESCENDING)]) + {'x': 1, '_id': 2} + + The *projection* option can be used to limit the fields returned. + + >>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False}) + {'x': 1} + + :param filter: A query that matches the document to delete. + :param projection: a list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is deleted. + :param hint: An index to use to support the query predicate + specified either by its string name, or in the same format as + passed to :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``). This option is only supported + on MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added ``hint`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.2 + Respects write concern. + + .. 
warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.collection.Collection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionchanged:: 3.4 + Added the `collation` option. + .. versionadded:: 3.0 + """ + kwargs["remove"] = True + if comment is not None: + kwargs["comment"] = comment + return self._find_and_modify( + filter, projection, sort, let=let, hint=hint, session=session, **kwargs + ) + + def find_one_and_replace( + self, + filter: Mapping[str, Any], + replacement: Mapping[str, Any], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and replaces it, returning either the + original or the replaced document. + + The :meth:`find_one_and_replace` method differs from + :meth:`find_one_and_update` by replacing the document matched by + *filter*, rather than modifying the existing document. + + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'x': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) + {'x': 1, '_id': 0} + >>> for doc in db.test.find({}): + ... print(doc) + ... + {'y': 1, '_id': 0} + {'x': 1, '_id': 1} + {'x': 1, '_id': 2} + + :param filter: A query that matches the document to replace. + :param replacement: The replacement document. + :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a mapping to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is replaced. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was replaced, or ``None`` + if no document matches. If + :attr:`ReturnDocument.AFTER`, returns the replaced + or inserted document. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 4.1 + Added ``let`` parameter. + .. versionchanged:: 3.11 + Added the ``hint`` option. 
+ .. versionchanged:: 3.6 + Added ``session`` parameter. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.collection.Collection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_replace(replacement) + kwargs["update"] = replacement + if comment is not None: + kwargs["comment"] = comment + return self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + let=let, + hint=hint, + session=session, + **kwargs, + ) + + def find_one_and_update( + self, + filter: Mapping[str, Any], + update: Union[Mapping[str, Any], _Pipeline], + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + sort: Optional[_IndexList] = None, + upsert: bool = False, + return_document: bool = ReturnDocument.BEFORE, + array_filters: Optional[Sequence[Mapping[str, Any]]] = None, + hint: Optional[_IndexKeyHint] = None, + session: Optional[ClientSession] = None, + let: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _DocumentType: + """Finds a single document and updates it, returning either the + original or the updated document. + + >>> db.test.find_one_and_update( + ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) + {'_id': 665, 'done': False, 'count': 25} + + Returns ``None`` if no document matches the filter. + + >>> db.test.find_one_and_update( + ... {'_exists': False}, {'$inc': {'count': 1}}) + + When the filter matches, by default :meth:`find_one_and_update` + returns the original version of the document before the update was + applied. To return the updated (or inserted in the case of + *upsert*) version of the document instead, use the *return_document* + option. + + >>> from pymongo import ReturnDocument + >>> db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... return_document=ReturnDocument.AFTER) + {'_id': 'userid', 'seq': 1} + + You can limit the fields returned with the *projection* option. + + >>> db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... projection={'seq': True, '_id': False}, + ... return_document=ReturnDocument.AFTER) + {'seq': 2} + + The *upsert* option can be used to create the document if it doesn't + already exist. + + >>> (db.example.delete_many({})).deleted_count + 1 + >>> db.example.find_one_and_update( + ... {'_id': 'userid'}, + ... {'$inc': {'seq': 1}}, + ... projection={'seq': True, '_id': False}, + ... upsert=True, + ... return_document=ReturnDocument.AFTER) + {'seq': 1} + + If multiple documents match *filter*, a *sort* can be applied. + + >>> for doc in db.test.find({'done': True}): + ... print(doc) + ... + {'_id': 665, 'done': True, 'result': {'count': 26}} + {'_id': 701, 'done': True, 'result': {'count': 17}} + >>> db.test.find_one_and_update( + ... {'done': True}, + ... {'$set': {'final': True}}, + ... sort=[('_id', pymongo.DESCENDING)]) + {'_id': 701, 'done': True, 'result': {'count': 17}} + + :param filter: A query that matches the document to update. + :param update: The update operations to apply.
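+ For example ``{'$set': {'done': True}}``. On MongoDB 4.2+, an + aggregation pipeline such as ``[{'$set': {'done': True}}]`` can also + be passed (see the 3.9 changelog entry below).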
+ :param projection: A list of field names that should be + returned in the result document or a mapping specifying the fields + to include or exclude. If `projection` is a list "_id" will + always be returned. Use a dict to exclude fields from + the result (e.g. projection={'_id': False}). + :param sort: a list of (key, direction) pairs + specifying the sort order for the query. If multiple documents + match the query, they are sorted and the first is updated. + :param upsert: When ``True``, inserts a new document if no + document matches the query. Defaults to ``False``. + :param return_document: If + :attr:`ReturnDocument.BEFORE` (the default), + returns the original document before it was updated. If + :attr:`ReturnDocument.AFTER`, returns the updated + or inserted document. + :param array_filters: A list of filters specifying which + array elements an update should apply. + :param hint: An index to use to support the query + predicate specified either by its string name, or in the same + format as passed to + :meth:`~pymongo.collection.Collection.create_index` (e.g. + ``[('field', ASCENDING)]``). This option is only supported on + MongoDB 4.4 and above. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param let: Map of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. "$$var"). + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional command arguments can be passed + as keyword arguments (for example maxTimeMS can be used with + recent server versions). + + .. versionchanged:: 3.11 + Added the ``hint`` option. + .. versionchanged:: 3.9 + Added the ability to accept a pipeline as the ``update``. + .. versionchanged:: 3.6 + Added the ``array_filters`` and ``session`` options. + .. versionchanged:: 3.4 + Added the ``collation`` option. + .. versionchanged:: 3.2 + Respects write concern. + + .. warning:: Starting in PyMongo 3.2, this command uses the + :class:`~pymongo.write_concern.WriteConcern` of this + :class:`~pymongo.collection.Collection` when connected to MongoDB >= + 3.2. Note that using an elevated write concern with this command may + be slower compared to using the default write concern. + + .. versionadded:: 3.0 + """ + common.validate_ok_for_update(update) + common.validate_list_or_none("array_filters", array_filters) + kwargs["update"] = update + if comment is not None: + kwargs["comment"] = comment + return self._find_and_modify( + filter, + projection, + sort, + upsert, + return_document, + array_filters, + hint=hint, + let=let, + session=session, + **kwargs, + ) diff --git a/pymongo/synchronous/command_cursor.py b/pymongo/synchronous/command_cursor.py new file mode 100644 index 0000000000..a09a67efc9 --- /dev/null +++ b/pymongo/synchronous/command_cursor.py @@ -0,0 +1,472 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""CommandCursor class to iterate over command results.""" +from __future__ import annotations + +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterator, + Mapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import CodecOptions, _convert_raw_document_lists_to_streams +from pymongo import _csot +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _RawBatchGetMore, +) +from pymongo.response import PinnedResponse +from pymongo.synchronous.cursor import _ConnectionManager +from pymongo.typings import _Address, _DocumentOut, _DocumentType + +if TYPE_CHECKING: + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +class CommandCursor(Generic[_DocumentType]): + """A cursor / iterator over command cursors.""" + + _getmore_class = _GetMore + + def __init__( + self, + collection: Collection[_DocumentType], + cursor_info: Mapping[str, Any], + address: Optional[_Address], + batch_size: int = 0, + max_await_time_ms: Optional[int] = None, + session: Optional[ClientSession] = None, + comment: Any = None, + ) -> None: + """Create a new command cursor.""" + self._sock_mgr: Any = None + self._collection: Collection[_DocumentType] = collection + self._id = cursor_info["id"] + self._data = deque(cursor_info["firstBatch"]) + self._postbatchresumetoken: Optional[Mapping[str, Any]] = cursor_info.get( + "postBatchResumeToken" + ) + self._address = address + self._batch_size = batch_size + self._max_await_time_ms = max_await_time_ms + self._timeout = self._collection.database.client.options.timeout + self._session = session + if self._session is not None: + self._session._attached_to_cursor = True + self._killed = self._id == 0 + self._comment = comment + if self._killed: + self._end_session() + + if "ns" in cursor_info: # noqa: SIM401 + self._ns = cursor_info["ns"] + else: + self._ns = collection.full_name + + self.batch_size(batch_size) + + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) + + def __del__(self) -> None: + self._die_no_lock() + + def batch_size(self, batch_size: int) -> CommandCursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + + :param batch_size: The size of each batch of results requested. 
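+
+        For example, assuming a collection ``db.coll`` and a ``pipeline``
+        list (both stand-ins), each subsequent getMore batch can be capped
+        at 500 documents::
+
+            cursor = db.coll.aggregate(pipeline).batch_size(500)
+            for doc in cursor:
+                print(doc)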
+ """ + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + + self._batch_size = batch_size == 1 and 2 or batch_size + return self + + def _has_next(self) -> bool: + """Returns `True` if the cursor has documents remaining from the + previous batch. + """ + return len(self._data) > 0 + + @property + def _post_batch_resume_token(self) -> Optional[Mapping[str, Any]]: + """Retrieve the postBatchResumeToken from the response to a + changeStream aggregate or getMore. + """ + return self._postbatchresumetoken + + def _maybe_pin_connection(self, conn: Connection) -> None: + client = self._collection.database.client + if not client._should_pin_cursor(self._session): + return + if not self._sock_mgr: + conn.pin_cursor() + conn_mgr = _ConnectionManager(conn, False) + # Ensure the connection gets returned when the entire result is + # returned in the first batch. + if self._id == 0: + conn_mgr.close() + else: + self._sock_mgr = conn_mgr + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + Even if :attr:`alive` is ``True``, :meth:`next` can raise + :exc:`StopIteration`. Best to use a for loop:: + + for doc in collection.aggregate(pipeline): + print(doc) + + .. note:: :attr:`alive` can be True while iterating a cursor from + a failed server. In this case :attr:`alive` will return False after + :meth:`next` fails to retrieve the next batch of results from the + server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> int: + """Returns the id of the cursor.""" + return self._id + + @property + def address(self) -> Optional[_Address]: + """The (host, port) of the server used, or None. + + .. versionadded:: 3.0 + """ + return self._address + + @property + def session(self) -> Optional[ClientSession]: + """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._session and not self._session._implicit: + return self._session + return None + + def _prepare_to_die(self) -> tuple[int, Optional[_CursorAddress]]: + already_killed = self._killed + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, self._ns) + else: + # Skip killCursors. 
+ cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + cursor_id, address = self._prepare_to_die() + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def _die_lock(self) -> None: + """Closes this cursor.""" + cursor_id, address = self._prepare_to_die() + self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def _end_session(self) -> None: + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session._end_implicit_session() + self._session = None + + def close(self) -> None: + """Explicitly close / kill this cursor.""" + self._die_lock() + + def _send_message(self, operation: _GetMore) -> None: + """Send a getmore message and handle the response.""" + client = self._collection.database.client + try: + response = client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + # Return the session and pinned connection, if necessary. + self.close() + raise + except ConnectionFailure: + # Don't send killCursors because the cursor is already closed. + self._killed = True + # Return the session and pinned connection, if necessary. + self.close() + raise + except Exception: + self.close() + raise + + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + if response.from_command: + cursor = response.docs[0]["cursor"] + documents = cursor["nextBatch"] + self._postbatchresumetoken = cursor.get("postBatchResumeToken") + self._id = cursor["id"] + else: + documents = response.docs + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + + if self._id == 0: + self.close() + self._data = deque(documents) + + def _refresh(self) -> int: + """Refreshes the cursor with more data from the server. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. + """ + if len(self._data) or self._killed: + return len(self._data) + + if self._id: # Get More + dbname, collname = self._ns.split(".", 1) + read_pref = self._collection._read_preference_for(self.session) + self._send_message( + self._getmore_class( + dbname, + collname, + self._batch_size, + self._id, + self._collection.codec_options, + read_pref, + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + False, + self._comment, + ) + ) + else: # Cursor id is zero nothing else to return + self._die_lock() + + return len(self._data) + + def __iter__(self) -> Iterator[_DocumentType]: + return self + + def next(self) -> _DocumentType: + """Advance the cursor.""" + # Block until a document is returnable. 
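+        # Each pass may run one getMore. For tailable command cursors
+        # (e.g. change streams) _try_next can return None repeatedly, so
+        # keep looping until a document arrives or the cursor is exhausted.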
+        while self.alive:
+            doc = self._try_next(True)
+            if doc is not None:
+                return doc
+
+        raise StopIteration
+
+    def __next__(self) -> _DocumentType:
+        return self.next()
+
+    def _try_next(self, get_more_allowed: bool) -> Optional[_DocumentType]:
+        """Advance the cursor blocking for at most one getMore command."""
+        if not len(self._data) and not self._killed and get_more_allowed:
+            self._refresh()
+        if len(self._data):
+            return self._data.popleft()
+        else:
+            return None
+
+    def _next_batch(self, result: list, total: Optional[int] = None) -> bool:  # type: ignore[type-arg]
+        """Get all or some available documents from the cursor."""
+        if not len(self._data) and not self._killed:
+            self._refresh()
+        if len(self._data):
+            if total is None:
+                result.extend(self._data)
+                self._data.clear()
+            else:
+                for _ in range(min(len(self._data), total)):
+                    result.append(self._data.popleft())
+            return True
+        else:
+            return False
+
+    def try_next(self) -> Optional[_DocumentType]:
+        """Advance the cursor without blocking indefinitely.
+
+        This method returns the next document without waiting
+        indefinitely for data.
+
+        If no document is cached locally then this method runs a single
+        getMore command. If the getMore yields any documents, the next
+        document is returned, otherwise, if the getMore returns no documents
+        (because there is no additional data) then ``None`` is returned.
+
+        :return: The next document or ``None`` when no document is available
+            after running a single getMore or when the cursor is closed.
+
+        .. versionadded:: 4.5
+        """
+        return self._try_next(get_more_allowed=True)
+
+    def __enter__(self) -> CommandCursor[_DocumentType]:
+        return self
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        self.close()
+
+    @_csot.apply
+    def to_list(self, length: Optional[int] = None) -> list[_DocumentType]:
+        """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``.
+
+        To use::
+
+            >>> cursor.to_list()
+
+        Or, to read at most n items from the cursor::
+
+            >>> cursor.to_list(n)
+
+        If the cursor is empty or has no more results, an empty list will be returned.
+
+        .. versionadded:: 4.9
+        """
+        res: list[_DocumentType] = []
+        remaining = length
+        if isinstance(length, int) and length < 1:
+            raise ValueError("to_list() length must be greater than 0")
+        while self.alive:
+            if not self._next_batch(res, remaining):
+                break
+            if length is not None:
+                remaining = length - len(res)
+                if remaining == 0:
+                    break
+        return res
+
+
+class RawBatchCommandCursor(CommandCursor[_DocumentType]):
+    _getmore_class = _RawBatchGetMore
+
+    def __init__(
+        self,
+        collection: Collection[_DocumentType],
+        cursor_info: Mapping[str, Any],
+        address: Optional[_Address],
+        batch_size: int = 0,
+        max_await_time_ms: Optional[int] = None,
+        session: Optional[ClientSession] = None,
+        comment: Any = None,
+    ) -> None:
+        """Create a new cursor / iterator over raw batches of BSON data.
+
+        Should not be called directly by application developers -
+        see :meth:`~pymongo.collection.Collection.aggregate_raw_batches`
+        instead.
+
+        .. seealso:: The MongoDB documentation on `cursors `_.
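+
+        Each iteration yields one batch of raw BSON bytes. As an
+        illustrative sketch (``db.coll`` and ``pipeline`` are stand-ins),
+        the documents in a batch can be decoded with
+        :func:`bson.decode_all`::
+
+            for batch in db.coll.aggregate_raw_batches(pipeline):
+                docs = bson.decode_all(batch)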
+ """ + assert not cursor_info.get("firstBatch") + super().__init__( + collection, + cursor_info, + address, + batch_size, + max_await_time_ms, + session, + comment, + ) + + def _unpack_response( # type: ignore[override] + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[dict[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[Mapping[str, Any]]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return raw_response # type: ignore[return-value] + + def __getitem__(self, index: int) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on RawBatchCommandCursor") diff --git a/pymongo/synchronous/cursor.py b/pymongo/synchronous/cursor.py new file mode 100644 index 0000000000..fcd8ebeb1d --- /dev/null +++ b/pymongo/synchronous/cursor.py @@ -0,0 +1,1370 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Cursor class to iterate over Mongo query results.""" +from __future__ import annotations + +import copy +import warnings +from collections import deque +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Iterable, + List, + Mapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson import RE_TYPE, _convert_raw_document_lists_to_streams +from bson.code import Code +from bson.son import SON +from pymongo import _csot, helpers_shared +from pymongo.collation import validate_collation_or_none +from pymongo.common import ( + validate_is_document_type, + validate_is_mapping, +) +from pymongo.cursor_shared import _CURSOR_CLOSED_ERRORS, _QUERY_OPTIONS, CursorType, _Hint, _Sort +from pymongo.errors import ConnectionFailure, InvalidOperation, OperationFailure +from pymongo.lock import _create_lock +from pymongo.message import ( + _CursorAddress, + _GetMore, + _OpMsg, + _OpReply, + _Query, + _RawBatchGetMore, + _RawBatchQuery, +) +from pymongo.response import PinnedResponse +from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType +from pymongo.write_concern import validate_boolean + +if TYPE_CHECKING: + from _typeshed import SupportsItems + + from bson.codec_options import CodecOptions + from pymongo.read_preferences import _ServerMode + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.collection import Collection + from pymongo.synchronous.pool import Connection + +_IS_SYNC = True + + +class _ConnectionManager: + """Used with exhaust cursors to ensure the connection is returned.""" + + def __init__(self, conn: Connection, more_to_come: bool): + self.conn: Optional[Connection] = conn + self.more_to_come = more_to_come + self._lock = _create_lock() + + def update_exhaust(self, more_to_come: bool) -> None: + 
self.more_to_come = more_to_come + + def close(self) -> None: + """Return this instance's connection to the connection pool.""" + if self.conn: + self.conn.unpin() + self.conn = None + + +class Cursor(Generic[_DocumentType]): + _query_class = _Query + _getmore_class = _GetMore + + def __init__( + self, + collection: Collection[_DocumentType], + filter: Optional[Mapping[str, Any]] = None, + projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None, + skip: int = 0, + limit: int = 0, + no_cursor_timeout: bool = False, + cursor_type: int = CursorType.NON_TAILABLE, + sort: Optional[_Sort] = None, + allow_partial_results: bool = False, + oplog_replay: bool = False, + batch_size: int = 0, + collation: Optional[_CollationIn] = None, + hint: Optional[_Hint] = None, + max_scan: Optional[int] = None, + max_time_ms: Optional[int] = None, + max: Optional[_Sort] = None, + min: Optional[_Sort] = None, + return_key: Optional[bool] = None, + show_record_id: Optional[bool] = None, + snapshot: Optional[bool] = None, + comment: Optional[Any] = None, + session: Optional[ClientSession] = None, + allow_disk_use: Optional[bool] = None, + let: Optional[bool] = None, + ) -> None: + """Create a new cursor. + + Should not be called directly by application developers - see + :meth:`~pymongo.collection.Collection.find` instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + # Initialize all attributes used in __del__ before possibly raising + # an error to avoid attribute errors during garbage collection. + self._collection: Collection[_DocumentType] = collection + self._id: Any = None + self._exhaust = False + self._sock_mgr: Any = None + self._killed = False + self._session: Optional[ClientSession] + + if session: + self._session = session + self._session._attached_to_cursor = True + else: + self._session = None + + spec: Mapping[str, Any] = filter or {} + validate_is_mapping("filter", spec) + if not isinstance(skip, int): + raise TypeError(f"skip must be an instance of int, not {type(skip)}") + if not isinstance(limit, int): + raise TypeError(f"limit must be an instance of int, not {type(limit)}") + validate_boolean("no_cursor_timeout", no_cursor_timeout) + if no_cursor_timeout and self._session and self._session._implicit: + warnings.warn( + "use an explicit session with no_cursor_timeout=True " + "otherwise the cursor may still timeout after " + "30 minutes, for more info see " + "https://mongodb.com/docs/v4.4/reference/method/" + "cursor.noCursorTimeout/" + "#session-idle-timeout-overrides-nocursortimeout", + UserWarning, + stacklevel=2, + ) + if cursor_type not in ( + CursorType.NON_TAILABLE, + CursorType.TAILABLE, + CursorType.TAILABLE_AWAIT, + CursorType.EXHAUST, + ): + raise ValueError("not a valid value for cursor_type") + validate_boolean("allow_partial_results", allow_partial_results) + validate_boolean("oplog_replay", oplog_replay) + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + # Only set if allow_disk_use is provided by the user, else None. 
+ if allow_disk_use is not None: + allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use) + + if projection is not None: + projection = helpers_shared._fields_list_to_dict(projection, "projection") + + if let is not None: + validate_is_document_type("let", let) + + self._let = let + self._spec = spec + self._has_filter = filter is not None + self._projection = projection + self._skip = skip + self._limit = limit + self._batch_size = batch_size + self._ordering = sort and helpers_shared._index_document(sort) or None + self._max_scan = max_scan + self._explain = False + self._comment = comment + self._max_time_ms = max_time_ms + self._timeout = self._collection.database.client.options.timeout + self._max_await_time_ms: Optional[int] = None + self._max: Optional[Union[dict[Any, Any], _Sort]] = max + self._min: Optional[Union[dict[Any, Any], _Sort]] = min + self._collation = validate_collation_or_none(collation) + self._return_key = return_key + self._show_record_id = show_record_id + self._allow_disk_use = allow_disk_use + self._snapshot = snapshot + self._hint: Union[str, dict[str, Any], None] + self._set_hint(hint) + + # This is ugly. People want to be able to do cursor[5:5] and + # get an empty result set (old behavior was an + # exception). It's hard to do that right, though, because the + # server uses limit(0) to mean 'no limit'. So we set __empty + # in that case and check for it when iterating. We also unset + # it anytime we change __limit. + self._empty = False + + self._data: deque = deque() # type: ignore[type-arg] + self._address: Optional[_Address] = None + self._retrieved = 0 + + self._codec_options = collection.codec_options + # Read preference is set when the initial find is sent. + self._read_preference: Optional[_ServerMode] = None + self._read_concern = collection.read_concern + + self._query_flags = cursor_type + self._cursor_type = cursor_type + if no_cursor_timeout: + self._query_flags |= _QUERY_OPTIONS["no_timeout"] + if allow_partial_results: + self._query_flags |= _QUERY_OPTIONS["partial"] + if oplog_replay: + self._query_flags |= _QUERY_OPTIONS["oplog_replay"] + + # The namespace to use for find/getMore commands. + self._dbname = collection.database.name + self._collname = collection.name + + # Checking exhaust cursor support requires network IO + if _IS_SYNC: + self._exhaust_checked = True + self._supports_exhaust() # type: ignore[unused-coroutine] + else: + self._exhaust = cursor_type == CursorType.EXHAUST + self._exhaust_checked = False + + def _supports_exhaust(self) -> None: + # Exhaust cursor support + if self._cursor_type == CursorType.EXHAUST: + if self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + self._exhaust = True + + @property + def collection(self) -> Collection[_DocumentType]: + """The :class:`~pymongo.collection.Collection` that this + :class:`Cursor` is iterating. + """ + return self._collection + + @property + def retrieved(self) -> int: + """The number of documents retrieved so far.""" + return self._retrieved + + def __del__(self) -> None: + self._die_no_lock() + + def clone(self) -> Cursor[_DocumentType]: + """Get a clone of this cursor. + + Returns a new Cursor instance with options matching those that have + been set on the current instance. The clone will be completely + unevaluated, even if the current instance has been partially or + completely evaluated. 
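+
+        For example, assuming a collection ``db.test``, a configured query
+        can be re-run with one extra option without disturbing the original
+        cursor::
+
+            cursor = db.test.find({'x': 1}).sort('y')
+            limited = cursor.clone().limit(10)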
+ """ + return self._clone(True) + + def _clone(self, deepcopy: bool = True, base: Optional[Cursor] = None) -> Cursor: # type: ignore[type-arg] + """Internal clone helper.""" + if not base: + if self._session and not self._session._implicit: + base = self._clone_base(self._session) + else: + base = self._clone_base(None) + + values_to_clone = ( + "spec", + "projection", + "skip", + "limit", + "max_time_ms", + "max_await_time_ms", + "comment", + "max", + "min", + "ordering", + "explain", + "hint", + "batch_size", + "max_scan", + "query_flags", + "collation", + "empty", + "show_record_id", + "return_key", + "allow_disk_use", + "snapshot", + "exhaust", + "has_filter", + "cursor_type", + ) + data = { + k: v for k, v in self.__dict__.items() if k.startswith("_") and k[1:] in values_to_clone + } + if deepcopy: + data = self._deepcopy(data) + base.__dict__.update(data) + return base + + def _clone_base(self, session: Optional[ClientSession]) -> Cursor: # type: ignore[type-arg] + """Creates an empty Cursor object for information to be copied into.""" + return self.__class__(self._collection, session=session) + + def _query_spec(self) -> Mapping[str, Any]: + """Get the spec to use for a query.""" + operators: dict[str, Any] = {} + if self._ordering: + operators["$orderby"] = self._ordering + if self._explain: + operators["$explain"] = True + if self._hint: + operators["$hint"] = self._hint + if self._let: + operators["let"] = self._let + if self._comment: + operators["$comment"] = self._comment + if self._max_scan: + operators["$maxScan"] = self._max_scan + if self._max_time_ms is not None: + operators["$maxTimeMS"] = self._max_time_ms + if self._max: + operators["$max"] = self._max + if self._min: + operators["$min"] = self._min + if self._return_key is not None: + operators["$returnKey"] = self._return_key + if self._show_record_id is not None: + # This is upgraded to showRecordId for MongoDB 3.2+ "find" command. + operators["$showDiskLoc"] = self._show_record_id + if self._snapshot is not None: + operators["$snapshot"] = self._snapshot + + if operators: + # Make a shallow copy so we can cleanly rewind or clone. + spec = dict(self._spec) + + # Allow-listed commands must be wrapped in $query. + if "$query" not in spec: + # $query has to come first + spec = {"$query": spec} + + spec.update(operators) + return spec + # Have to wrap with $query if "query" is the first key. + # We can't just use $query anytime "query" is a key as + # that breaks commands like count and find_and_modify. + # Checking spec.keys()[0] covers the case that the spec + # was passed as an instance of SON or OrderedDict. + elif "query" in self._spec and (len(self._spec) == 1 or next(iter(self._spec)) == "query"): + return {"$query": self._spec} + + return self._spec + + def _check_okay_to_chain(self) -> None: + """Check if it is okay to chain more options onto this cursor.""" + if self._retrieved or self._id is not None: + raise InvalidOperation("cannot set options after executing query") + + def add_option(self, mask: int) -> Cursor[_DocumentType]: + """Set arbitrary query flags using a bitmask. 
+ + To set the tailable flag: + cursor.add_option(2) + """ + if not isinstance(mask, int): + raise TypeError(f"mask must be an int, not {type(mask)}") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + if self._limit: + raise InvalidOperation("Can't use limit and exhaust together.") + if self._collection.database.client.is_mongos: + raise InvalidOperation("Exhaust cursors are not supported by mongos") + self._exhaust = True + + self._query_flags |= mask + return self + + def remove_option(self, mask: int) -> Cursor[_DocumentType]: + """Unset arbitrary query flags using a bitmask. + + To unset the tailable flag: + cursor.remove_option(2) + """ + if not isinstance(mask, int): + raise TypeError(f"mask must be an int, not {type(mask)}") + self._check_okay_to_chain() + + if mask & _QUERY_OPTIONS["exhaust"]: + self._exhaust = False + + self._query_flags &= ~mask + return self + + def allow_disk_use(self, allow_disk_use: bool) -> Cursor[_DocumentType]: + """Specifies whether MongoDB can use temporary disk files while + processing a blocking sort operation. + + Raises :exc:`TypeError` if `allow_disk_use` is not a boolean. + + .. note:: `allow_disk_use` requires server version **>= 4.4** + + :param allow_disk_use: if True, MongoDB may use temporary + disk files to store data exceeding the system memory limit while + processing a blocking sort operation. + + .. versionadded:: 3.11 + """ + if not isinstance(allow_disk_use, bool): + raise TypeError(f"allow_disk_use must be a bool, not {type(allow_disk_use)}") + self._check_okay_to_chain() + + self._allow_disk_use = allow_disk_use + return self + + def limit(self, limit: int) -> Cursor[_DocumentType]: + """Limits the number of results to be returned by this cursor. + + Raises :exc:`TypeError` if `limit` is not an integer. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` + has already been used. The last `limit` applied to this cursor + takes precedence. A limit of ``0`` is equivalent to no limit. + + :param limit: the number of results to return + + .. seealso:: The MongoDB documentation on `limit `_. + """ + if not isinstance(limit, int): + raise TypeError(f"limit must be an integer, not {type(limit)}") + if self._exhaust: + raise InvalidOperation("Can't use limit and exhaust together.") + self._check_okay_to_chain() + + self._empty = False + self._limit = limit + return self + + def batch_size(self, batch_size: int) -> Cursor[_DocumentType]: + """Limits the number of documents returned in one batch. Each batch + requires a round trip to the server. It can be adjusted to optimize + performance and limit data transfer. + + .. note:: batch_size can not override MongoDB's internal limits on the + amount of data it will return to the client in a single batch (i.e + if you set batch size to 1,000,000,000, MongoDB will currently only + return 4-16MB of results per batch). + + Raises :exc:`TypeError` if `batch_size` is not an integer. + Raises :exc:`ValueError` if `batch_size` is less than ``0``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. The last `batch_size` + applied to this cursor takes precedence. + + :param batch_size: The size of each batch of results requested. 
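+
+        For example, assuming a collection ``db.test``, each round trip to
+        the server can be limited to 100 documents::
+
+            for doc in db.test.find().batch_size(100):
+                print(doc)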
+ """ + if not isinstance(batch_size, int): + raise TypeError(f"batch_size must be an integer, not {type(batch_size)}") + if batch_size < 0: + raise ValueError("batch_size must be >= 0") + self._check_okay_to_chain() + + self._batch_size = batch_size + return self + + def skip(self, skip: int) -> Cursor[_DocumentType]: + """Skips the first `skip` results of this cursor. + + Raises :exc:`TypeError` if `skip` is not an integer. Raises + :exc:`ValueError` if `skip` is less than ``0``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has + already been used. The last `skip` applied to this cursor takes + precedence. + + :param skip: the number of results to skip + """ + if not isinstance(skip, int): + raise TypeError(f"skip must be an integer, not {type(skip)}") + if skip < 0: + raise ValueError("skip must be >= 0") + self._check_okay_to_chain() + + self._skip = skip + return self + + def max_time_ms(self, max_time_ms: Optional[int]) -> Cursor[_DocumentType]: + """Specifies a time limit for a query operation. If the specified + time is exceeded, the operation will be aborted and + :exc:`~pymongo.errors.ExecutionTimeout` is raised. If `max_time_ms` + is ``None`` no limit is applied. + + Raises :exc:`TypeError` if `max_time_ms` is not an integer or ``None``. + Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` + has already been used. + + :param max_time_ms: the time limit after which the operation is aborted + """ + if not isinstance(max_time_ms, int) and max_time_ms is not None: + raise TypeError(f"max_time_ms must be an integer or None, not {type(max_time_ms)}") + self._check_okay_to_chain() + + self._max_time_ms = max_time_ms + return self + + def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_DocumentType]: + """Specifies a time limit for a getMore operation on a + :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other + types of cursor max_await_time_ms is ignored. + + Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or + ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. + + .. note:: `max_await_time_ms` requires server version **>= 3.2** + + :param max_await_time_ms: the time limit after which the operation is + aborted + + .. versionadded:: 3.2 + """ + if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: + raise TypeError( + f"max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}" + ) + self._check_okay_to_chain() + + # Ignore max_await_time_ms if not tailable or await_data is False. + if self._query_flags & CursorType.TAILABLE_AWAIT: + self._max_await_time_ms = max_await_time_ms + + return self + + @overload + def __getitem__(self, index: int) -> _DocumentType: + ... + + @overload + def __getitem__(self, index: slice) -> Cursor[_DocumentType]: + ... + + def __getitem__(self, index: Union[int, slice]) -> Union[_DocumentType, Cursor[_DocumentType]]: + """Get a single document or a slice of documents from this cursor. + + .. warning:: A :class:`~Cursor` is not a Python :class:`list`. Each + index access or slice requires that a new query be run using skip + and limit. Do not iterate the cursor using index accesses. + The following example is **extremely inefficient** and may return + surprising results:: + + cursor = db.collection.find() + # Warning: This runs a new query for each document. + # Don't do this! 
+ for idx in range(10): + print(cursor[idx]) + + Raises :class:`~pymongo.errors.InvalidOperation` if this + cursor has already been used. + + To get a single document use an integral index, e.g.:: + + >>> db.test.find()[50] + + An :class:`IndexError` will be raised if the index is negative + or greater than the amount of documents in this cursor. Any + limit previously applied to this cursor will be ignored. + + To get a slice of documents use a slice index, e.g.:: + + >>> db.test.find()[20:25] + + This will return this cursor with a limit of ``5`` and skip of + ``20`` applied. Using a slice index will override any prior + limits or skips applied to this cursor (including those + applied through previous calls to this method). Raises + :class:`IndexError` when the slice has a step, a negative + start value, or a stop value less than or equal to the start + value. + + :param index: An integer or slice index to be applied to this cursor + """ + if _IS_SYNC: + self._check_okay_to_chain() + self._empty = False + if isinstance(index, slice): + if index.step is not None: + raise IndexError("Cursor instances do not support slice steps") + + skip = 0 + if index.start is not None: + if index.start < 0: + raise IndexError("Cursor instances do not support negative indices") + skip = index.start + + if index.stop is not None: + limit = index.stop - skip + if limit < 0: + raise IndexError( + "stop index must be greater than start index for slice %r" % index + ) + if limit == 0: + self._empty = True + else: + limit = 0 + + self._skip = skip + self._limit = limit + return self + + if isinstance(index, int): + if index < 0: + raise IndexError("Cursor instances do not support negative indices") + clone = self.clone() + clone.skip(index + self._skip) + clone.limit(-1) # use a hard limit + clone._query_flags &= ~CursorType.TAILABLE_AWAIT # PYTHON-1371 + for doc in clone: # type: ignore[attr-defined] + return doc + raise IndexError("no such item for Cursor instance") + raise TypeError("index %r cannot be applied to Cursor instances" % index) + else: + raise IndexError("Cursor does not support indexing") + + def max_scan(self, max_scan: Optional[int]) -> Cursor[_DocumentType]: + """**DEPRECATED** - Limit the number of documents to scan when + performing the query. + + Raises :class:`~pymongo.errors.InvalidOperation` if this + cursor has already been used. Only the last :meth:`max_scan` + applied to this cursor has any effect. + + :param max_scan: the maximum number of documents to scan + + .. versionchanged:: 3.7 + Deprecated :meth:`max_scan`. Support for this option is deprecated in + MongoDB 4.0. Use :meth:`max_time_ms` instead to limit server side + execution time. + """ + self._check_okay_to_chain() + self._max_scan = max_scan + return self + + def max(self, spec: _Sort) -> Cursor[_DocumentType]: + """Adds ``max`` operator that specifies upper bound for specific index. + + When using ``max``, :meth:`~hint` should also be configured to ensure + the query uses the expected index and starting in MongoDB 4.2 + :meth:`~hint` will be required. + + :param spec: a list of field, limit pairs specifying the exclusive + upper bound for all keys of a specific index in order. + + .. versionchanged:: 3.8 + Deprecated cursors that use ``max`` without a :meth:`~hint`. + + .. 
versionadded:: 2.7 + """ + if not isinstance(spec, (list, tuple)): + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") + + self._check_okay_to_chain() + self._max = dict(spec) + return self + + def min(self, spec: _Sort) -> Cursor[_DocumentType]: + """Adds ``min`` operator that specifies lower bound for specific index. + + When using ``min``, :meth:`~hint` should also be configured to ensure + the query uses the expected index and starting in MongoDB 4.2 + :meth:`~hint` will be required. + + :param spec: a list of field, limit pairs specifying the inclusive + lower bound for all keys of a specific index in order. + + .. versionchanged:: 3.8 + Deprecated cursors that use ``min`` without a :meth:`~hint`. + + .. versionadded:: 2.7 + """ + if not isinstance(spec, (list, tuple)): + raise TypeError(f"spec must be an instance of list or tuple, not {type(spec)}") + + self._check_okay_to_chain() + self._min = dict(spec) + return self + + def sort( + self, key_or_list: _Hint, direction: Optional[Union[int, str]] = None + ) -> Cursor[_DocumentType]: + """Sorts this cursor's results. + + Pass a field name and a direction, either + :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`.:: + + for doc in collection.find().sort('field', pymongo.ASCENDING): + print(doc) + + To sort by multiple fields, pass a list of (key, direction) pairs. + If just a name is given, :data:`~pymongo.ASCENDING` will be inferred:: + + for doc in collection.find().sort([ + 'field1', + ('field2', pymongo.DESCENDING)]): + print(doc) + + Text search results can be sorted by relevance:: + + cursor = db.test.find( + {'$text': {'$search': 'some words'}}, + {'score': {'$meta': 'textScore'}}) + + # Sort by 'score' field. + cursor.sort([('score', {'$meta': 'textScore'})]) + + for doc in cursor: + print(doc) + + For more advanced text search functionality, see MongoDB's + `Atlas Search `_. + + Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. Only the last :meth:`sort` applied to this + cursor has any effect. + + :param key_or_list: a single key or a list of (key, direction) + pairs specifying the keys to sort on + :param direction: only used if `key_or_list` is a single + key, if not given :data:`~pymongo.ASCENDING` is assumed + """ + self._check_okay_to_chain() + keys = helpers_shared._index_list(key_or_list, direction) + self._ordering = helpers_shared._index_document(keys) + return self + + def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. note:: This method uses the default verbosity mode of the + `explain command + `_, + ``allPlansExecution``. To use a different verbosity use + :meth:`~pymongo.database.Database.command` to run the explain + command directly. + + .. note:: The timeout of this method can be set using :func:`pymongo.timeout`. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + c = self.clone() + c._explain = True + + # always use a hard limit for explains + if c._limit: + c._limit = -abs(c._limit) + return next(c) + + def _set_hint(self, index: Optional[_Hint]) -> None: + if index is None: + self._hint = None + return + + if isinstance(index, str): + self._hint = index + else: + self._hint = helpers_shared._index_document(index) + + def hint(self, index: Optional[_Hint]) -> Cursor[_DocumentType]: + """Adds a 'hint', telling Mongo the proper index to use for the query. + + Judicious use of hints can greatly improve query + performance. 
When doing a query on multiple fields (at least + one of which is indexed) pass the indexed field as a hint to + the query. Raises :class:`~pymongo.errors.OperationFailure` if the + provided hint requires an index that does not exist on this collection, + and raises :class:`~pymongo.errors.InvalidOperation` if this cursor has + already been used. + + `index` should be an index as passed to + :meth:`~pymongo.collection.Collection.create_index` + (e.g. ``[('field', ASCENDING)]``) or the name of the index. + If `index` is ``None`` any existing hint for this query is + cleared. The last hint applied to this cursor takes precedence + over all others. + + :param index: index to hint on (as an index specifier) + """ + self._check_okay_to_chain() + self._set_hint(index) + return self + + def comment(self, comment: Any) -> Cursor[_DocumentType]: + """Adds a 'comment' to the cursor. + + http://mongodb.com/docs/manual/reference/operator/comment/ + + :param comment: A string to attach to the query to help interpret and + trace the operation in the server logs and in profile data. + + .. versionadded:: 2.7 + """ + self._check_okay_to_chain() + self._comment = comment + return self + + def where(self, code: Union[str, Code]) -> Cursor[_DocumentType]: + """Adds a `$where`_ clause to this query. + + The `code` argument must be an instance of :class:`str` or + :class:`~bson.code.Code` containing a JavaScript expression. + This expression will be evaluated for each document scanned. + Only those documents for which the expression evaluates to + *true* will be returned as results. The keyword *this* refers + to the object currently being scanned. For example:: + + # Find all documents where field "a" is less than "b" plus "c". + for doc in db.test.find().where('this.a < (this.b + this.c)'): + print(doc) + + Raises :class:`TypeError` if `code` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidOperation` if this + :class:`Cursor` has already been used. Only the last call to + :meth:`where` applied to a :class:`Cursor` has any effect. + + .. note:: MongoDB 4.4 drops support for :class:`~bson.code.Code` + with scope variables. Consider using `$expr`_ instead. + + :param code: JavaScript expression to use as a filter + + .. _$expr: https://mongodb.com/docs/manual/reference/operator/query/expr/ + .. _$where: https://mongodb.com/docs/manual/reference/operator/query/where/ + """ + self._check_okay_to_chain() + if not isinstance(code, Code): + code = Code(code) + + # Avoid overwriting a filter argument that was given by the user + # when updating the spec. + spec: dict[str, Any] + if self._has_filter: + spec = dict(self._spec) + else: + spec = cast(dict, self._spec) # type: ignore[type-arg] + spec["$where"] = code + self._spec = spec + return self + + def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: + """Adds a :class:`~pymongo.collation.Collation` to this query. + + Raises :exc:`TypeError` if `collation` is not an instance of + :class:`~pymongo.collation.Collation` or a ``dict``. Raises + :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has + already been used. Only the last collation applied to this cursor has + any effect. + + :param collation: An instance of :class:`~pymongo.collation.Collation`. 
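+
+        For example, a case-insensitive query (collation strength ``2``
+        ignores case differences) might look like::
+
+            from pymongo.collation import Collation
+            cursor = db.test.find({'name': 'bob'}).collation(
+                Collation(locale='en_US', strength=2))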
+ """ + self._check_okay_to_chain() + self._collation = validate_collation_or_none(collation) + return self + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions, # type: ignore[type-arg] + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> Sequence[_DocumentOut]: + return response.unpack_response(cursor_id, codec_options, user_fields, legacy_response) + + def _get_read_preference(self) -> _ServerMode: + if self._read_preference is None: + # Save the read preference for getMore commands. + self._read_preference = self._collection._read_preference_for(self.session) + return self._read_preference + + @property + def alive(self) -> bool: + """Does this cursor have the potential to return more data? + + This is mostly useful with `tailable cursors + `_ + since they will stop iterating even though they *may* return more + results in the future. + + With regular cursors, simply use a for loop instead of :attr:`alive`:: + + for doc in collection.find(): + print(doc) + + .. note:: Even if :attr:`alive` is True, :meth:`next` can raise + :exc:`StopIteration`. :attr:`alive` can also be True while iterating + a cursor from a failed server. In this case :attr:`alive` will + return False after :meth:`next` fails to retrieve the next batch + of results from the server. + """ + return bool(len(self._data) or (not self._killed)) + + @property + def cursor_id(self) -> Optional[int]: + """Returns the id of the cursor + + .. versionadded:: 2.2 + """ + return self._id + + @property + def address(self) -> Optional[tuple[str, Any]]: + """The (host, port) of the server used, or None. + + .. versionchanged:: 3.0 + Renamed from "conn_id". + """ + return self._address + + @property + def session(self) -> Optional[ClientSession]: + """The cursor's :class:`~pymongo.client_session.ClientSession`, or None. + + .. versionadded:: 3.6 + """ + if self._session and not self._session._implicit: + return self._session + return None + + def __copy__(self) -> Cursor[_DocumentType]: + """Support function for `copy.copy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=False) + + def __deepcopy__(self, memo: Any) -> Any: + """Support function for `copy.deepcopy()`. + + .. versionadded:: 2.4 + """ + return self._clone(deepcopy=True) + + @overload + def _deepcopy(self, x: Iterable, memo: Optional[dict[int, Union[list, dict]]] = None) -> list: # type: ignore[type-arg] + ... + + @overload + def _deepcopy( + self, + x: SupportsItems, # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> dict: # type: ignore[type-arg] + ... + + def _deepcopy( + self, + x: Union[Iterable, SupportsItems], # type: ignore[type-arg] + memo: Optional[dict[int, Union[list, dict]]] = None, # type: ignore[type-arg] + ) -> Union[list[Any], dict[str, Any]]: + """Deepcopy helper for the data dictionary or list. + + Regular expressions cannot be deep copied but as they are immutable we + don't have to copy them when cloning. 
+ """ + y: Union[list[Any], dict[str, Any]] + iterator: Iterable[tuple[Any, Any]] + if not hasattr(x, "items"): + y, is_list, iterator = [], True, enumerate(x) + else: + y, is_list, iterator = {}, False, cast("SupportsItems", x).items() # type: ignore[type-arg] + if memo is None: + memo = {} + val_id = id(x) + if val_id in memo: + return memo[val_id] + memo[val_id] = y + + for key, value in iterator: + if isinstance(value, (dict, list)) and not isinstance(value, SON): + value = self._deepcopy(value, memo) # noqa: PLW2901 + elif not isinstance(value, RE_TYPE): + value = copy.deepcopy(value, memo) # noqa: PLW2901 + + if is_list: + y.append(value) # type: ignore[union-attr] + else: + if not isinstance(key, RE_TYPE): + key = copy.deepcopy(key, memo) # noqa: PLW2901 + y[key] = value # type:ignore[index] + return y + + def _prepare_to_die(self, already_killed: bool) -> tuple[int, Optional[_CursorAddress]]: + self._killed = True + if self._id and not already_killed: + cursor_id = self._id + assert self._address is not None + address = _CursorAddress(self._address, f"{self._dbname}.{self._collname}") + else: + # Skip killCursors. + cursor_id = 0 + address = None + return cursor_id, address + + def _die_no_lock(self) -> None: + """Closes this cursor without acquiring a lock.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + self._collection.database.client._cleanup_cursor_no_lock( + cursor_id, address, self._sock_mgr, self._session + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def _die_lock(self) -> None: + """Closes this cursor.""" + try: + already_killed = self._killed + except AttributeError: + # ___init__ did not run to completion (or at all). + return + + cursor_id, address = self._prepare_to_die(already_killed) + self._collection.database.client._cleanup_cursor_lock( + cursor_id, + address, + self._sock_mgr, + self._session, + ) + if self._session and self._session._implicit: + self._session._attached_to_cursor = False + self._session = None + self._sock_mgr = None + + def close(self) -> None: + """Explicitly close / kill this cursor.""" + self._die_lock() + + def distinct(self, key: str) -> list[Any]: + """Get a list of distinct values for `key` among all documents + in the result set of this query. + + Raises :class:`TypeError` if `key` is not an instance of + :class:`str`. + + The :meth:`distinct` method obeys the + :attr:`~pymongo.collection.Collection.read_preference` of the + :class:`~pymongo.collection.Collection` instance on which + :meth:`~pymongo.collection.Collection.find` was called. + + :param key: name of key for which we want to get the distinct values + + .. seealso:: :meth:`pymongo.collection.Collection.distinct` + """ + options: dict[str, Any] = {} + if self._spec: + options["query"] = self._spec + if self._max_time_ms is not None: + options["maxTimeMS"] = self._max_time_ms + if self._comment: + options["comment"] = self._comment + if self._collation is not None: + options["collation"] = self._collation + + return self._collection.distinct(key, session=self._session, **options) + + def _send_message(self, operation: Union[_Query, _GetMore]) -> None: + """Send a query or getmore operation and handles the response. 
+ + If operation is ``None`` this is an exhaust cursor, which reads + the next result batch off the exhaust socket instead of + sending getMore messages to the server. + + Can raise ConnectionFailure. + """ + client = self._collection.database.client + # OP_MSG is required to support exhaust cursors with encryption. + if client._encrypter and self._exhaust: + raise InvalidOperation("exhaust cursors do not support auto encryption") + + try: + response = client._run_operation( + operation, self._unpack_response, address=self._address + ) + except OperationFailure as exc: + if exc.code in _CURSOR_CLOSED_ERRORS or self._exhaust: + # Don't send killCursors because the cursor is already closed. + self._killed = True + if exc.timeout: + self._die_no_lock() + else: + self.close() + # If this is a tailable cursor the error is likely + # due to capped collection roll over. Setting + # self._killed to True ensures Cursor.alive will be + # False. No need to re-raise. + if ( + exc.code in _CURSOR_CLOSED_ERRORS + and self._query_flags & _QUERY_OPTIONS["tailable_cursor"] + ): + return + raise + except ConnectionFailure: + self._killed = True + self.close() + raise + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + self.close() + raise + self._address = response.address + if isinstance(response, PinnedResponse): + if not self._sock_mgr: + self._sock_mgr = _ConnectionManager(response.conn, response.more_to_come) # type: ignore[arg-type] + + cmd_name = operation.name + docs = response.docs + if response.from_command: + if cmd_name != "explain": + cursor = docs[0]["cursor"] + self._id = cursor["id"] + if cmd_name == "find": + documents = cursor["firstBatch"] + # Update the namespace used for future getMore commands. + ns = cursor.get("ns") + if ns: + self._dbname, self._collname = ns.split(".", 1) + else: + documents = cursor["nextBatch"] + self._data = deque(documents) + self._retrieved += len(documents) + else: + self._id = 0 + self._data = deque(docs) + self._retrieved += len(docs) + else: + assert isinstance(response.data, _OpReply) + self._id = response.data.cursor_id + self._data = deque(docs) + self._retrieved += response.data.number_returned + + if self._id == 0: + # Don't wait for garbage collection to call __del__, return the + # socket and the session to the pool now. + self.close() + + if self._limit and self._id and self._limit <= self._retrieved: + self.close() + + def _refresh(self) -> int: + """Refreshes the cursor with more data from Mongo. + + Returns the length of self._data after refresh. Will exit early if + self._data is already non-empty. Raises OperationFailure when the + cursor cannot be refreshed due to an error on the query. 
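+
+        If no session is attached to the cursor yet, an implicit session is
+        also acquired from the client here before the query is sent.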
+ """ + if len(self._data) or self._killed: + return len(self._data) + + if not self._session: + self._session = self._collection.database.client._ensure_session() + + if self._id is None: # Query + if (self._min or self._max) and not self._hint: + raise InvalidOperation( + "Passing a 'hint' is required when using the min/max query" + " option to ensure the query utilizes the correct index" + ) + q = self._query_class( + self._query_flags, + self._collection.database.name, + self._collection.name, + self._skip, + self._query_spec(), + self._projection, + self._codec_options, + self._get_read_preference(), + self._limit, + self._batch_size, + self._read_concern, + self._collation, + self._session, + self._collection.database.client, + self._allow_disk_use, + self._exhaust, + ) + self._send_message(q) + elif self._id: # Get More + if self._limit: + limit = self._limit - self._retrieved + if self._batch_size: + limit = min(limit, self._batch_size) + else: + limit = self._batch_size + # Exhaust cursors don't send getMore messages. + g = self._getmore_class( + self._dbname, + self._collname, + limit, + self._id, + self._codec_options, + self._get_read_preference(), + self._session, + self._collection.database.client, + self._max_await_time_ms, + self._sock_mgr, + self._exhaust, + self._comment, + ) + self._send_message(g) + + return len(self._data) + + def rewind(self) -> Cursor[_DocumentType]: + """Rewind this cursor to its unevaluated state. + + Reset this cursor if it has been partially or completely evaluated. + Any options that are present on the cursor will remain in effect. + Future iterating performed on this cursor will cause new queries to + be sent to the server, even if the resultant data has already been + retrieved by this cursor. + """ + self.close() + self._data = deque() + self._id = None + self._address = None + self._retrieved = 0 + self._killed = False + + return self + + def next(self) -> _DocumentType: + """Advance the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + self._supports_exhaust() + if self._empty: + raise StopIteration + if len(self._data) or self._refresh(): + return self._data.popleft() + else: + raise StopIteration + + def _next_batch(self, result: list, total: Optional[int] = None) -> bool: # type: ignore[type-arg] + """Get all or some documents from the cursor.""" + if not self._exhaust_checked: + self._exhaust_checked = True + self._supports_exhaust() + if self._empty: + return False + if len(self._data) or self._refresh(): + if total is None: + result.extend(self._data) + self._data.clear() + else: + for _ in range(min(len(self._data), total)): + result.append(self._data.popleft()) + return True + else: + return False + + def __next__(self) -> _DocumentType: + return self.next() + + def __iter__(self) -> Cursor[_DocumentType]: + return self + + def __enter__(self) -> Cursor[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + @_csot.apply + def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: + """Converts the contents of this cursor to a list more efficiently than ``[doc for doc in cursor]``. + + To use:: + + >>> cursor.to_list() + + Or, to read at most n items from the cursor:: + + >>> cursor.to_list(n) + + If the cursor is empty or has no more results, an empty list will be returned. + + .. 
versionadded:: 4.9 + """ + res: list[_DocumentType] = [] + remaining = length + if isinstance(length, int) and length < 1: + raise ValueError("to_list() length must be greater than 0") + while self.alive: + if not self._next_batch(res, remaining): + break + if length is not None: + remaining = length - len(res) + if remaining == 0: + break + return res + + +class RawBatchCursor(Cursor, Generic[_DocumentType]): # type: ignore[type-arg] + """A cursor / iterator over raw batches of BSON data from a query result.""" + + _query_class = _RawBatchQuery + _getmore_class = _RawBatchGetMore + + def __init__(self, collection: Collection[_DocumentType], *args: Any, **kwargs: Any) -> None: + """Create a new cursor / iterator over raw batches of BSON data. + + Should not be called directly by application developers - + see :meth:`~pymongo.collection.Collection.find_raw_batches` + instead. + + .. seealso:: The MongoDB documentation on `cursors `_. + """ + super().__init__(collection, *args, **kwargs) + + def _unpack_response( + self, + response: Union[_OpReply, _OpMsg], + cursor_id: Optional[int], + codec_options: CodecOptions[Mapping[str, Any]], + user_fields: Optional[Mapping[str, Any]] = None, + legacy_response: bool = False, + ) -> list[_DocumentOut]: + raw_response = response.raw_response(cursor_id, user_fields=user_fields) + if not legacy_response: + # OP_MSG returns firstBatch/nextBatch documents as a BSON array + # Re-assemble the array of documents into a document stream + _convert_raw_document_lists_to_streams(raw_response[0]) + return cast(List["_DocumentOut"], raw_response) + + def explain(self) -> _DocumentType: + """Returns an explain plan record for this cursor. + + .. seealso:: The MongoDB documentation on `explain `_. + """ + clone = self._clone(deepcopy=True, base=Cursor(self.collection)) + return clone.explain() + + def __getitem__(self, index: Any) -> NoReturn: + raise InvalidOperation("Cannot call __getitem__ on RawBatchCursor") diff --git a/pymongo/synchronous/database.py b/pymongo/synchronous/database.py new file mode 100644 index 0000000000..0d129ba972 --- /dev/null +++ b/pymongo/synchronous/database.py @@ -0,0 +1,1462 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Database level operations.""" +from __future__ import annotations + +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, + cast, + overload, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions +from bson.dbref import DBRef +from bson.timestamp import Timestamp +from pymongo import _csot, common +from pymongo.common import _ecoc_coll_name, _esc_coll_name +from pymongo.database_shared import _check_name, _CodecDocumentType +from pymongo.errors import CollectionInvalid, InvalidOperation +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.synchronous.aggregation import _DatabaseAggregationCommand +from pymongo.synchronous.change_stream import DatabaseChangeStream +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.typings import _CollationIn, _DocumentType, _DocumentTypeArg, _Pipeline + +if TYPE_CHECKING: + import bson + import bson.codec_options + from pymongo.read_concern import ReadConcern + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + from pymongo.synchronous.server import Server + from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +class Database(common.BaseObject, Generic[_DocumentType]): + def __init__( + self, + client: MongoClient[_DocumentType], + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> None: + """Get a database by client and name. + + Raises :class:`TypeError` if `name` is not an instance of + :class:`str`. Raises :class:`~pymongo.errors.InvalidName` if + `name` is not a valid database name. + + :param client: A :class:`~pymongo.mongo_client.MongoClient` instance. + :param name: The database name. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) client.codec_options is used. + :param read_preference: The read preference to use. If + ``None`` (the default) client.read_preference is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) client.write_concern is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) client.read_concern is used. + + .. seealso:: The MongoDB documentation on `databases `_. + + .. versionchanged:: 4.0 + Removed the eval, system_js, error, last_status, previous_error, + reset_error_history, authenticate, logout, collection_names, + current_op, add_user, remove_user, profiling_level, + set_profiling_level, and profiling_info methods. + See the :ref:`pymongo4-migration-guide`. + + .. versionchanged:: 3.2 + Added the read_concern option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + :class:`~pymongo.database.Database` no longer returns an instance + of :class:`~pymongo.collection.Collection` for attribute names + with leading underscores. 
You must use dict-style lookups instead:: + + db['__my_collection__'] + + Not: + + db.__my_collection__ + """ + super().__init__( + codec_options or client.codec_options, + read_preference or client.read_preference, + write_concern or client.write_concern, + read_concern or client.read_concern, + ) + + from pymongo.synchronous.mongo_client import MongoClient + + if not isinstance(name, str): + raise TypeError(f"name must be an instance of str, not {type(name)}") + + if not isinstance(client, MongoClient): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "MongoClient" for cls in type(client).__mro__): + raise TypeError(f"MongoClient required but given {type(client).__name__}") + + if name != "$external": + _check_name(name) + + self._name = name + self._client: MongoClient[_DocumentType] = client + self._timeout = client.options.timeout + + @property + def client(self) -> MongoClient[_DocumentType]: + """The client instance for this :class:`Database`.""" + return self._client + + @property + def name(self) -> str: + """The name of this :class:`Database`.""" + return self._name + + @overload + def with_options( + self, + codec_options: None = None, + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Database[_DocumentType]: + ... + + @overload + def with_options( + self, + codec_options: bson.CodecOptions[_DocumentTypeArg], + read_preference: Optional[_ServerMode] = ..., + write_concern: Optional[WriteConcern] = ..., + read_concern: Optional[ReadConcern] = ..., + ) -> Database[_DocumentTypeArg]: + ... + + def with_options( + self, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Database[_DocumentType] | Database[_DocumentTypeArg]: + """Get a clone of this database changing the specified settings. + + >>> db1.read_preference + Primary() + >>> from pymongo.read_preferences import Secondary + >>> db2 = db1.with_options(read_preference=Secondary([{'node': 'analytics'}])) + >>> db1.read_preference + Primary() + >>> db2.read_preference + Secondary(tag_sets=[{'node': 'analytics'}], max_staleness=-1, hedge=None) + + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Database` + is used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Database` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Database` + is used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Database` + is used. + + ..
versionadded:: 3.8 + """ + return Database( + self._client, + self._name, + codec_options or self.codec_options, + read_preference or self.read_preference, + write_concern or self.write_concern, + read_concern or self.read_concern, + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Database): + return self._client == other.client and self._name == other.name + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash((self._client, self._name)) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._client!r}, {self._name!r})" + + def __getattr__(self, name: str) -> Collection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" collection, use database[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> Collection[_DocumentType]: + """Get a collection of this database by name. + + Raises InvalidName if an invalid collection name is used. + + :param name: the name of the collection to get + """ + return Collection(self, name) + + def get_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> Collection[_DocumentType]: + """Get a :class:`~pymongo.collection.Collection` with the given name + and options. + + Useful for creating a :class:`~pymongo.collection.Collection` with + different codec options, read preference, and/or write concern from + this :class:`Database`. + + >>> db.read_preference + Primary() + >>> coll1 = db.test + >>> coll1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> coll2 = db.get_collection( + ... 'test', read_preference=ReadPreference.SECONDARY) + >>> coll2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the collection - a string. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`Database` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Database` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Database` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Database` is + used. 
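To make the distinction concrete, a minimal sketch contrasting `with_options` (clones the whole `Database`) with `get_collection` (configures a single `Collection` handle); the names are illustrative and a default local client is assumed.

    from pymongo import MongoClient, ReadPreference
    from pymongo.write_concern import WriteConcern

    db = MongoClient().test

    # Clone of the database; the original keeps its settings.
    majority_db = db.with_options(write_concern=WriteConcern(w="majority"))

    # One collection handle that reads from secondaries.
    events = db.get_collection("events", read_preference=ReadPreference.SECONDARY)

Neither call touches the server; both simply return new handles carrying the requested options.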
+ """ + return Collection( + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + + def _get_encrypted_fields( + self, kwargs: Mapping[str, Any], coll_name: str, ask_db: bool + ) -> Optional[Mapping[str, Any]]: + encrypted_fields = kwargs.get("encryptedFields") + if encrypted_fields: + return cast(Mapping[str, Any], deepcopy(encrypted_fields)) + if ( + self.client.options.auto_encryption_opts + and self.client.options.auto_encryption_opts._encrypted_fields_map + and self.client.options.auto_encryption_opts._encrypted_fields_map.get( + f"{self.name}.{coll_name}" + ) + ): + return cast( + Mapping[str, Any], + deepcopy( + self.client.options.auto_encryption_opts._encrypted_fields_map[ + f"{self.name}.{coll_name}" + ] + ), + ) + if ask_db and self.client.options.auto_encryption_opts: + options = self[coll_name].options() + if options.get("encryptedFields"): + return cast(Mapping[str, Any], deepcopy(options["encryptedFields"])) + return None + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'Database' object is not iterable") + + next = __next__ + + def __bool__(self) -> NoReturn: + raise NotImplementedError( + f"{type(self).__name__} objects do not implement truth " + "value testing or bool(). Please compare " + "with None instead: database is not None" + ) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> DatabaseChangeStream[_DocumentType]: + """Watch changes on this database. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.DatabaseChangeStream` cursor which + iterates over changes on all collections in this database. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + with db.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.DatabaseChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.DatabaseChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with db.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. + + :param pipeline: A list of aggregation pipeline stages to + append to an initial ``$changeStream`` stage. Not all + pipeline stages are valid after a ``$changeStream`` stage, see the + MongoDB documentation on change streams for the supported stages. 
+ :param full_document: The fullDocument to pass as an option + to the ``$changeStream`` stage. Allowed values: 'updateLookup', + 'whenAvailable', 'required'. When set to 'updateLookup', the + change notification for partial updates will include both a delta + describing the changes to the document, as well as a copy of the + entire document that was changed from some time after the change + occurred. + :param full_document_before_change: Allowed values: 'whenAvailable' + and 'required'. Change events may now result in a + 'fullDocumentBeforeChange' response field. + :param resume_after: A resume token. If provided, the + change stream will start returning changes that occur directly + after the operation specified in the resume token. A resume token + is the _id value of a change document. + :param max_await_time_ms: The maximum time in milliseconds + for the server to wait for changes before responding to a getMore + operation. + :param batch_size: The maximum number of documents to return + per batch. + :param collation: The :class:`~pymongo.collation.Collation` + to use for the aggregation. + :param start_at_operation_time: If provided, the resulting + change stream will only return changes that occurred at or after + the specified :class:`~bson.timestamp.Timestamp`. Requires + MongoDB >= 4.0. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param start_after: The same as `resume_after` except that + `start_after` can resume notifications after an invalidate event. + This option and `resume_after` are mutually exclusive. + :param comment: A user-provided comment to attach to this + command. + :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`. + + :return: A :class:`~pymongo.change_stream.DatabaseChangeStream` cursor. + + .. versionchanged:: 4.3 + Added `show_expanded_events` parameter. + + .. versionchanged:: 4.2 + Added ``full_document_before_change`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.9 + Added the ``start_after`` parameter. + + .. versionadded:: 3.7 + + .. seealso:: The MongoDB documentation on `changeStreams `_. + + .. _change streams specification: + https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md + """ + change_stream = DatabaseChangeStream( + self, + pipeline, + full_document, + resume_after, + max_await_time_ms, + batch_size, + collation, + start_at_operation_time, + session, + start_after, + comment, + full_document_before_change, + show_expanded_events=show_expanded_events, + ) + + change_stream._initialize_cursor() + return change_stream + + @_csot.apply + def create_collection( + self, + name: str, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + session: Optional[ClientSession] = None, + check_exists: Optional[bool] = True, + **kwargs: Any, + ) -> Collection[_DocumentType]: + """Create a new :class:`~pymongo.collection.Collection` in this + database. + + Normally collection creation is automatic. This method should + only be used to specify options on + creation. :class:`~pymongo.errors.CollectionInvalid` will be + raised if the collection already exists. + + :param name: the name of the collection to create + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. 
If ``None`` (the + default) the :attr:`codec_options` of this :class:`Database` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`Database` is used. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`Database` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`Database` is + used. + :param collation: An instance of + :class:`~pymongo.collation.Collation`. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param check_exists: if True (the default), send a listCollections command to + check if the collection already exists before creation. + :param kwargs: additional keyword arguments will + be passed as options for the `create collection command`_ + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. Valid options include, but are not + limited to: + + - ``size`` (int): desired initial size for the collection (in + bytes). For capped collections this size is the max + size of the collection. + - ``capped`` (bool): if True, this is a capped collection + - ``max`` (int): maximum number of objects if capped (optional) + - ``timeseries`` (dict): a document specifying configuration options for + timeseries collections + - ``expireAfterSeconds`` (int): the number of seconds after which a + document in a timeseries collection expires + - ``validator`` (dict): a document specifying validation rules or expressions + for the collection + - ``validationLevel`` (str): how strictly to apply the + validation rules to existing documents during an update. The default level + is "strict" + - ``validationAction`` (str): whether to "error" on invalid documents + (the default) or just "warn" about the violations but allow invalid + documents to be inserted + - ``indexOptionDefaults`` (dict): a document specifying a default configuration + for indexes when creating a collection + - ``viewOn`` (str): the name of the source collection or view from which + to create the view + - ``pipeline`` (list): a list of aggregation pipeline stages + - ``comment`` (str): a user-provided comment to attach to this command. + This option is only supported on MongoDB >= 4.4. + - ``encryptedFields`` (dict): **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + } + - ``clusteredIndex`` (dict): Document that specifies the clustered index + configuration. It must have the following form:: + + { + // key pattern must be {_id: 1} + key: , // required + unique: , // required, must be `true` + name: , // optional, otherwise automatically generated + v: , // optional, must be `2` if provided + } + - ``changeStreamPreAndPostImages`` (dict): a document with a boolean field ``enabled`` for + enabling pre- and post-images. + + .. 
versionchanged:: 4.2 + Added the ``check_exists``, ``clusteredIndex``, and ``encryptedFields`` parameters. + + .. versionchanged:: 3.11 + This method is now supported inside multi-document transactions + with MongoDB 4.4+. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Added the collation option. + + .. versionchanged:: 3.0 + Added the codec_options, read_preference, and write_concern options. + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + """ + encrypted_fields = self._get_encrypted_fields(kwargs, name, False) + if encrypted_fields: + common.validate_is_mapping("encryptedFields", encrypted_fields) + kwargs["encryptedFields"] = encrypted_fields + + clustered_index = kwargs.get("clusteredIndex") + if clustered_index: + common.validate_is_mapping("clusteredIndex", clustered_index) + + with self._client._tmp_session(session) as s: + if s and not s.in_transaction: + s._leave_alive = True + # Skip this check in a transaction where listCollections is not + # supported. + if ( + check_exists + and (not s or not s.in_transaction) + and name in self._list_collection_names(filter={"name": name}, session=s) + ): + raise CollectionInvalid("collection %s already exists" % name) + if s: + s._leave_alive = False + coll = Collection( + self, + name, + False, + codec_options, + read_preference, + write_concern, + read_concern, + ) + coll._create(kwargs, s) + + return coll + + def aggregate( + self, pipeline: _Pipeline, session: Optional[ClientSession] = None, **kwargs: Any + ) -> CommandCursor[_DocumentType]: + """Perform a database-level aggregation. + + See the `aggregation pipeline`_ documentation for a list of stages + that are supported. + + .. code-block:: python + + # Lists all operations currently running on the server. + with client.admin.aggregate([{"$currentOp": {}}]) as cursor: + for operation in cursor: + print(operation) + + The :meth:`aggregate` method obeys the :attr:`read_preference` of this + :class:`Database`, except when ``$out`` or ``$merge`` are used, in + which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY` + is used. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement. + + .. note:: This method does not support the 'explain' option. Please + use :meth:`~pymongo.database.Database.command` instead. + + .. note:: The :attr:`~pymongo.database.Database.write_concern` of + this collection is automatically applied to this operation. + + :param pipeline: a list of aggregation pipeline stages + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param kwargs: extra `aggregate command`_ parameters. + + All optional `aggregate command`_ parameters should be passed as + keyword arguments to this method. Valid options include, but are not + limited to: + + - `allowDiskUse` (bool): Enables writing to temporary files. When set + to True, aggregation stages can write data to the _tmp subdirectory + of the --dbpath directory. The default is False. + - `maxTimeMS` (int): The maximum amount of time to allow the operation + to run in milliseconds. 
+ - `batchSize` (int): The maximum number of documents to return per + batch. Ignored if the connected mongod or mongos does not support + returning aggregate results using a cursor. + - `collation` (optional): An instance of + :class:`~pymongo.collation.Collation`. + - `let` (dict): A dict of parameter names and values. Values must be + constant or closed expressions that do not reference document + fields. Parameters can then be accessed as variables in an + aggregate expression context (e.g. ``"$$var"``). This option is + only supported on MongoDB >= 5.0. + + :return: A :class:`~pymongo.command_cursor.CommandCursor` over the result + set. + + .. versionadded:: 3.9 + + .. _aggregation pipeline: + https://mongodb.com/docs/manual/reference/operator/aggregation-pipeline + + .. _aggregate command: + https://mongodb.com/docs/manual/reference/command/aggregate + """ + with self.client._tmp_session(session) as s: + cmd = _DatabaseAggregationCommand( + self, + CommandCursor, + pipeline, + kwargs, + user_fields={"cursor": {"firstBatch": 1}}, + ) + return self.client._retryable_read( + cmd.get_cursor, + cmd.get_read_preference(s), # type: ignore[arg-type] + s, + retryable=not cmd._performs_write, + operation=_Op.AGGREGATE, + ) + + @overload + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[dict[str, Any]] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[_CodecDocumentType] = ..., + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... 
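As a hedged end-to-end sketch of the database-level helpers defined above (`create_collection`, `aggregate`, and `watch`): the collection name and pipelines are placeholders, `$currentOp` must run against the `admin` database as in the docstring example, and `watch` requires a replica set or sharded cluster.

    from pymongo import MongoClient

    client = MongoClient()
    db = client.test

    # Explicit creation, used here to pass creation-time options.
    db.create_collection("log", capped=True, size=2**20)

    # Database-level aggregation over the admin database.
    with client.admin.aggregate([{"$currentOp": {}}]) as cursor:
        for op in cursor:
            print(op.get("op"))

    # Change stream over every collection in the database.
    with db.watch([{"$match": {"operationType": "insert"}}]) as stream:
        for change in stream:
            print(change["ns"], change["documentKey"])
            break  # sketch only; a real consumer would keep iterating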
+ + def _command( + self, + conn: Connection, + command: Union[str, MutableMapping[str, Any]], + value: int = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: Union[ + CodecOptions[dict[str, Any]], CodecOptions[_CodecDocumentType] + ] = DEFAULT_CODEC_OPTIONS, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + session: Optional[ClientSession] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Internal command helper.""" + if isinstance(command, str): + command = {command: value} + + command.update(kwargs) + with self._client._tmp_session(session) as s: + return conn.command( + self._name, + command, + read_preference, + codec_options, # type: ignore[arg-type] + check, + allowable_errors, + write_concern=write_concern, + parse_write_concern_error=parse_write_concern_error, + session=s, + client=self._client, + ) + + @overload + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: None = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> dict[str, Any]: + ... + + @overload + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: CodecOptions[_CodecDocumentType] = ..., + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> _CodecDocumentType: + ... + + @_csot.apply + def command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[bson.codec_options.CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Union[dict[str, Any], _CodecDocumentType]: + """Issue a MongoDB command. + + Send command `command` to the database and return the + response. If `command` is an instance of :class:`str` + then the command {`command`: `value`} will be sent. + Otherwise, `command` must be an instance of + :class:`dict` and will be sent as is. + + Any additional keyword arguments will be added to the final + command document before it is sent. + + For example, a command like ``{buildinfo: 1}`` can be sent + using: + + >>> db.command("buildinfo") + OR + >>> db.command({"buildinfo": 1}) + + For a command where the value matters, like ``{count: + collection_name}`` we can do: + + >>> db.command("count", collection_name) + OR + >>> db.command({"count": collection_name}) + + For commands that take additional arguments we can use + kwargs. So ``{count: collection_name, query: query}`` becomes: + + >>> db.command("count", collection_name, query=query) + OR + >>> db.command({"count": collection_name, "query": query}) + + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. 
`findandmodify`) + should be done with this in mind. + + :param value: value to use for the command verb when + `command` is passed as a string + :param check: check the response for errors, raising + :class:`~pymongo.errors.OperationFailure` if there are any + :param allowable_errors: if `check` is ``True``, error messages + in this list will be ignored by error-checking + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + + .. note:: :meth:`command` does **not** obey this Database's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see `versioned API `_), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.0 + Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`, + and `secondary_acceptable_latency_ms` option. + Removed `compile_re` option: PyMongo now always represents BSON + regular expressions as :class:`~bson.regex.Regex` objects. Use + :meth:`~bson.regex.Regex.try_compile` to attempt to convert from a + BSON regular expression to a Python regular expression object. + Added the ``codec_options`` parameter. + + .. seealso:: The MongoDB documentation on `commands `_. + """ + opts = codec_options or DEFAULT_CODEC_OPTIONS + if comment is not None: + kwargs["comment"] = comment + + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + if read_preference is None: + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + with self._client._conn_for_reads(read_preference, session, operation=command_name) as ( + connection, + read_preference, + ): + return self._command( + connection, + command, + value, + check, + allowable_errors, + read_preference, + opts, # type: ignore[arg-type] + session=session, + **kwargs, + ) + + @_csot.apply + def cursor_command( + self, + command: Union[str, MutableMapping[str, Any]], + value: Any = 1, + read_preference: Optional[_ServerMode] = None, + codec_options: Optional[CodecOptions[_CodecDocumentType]] = None, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + max_await_time_ms: Optional[int] = None, + **kwargs: Any, + ) -> CommandCursor[_DocumentType]: + """Issue a MongoDB command and parse the response as a cursor. + + If the response from the server does not include a cursor field, an error will be thrown. + + Otherwise, behaves identically to issuing a normal MongoDB command. 
+ + :param command: document representing the command to be issued, + or the name of the command (for simple commands only). + + .. note:: the order of keys in the `command` document is + significant (the "verb" must come first), so commands + which require multiple keys (e.g. `findandmodify`) + should use an instance of :class:`~bson.son.SON` or + a string and kwargs instead of a Python `dict`. + + :param value: value to use for the command verb when + `command` is passed as a string + :param read_preference: The read preference for this + operation. See :mod:`~pymongo.read_preferences` for options. + If the provided `session` is in a transaction, defaults to the + read preference configured for the transaction. + Otherwise, defaults to + :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`. + :param codec_options: A :class:`~bson.codec_options.CodecOptions` + instance. + :param session: A + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to future getMores for this + command. + :param max_await_time_ms: The number of ms to wait for more data on future getMores for this command. + :param kwargs: additional keyword arguments will + be added to the command document before it is sent + + .. note:: :meth:`command` does **not** obey this Database's + :attr:`read_preference` or :attr:`codec_options`. You must use the + ``read_preference`` and ``codec_options`` parameters instead. + + .. note:: :meth:`command` does **not** apply any custom TypeDecoders + when decoding the command response. + + .. note:: If this client has been configured to use MongoDB Stable + API (see `versioned API `_), then :meth:`command` will + automatically add API versioning options to the given command. + Explicitly adding API versioning options in the command and + declaring an API version on the client is not supported. + + .. seealso:: The MongoDB documentation on `commands `_. 
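A short sketch contrasting `command` (returns one reply document) with `cursor_command` (the reply must contain a cursor); `ping`, `count`, and `find` are real server commands, while the collection name and filter are placeholders.

    from pymongo import MongoClient

    db = MongoClient().test

    db.command("ping")             # sends {ping: 1}
    db.command("count", "events")  # sends {count: "events"}

    # The find command replies with a "cursor" field, so cursor_command
    # wraps the reply in a CommandCursor; command() would not iterate it.
    with db.cursor_command("find", "events", filter={"status": "active"}) as cursor:
        for doc in cursor:
            print(doc["_id"])

If the reply carries no `cursor` field, `cursor_command` raises `InvalidOperation`, as the implementation below shows.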
+ """ + if isinstance(command, str): + command_name = command + else: + command_name = next(iter(command)) + + with self._client._tmp_session(session) as tmp_session: + opts = codec_options or DEFAULT_CODEC_OPTIONS + + if read_preference is None: + read_preference = ( + tmp_session and tmp_session._txn_read_preference() + ) or ReadPreference.PRIMARY + with self._client._conn_for_reads(read_preference, tmp_session, command_name) as ( + conn, + read_preference, + ): + response = self._command( + conn, + command, + value, + True, + None, + read_preference, + opts, + session=tmp_session, + **kwargs, + ) + coll = self.get_collection("$cmd", read_preference=read_preference) + if response.get("cursor"): + cmd_cursor = CommandCursor( + coll, + response["cursor"], + conn.address, + max_await_time_ms=max_await_time_ms, + session=tmp_session, + comment=comment, + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + else: + raise InvalidOperation("Command does not return a cursor.") + + def _retryable_read_command( + self, + command: Union[str, MutableMapping[str, Any]], + operation: str, + session: Optional[ClientSession] = None, + ) -> dict[str, Any]: + """Same as command but used for retryable read commands.""" + read_preference = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> dict[str, Any]: + return self._command( + conn, + command, + read_preference=read_preference, + session=session, + ) + + return self._client._retryable_read(_cmd, read_preference, session, operation) + + def _list_collections( + self, + conn: Connection, + session: Optional[ClientSession], + read_preference: _ServerMode, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Internal listCollections helper.""" + coll = cast( + Collection[MutableMapping[str, Any]], + self.get_collection("$cmd", read_preference=read_preference), + ) + cmd = {"listCollections": 1, "cursor": {}} + cmd.update(kwargs) + with self._client._tmp_session(session) as tmp_session: + cursor = ( + self._command(conn, cmd, read_preference=read_preference, session=tmp_session) + )["cursor"] + cmd_cursor = CommandCursor( + coll, + cursor, + conn.address, + session=tmp_session, + comment=cmd.get("comment"), + ) + cmd_cursor._maybe_pin_connection(conn) + return cmd_cursor + + def _list_collections_helper( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. 
versionadded:: 3.6 + """ + if filter is not None: + kwargs["filter"] = filter + read_pref = (session and session._txn_read_preference()) or ReadPreference.PRIMARY + if comment is not None: + kwargs["comment"] = comment + + def _cmd( + session: Optional[ClientSession], + _server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> CommandCursor[MutableMapping[str, Any]]: + return self._list_collections(conn, session, read_preference=read_preference, **kwargs) + + return self._client._retryable_read( + _cmd, read_pref, session, operation=_Op.LIST_COLLECTIONS + ) + + def list_collections( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[MutableMapping[str, Any]]: + """Get a cursor over the collections of this database. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). + If a cursor is not exhausted, it will be closed automatically upon garbage collection, which leaves resources open but unused for a potentially long period of time. + To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed, + or use the cursor in a with statement:: + + with database.list_collections() as cursor: + for collection in cursor: + print(collection) + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. The supported + options differ by server version. + + + :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`. + + .. versionadded:: 3.6 + """ + return self._list_collections_helper(session, filter, comment, **kwargs) + + def _list_collection_names( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + if comment is not None: + kwargs["comment"] = comment + if filter is None: + kwargs["nameOnly"] = True + + else: + # The enumerate collections spec states that "drivers MUST NOT set + # nameOnly if a filter specifies any keys other than name." + common.validate_is_mapping("filter", filter) + kwargs["filter"] = filter + if not filter or (len(filter) == 1 and "name" in filter): + kwargs["nameOnly"] = True + + return [ + result["name"] for result in self._list_collections_helper(session=session, **kwargs) + ] + + def list_collection_names( + self, + session: Optional[ClientSession] = None, + filter: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> list[str]: + """Get a list of all the collection names in this database. + + For example, to list all non-system collections:: + + filter = {"name": {"$regex": r"^(?!system\\.)"}} + db.list_collection_names(filter=filter) + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param filter: A query document to filter the list of + collections returned from the listCollections command. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: Optional parameters of the + `listCollections command + `_ + can be passed as keyword arguments to this method. 
The supported + options differ by server version. + + + .. versionchanged:: 3.8 + Added the ``filter`` and ``**kwargs`` parameters. + + .. versionadded:: 3.6 + """ + return self._list_collection_names(session, filter, comment, **kwargs) + + def _drop_helper( + self, name: str, session: Optional[ClientSession] = None, comment: Optional[Any] = None + ) -> dict[str, Any]: + command = {"drop": name} + if comment is not None: + command["comment"] = comment + + with self._client._conn_for_writes(session, operation=_Op.DROP) as connection: + return self._command( + connection, + command, + allowable_errors=["ns not found", 26], + write_concern=self._write_concern_for(session), + parse_write_concern_error=True, + session=session, + ) + + @_csot.apply + def drop_collection( + self, + name_or_collection: Union[str, Collection[_DocumentTypeArg]], + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + encrypted_fields: Optional[Mapping[str, Any]] = None, + ) -> dict[str, Any]: + """Drop a collection. + + :param name_or_collection: the name of a collection to drop or the + collection object itself + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for + Queryable Encryption. For example:: + + { + "escCollection": "enxcol_.encryptedCollection.esc", + "ecocCollection": "enxcol_.encryptedCollection.ecoc", + "fields": [ + { + "path": "firstName", + "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + { + "path": "ssn", + "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), + "bsonType": "string" + } + ] + + } + + + .. note:: The :attr:`~pymongo.database.Database.write_concern` of + this database is automatically applied to this operation. + + .. versionchanged:: 4.2 + Added ``encrypted_fields`` parameter. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. versionchanged:: 3.4 + Apply this database's write concern automatically to this operation + when connected to MongoDB >= 3.4. + + """ + name = name_or_collection + if isinstance(name, Collection): + name = name.name + + if not isinstance(name, str): + raise TypeError(f"name_or_collection must be an instance of str, not {type(name)}") + encrypted_fields = self._get_encrypted_fields( + {"encryptedFields": encrypted_fields}, + name, + True, + ) + if encrypted_fields: + common.validate_is_mapping("encrypted_fields", encrypted_fields) + self._drop_helper( + _esc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + self._drop_helper( + _ecoc_coll_name(encrypted_fields, name), session=session, comment=comment + ) + + return self._drop_helper(name, session, comment) + + def validate_collection( + self, + name_or_collection: Union[str, Collection[_DocumentTypeArg]], + scandata: bool = False, + full: bool = False, + session: Optional[ClientSession] = None, + background: Optional[bool] = None, + comment: Optional[Any] = None, + ) -> dict[str, Any]: + """Validate a collection. + + Returns a dict of validation info. Raises CollectionInvalid if + validation fails. + + See also the MongoDB documentation on the `validate command`_. + + :param name_or_collection: A Collection object or the name of a + collection to validate. 
+ :param scandata: Do extra checks beyond checking the overall + structure of the collection. + :param full: Have the server do a more thorough scan of the + collection. Use with `scandata` for a thorough scan + of the structure of the collection and the individual + documents. + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param background: A boolean flag that determines whether + the command runs in the background. Requires MongoDB 4.4+. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.11 + Added ``background`` parameter. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + + .. _validate command: https://mongodb.com/docs/manual/reference/command/validate/ + """ + name = name_or_collection + if isinstance(name, Collection): + name = name.name + + if not isinstance(name, str): + raise TypeError( + f"name_or_collection must be an instance of str or Collection, not {type(name)}" + ) + cmd = {"validate": name, "scandata": scandata, "full": full} + if comment is not None: + cmd["comment"] = comment + + if background is not None: + cmd["background"] = background + + result = self.command(cmd, session=session) + + valid = True + # Pre 1.9 results + if "result" in result: + info = result["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + # Sharded results + elif "raw" in result: + for _, res in result["raw"].items(): + if "result" in res: + info = res["result"] + if info.find("exception") != -1 or info.find("corrupt") != -1: + raise CollectionInvalid(f"{name} invalid: {info}") + elif not res.get("valid", False): + valid = False + break + # Post 1.9 non-sharded results. + elif not result.get("valid", False): + valid = False + + if not valid: + raise CollectionInvalid(f"{name} invalid: {result!r}") + + return result + + def dereference( + self, + dbref: DBRef, + session: Optional[ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> Optional[_DocumentType]: + """Dereference a :class:`~bson.dbref.DBRef`, getting the + document it points to. + + Raises :class:`TypeError` if `dbref` is not an instance of + :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if + the reference does not point to a valid document. Raises + :class:`ValueError` if `dbref` has a database specified that + is different from the current database. + + :param dbref: the reference + :param session: a + :class:`~pymongo.client_session.ClientSession`. + :param comment: A user-provided comment to attach to this + command. + :param kwargs: any additional keyword arguments + are the same as the arguments to + :meth:`~pymongo.collection.Collection.find`. + + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + .. versionchanged:: 3.6 + Added ``session`` parameter. 
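A compact sketch of the maintenance helpers above (`validate_collection`, `dereference`, `drop_collection`); the collection name and document are placeholders.

    from bson.dbref import DBRef
    from pymongo import MongoClient

    db = MongoClient().test
    event_id = db.events.insert_one({"kind": "signup"}).inserted_id

    # Raises CollectionInvalid if validation fails; otherwise returns
    # the validate command's response document.
    info = db.validate_collection("events", full=True)
    print(info.get("valid"))

    # DBRefs resolve against this database via a find_one call.
    doc = db.dereference(DBRef("events", event_id))

    # Dropping a missing collection is not an error: "ns not found"
    # is in the allowable_errors list of the drop helper above.
    db.drop_collection("events")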
+ """ + if not isinstance(dbref, DBRef): + raise TypeError("cannot dereference a %s" % type(dbref)) + if dbref.database is not None and dbref.database != self._name: + raise ValueError( + "trying to dereference a DBRef that points to " + f"another database ({dbref.database!r} not {self._name!r})" + ) + return self[dbref.collection].find_one( + {"_id": dbref.id}, session=session, comment=comment, **kwargs + ) diff --git a/pymongo/synchronous/encryption.py b/pymongo/synchronous/encryption.py new file mode 100644 index 0000000000..2d666b9763 --- /dev/null +++ b/pymongo/synchronous/encryption.py @@ -0,0 +1,1276 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Support for explicit client-side field level encryption.""" +from __future__ import annotations + +import contextlib +import enum +import socket +import time as time # noqa: PLC0414 # needed in sync version +import uuid +import weakref +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generator, + Generic, + Iterator, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +try: + from pymongocrypt.errors import MongoCryptError # type:ignore[import] + from pymongocrypt.mongocrypt import MongoCryptOptions # type:ignore[import] + from pymongocrypt.synchronous.auto_encrypter import AutoEncrypter # type:ignore[import] + from pymongocrypt.synchronous.explicit_encrypter import ( # type:ignore[import] + ExplicitEncrypter, + ) + from pymongocrypt.synchronous.state_machine import ( # type:ignore[import] + MongoCryptCallback, + ) + + _HAVE_PYMONGOCRYPT = True +except ImportError: + _HAVE_PYMONGOCRYPT = False + MongoCryptCallback = object + +from bson import _dict_to_bson, decode, encode +from bson.binary import STANDARD, UUID_SUBTYPE, Binary +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument, _inflate_bson +from pymongo import _csot +from pymongo.common import CONNECT_TIMEOUT +from pymongo.daemon import _spawn_daemon +from pymongo.encryption_options import ( + AutoEncryptionOpts, + RangeOpts, + TextOpts, + check_min_pymongocrypt, +) +from pymongo.errors import ( + ConfigurationError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + NetworkTimeout, + ServerSelectionTimeoutError, +) +from pymongo.helpers_shared import _get_timeout_details +from pymongo.network_layer import sendall +from pymongo.operations import UpdateOne +from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + _configured_socket, + _raise_connection_failure, +) +from pymongo.read_concern import ReadConcern +from pymongo.results import BulkWriteResult, DeleteResult +from pymongo.ssl_support import BLOCKING_IO_ERRORS, get_ssl_context +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.cursor import Cursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from 
pymongo.typings import _DocumentType, _DocumentTypeArg +from pymongo.uri_parser_shared import _parse_kms_tls_options, parse_host +from pymongo.write_concern import WriteConcern + +if TYPE_CHECKING: + from pymongocrypt.mongocrypt import MongoCryptKmsContext + + from pymongo.pyopenssl_context import _sslConn + from pymongo.typings import _Address + + +_IS_SYNC = True + +_HTTPS_PORT = 443 +_KMS_CONNECT_TIMEOUT = CONNECT_TIMEOUT # CDRIVER-3262 redefined this value to CONNECT_TIMEOUT +_MONGOCRYPTD_TIMEOUT_MS = 10000 + +_DATA_KEY_OPTS: CodecOptions[dict[str, Any]] = CodecOptions( + document_class=Dict[str, Any], uuid_representation=STANDARD +) +# Use RawBSONDocument codec options to avoid needlessly decoding +# documents from the key vault. +_KEY_VAULT_OPTS = CodecOptions(document_class=RawBSONDocument) + + +def _connect_kms(address: _Address, opts: PoolOptions) -> Union[socket.socket, _sslConn]: + try: + return _configured_socket(address, opts) + except Exception as exc: + _raise_connection_failure(address, exc, timeout_details=_get_timeout_details(opts)) + + +@contextlib.contextmanager +def _wrap_encryption_errors() -> Iterator[None]: + """Context manager to wrap encryption related errors.""" + try: + yield + except BSONError: + # BSON encoding/decoding errors are unrelated to encryption so + # we should propagate them unchanged. + raise + except Exception as exc: + raise EncryptionError(exc) from exc + + +class _EncryptionIO(MongoCryptCallback): # type: ignore[misc] + def __init__( + self, + client: Optional[MongoClient[_DocumentTypeArg]], + key_vault_coll: Collection[_DocumentTypeArg], + mongocryptd_client: Optional[MongoClient[_DocumentTypeArg]], + opts: AutoEncryptionOpts, + ): + """Internal class to perform I/O on behalf of pymongocrypt.""" + self.client_ref: Any + # Use a weak ref to break reference cycle. + if client is not None: + self.client_ref = weakref.ref(client) + else: + self.client_ref = None + self.key_vault_coll: Optional[Collection[RawBSONDocument]] = cast( + Collection[RawBSONDocument], + key_vault_coll.with_options( + codec_options=_KEY_VAULT_OPTS, + read_concern=ReadConcern(level="majority"), + write_concern=WriteConcern(w="majority"), + ), + ) + self.mongocryptd_client = mongocryptd_client + self.opts = opts + self._spawned = False + self._kms_ssl_contexts = opts._kms_ssl_contexts(_IS_SYNC) + + def kms_request(self, kms_context: MongoCryptKmsContext) -> None: + """Complete a KMS request. + + :param kms_context: A :class:`MongoCryptKmsContext`. + + :return: None + """ + endpoint = kms_context.endpoint + message = kms_context.message + provider = kms_context.kms_provider + ctx = self._kms_ssl_contexts.get(provider) + if ctx is None: + # Enable strict certificate verification, OCSP, match hostname, and + # SNI using the system default CA certificates. + ctx = get_ssl_context( + None, # certfile + None, # passphrase + None, # ca_certs + None, # crlfile + False, # allow_invalid_certificates + False, # allow_invalid_hostnames + False, # disable_ocsp_endpoint_check + _IS_SYNC, + ) + # CSOT: set timeout for socket creation. 
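+ # _csot.clamp_remaining() caps the KMS connect timeout at the operation's
+ # remaining client-side timeout budget when timeoutMS is active; the 0.001
+ # floor below keeps the resulting socket timeout strictly positive.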
+ connect_timeout = max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0.001) + opts = PoolOptions( + connect_timeout=connect_timeout, + socket_timeout=connect_timeout, + ssl_context=ctx, + ) + address = parse_host(endpoint, _HTTPS_PORT) + sleep_u = kms_context.usleep + if sleep_u: + sleep_sec = float(sleep_u) / 1e6 + time.sleep(sleep_sec) + try: + conn = _connect_kms(address, opts) + try: + sendall(conn, message) + while kms_context.bytes_needed > 0: + # CSOT: update timeout. + conn.settimeout(max(_csot.clamp_remaining(_KMS_CONNECT_TIMEOUT), 0)) + data: memoryview | bytes + if _IS_SYNC: + data = conn.recv(kms_context.bytes_needed) + else: + from pymongo.network_layer import ( # type: ignore[attr-defined] + receive_data_socket, + ) + + data = receive_data_socket(conn, kms_context.bytes_needed) + if not data: + raise OSError("KMS connection closed") + kms_context.feed(data) + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + # Wrap I/O errors in PyMongo exceptions. + if isinstance(exc, BLOCKING_IO_ERRORS): + exc = socket.timeout("timed out") + # Async raises an OSError instead of returning empty bytes. + if isinstance(exc, OSError): + msg_prefix = "KMS connection closed" + else: + msg_prefix = None + _raise_connection_failure( + address, exc, msg_prefix=msg_prefix, timeout_details=_get_timeout_details(opts) + ) + finally: + conn.close() + except MongoCryptError: + raise # Propagate MongoCryptError errors directly. + except Exception as exc: + remaining = _csot.remaining() + if isinstance(exc, NetworkTimeout) or (remaining is not None and remaining <= 0): + raise + # Mark this attempt as failed and defer to libmongocrypt to retry. + try: + kms_context.fail() + except MongoCryptError as final_err: + exc = MongoCryptError( + f"{final_err}, last attempt failed with: {exc}", final_err.code + ) + raise exc from final_err + + def collection_info(self, database: str, filter: bytes) -> Optional[list[bytes]]: + """Get the collection info for a namespace. + + The returned collection info is passed to libmongocrypt which reads + the JSON schema. + + :param database: The database on which to run listCollections. + :param filter: The filter to pass to listCollections. + + :return: All documents from the listCollections command response as BSON. + """ + with self.client_ref()[database].list_collections(filter=RawBSONDocument(filter)) as cursor: + return [_dict_to_bson(doc, False, _DATA_KEY_OPTS) for doc in cursor] + + def spawn(self) -> None: + """Spawn mongocryptd. + + Note this method is thread safe; at most one mongocryptd will start + successfully. + """ + self._spawned = True + args = [self.opts._mongocryptd_spawn_path or "mongocryptd"] + args.extend(self.opts._mongocryptd_spawn_args) + _spawn_daemon(args) + + def mark_command(self, database: str, cmd: bytes) -> bytes | memoryview: + """Mark a command for encryption. + + :param database: The database on which to run this command. + :param cmd: The BSON command to run. + + :return: The marked command response from mongocryptd. + """ + if not self._spawned and not self.opts._mongocryptd_bypass_spawn: + self.spawn() + # Database.command only supports mutable mappings so we need to decode + # the raw BSON command first. 
+ inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) + assert self.mongocryptd_client is not None + try: + res = self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + except ServerSelectionTimeoutError: + if self.opts._mongocryptd_bypass_spawn: + raise + self.spawn() + res = self.mongocryptd_client[database].command( + inflated_cmd, codec_options=DEFAULT_RAW_BSON_OPTIONS + ) + return res.raw + + def fetch_keys(self, filter: bytes) -> Generator[bytes | memoryview, None]: + """Yields one or more keys from the key vault. + + :param filter: The filter to pass to find. + + :return: A generator which yields the requested keys from the key vault. + """ + assert self.key_vault_coll is not None + with self.key_vault_coll.find(RawBSONDocument(filter)) as cursor: + for key in cursor: + yield key.raw + + def insert_data_key(self, data_key: bytes) -> Binary: + """Insert a data key into the key vault. + + :param data_key: The data key document to insert. + + :return: The _id of the inserted data key document. + """ + raw_doc = RawBSONDocument(data_key, _KEY_VAULT_OPTS) + data_key_id = raw_doc.get("_id") + if not isinstance(data_key_id, Binary) or data_key_id.subtype != UUID_SUBTYPE: + raise TypeError( + f"data_key _id must be Binary with a UUID subtype, not {type(data_key_id)}" + ) + + assert self.key_vault_coll is not None + self.key_vault_coll.insert_one(raw_doc) + return data_key_id + + def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: + """Encode a document to BSON. + + A document can be any mapping type (like :class:`dict`). + + :param doc: mapping type representing a document + + :return: The encoded BSON bytes. + """ + return encode(doc) + + def close(self) -> None: + """Release resources. + + Note it is not safe to call this method from __del__ or any GC hooks. + """ + self.client_ref = None + self.key_vault_coll = None + if self.mongocryptd_client: + self.mongocryptd_client.close() + self.mongocryptd_client = None + + +class RewrapManyDataKeyResult: + """Result object returned by a :meth:`~ClientEncryption.rewrap_many_data_key` operation. + + .. versionadded:: 4.2 + """ + + def __init__(self, bulk_write_result: Optional[BulkWriteResult] = None) -> None: + self._bulk_write_result = bulk_write_result + + @property + def bulk_write_result(self) -> Optional[BulkWriteResult]: + """The result of the bulk write operation used to update the key vault + collection with one or more rewrapped data keys. If + :meth:`~ClientEncryption.rewrap_many_data_key` does not find any matching keys to rewrap, + no bulk write operation will be executed and this field will be + ``None``. + """ + return self._bulk_write_result + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._bulk_write_result!r})" + + +class _Encrypter: + """Encrypts and decrypts MongoDB commands. + + This class is used to support automatic encryption and decryption of + MongoDB commands. + """ + + def __init__(self, client: MongoClient[_DocumentTypeArg], opts: AutoEncryptionOpts): + """Create a _Encrypter for a client. + + :param client: The encrypted MongoClient. + :param opts: The encrypted client's :class:`AutoEncryptionOpts`. 
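+
+        Such a pair is typically produced by configuring a client for auto
+        encryption (a minimal sketch; the key material, namespace, and URI
+        below are illustrative assumptions, not defaults)::
+
+            import os
+
+            opts = AutoEncryptionOpts(
+                kms_providers={"local": {"key": os.urandom(96)}},
+                key_vault_namespace="keyvault.datakeys",
+            )
+            client = MongoClient("mongodb://localhost:27017", auto_encryption_opts=opts)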
+ """ + if opts._schema_map is None: + schema_map = None + else: + schema_map = _dict_to_bson(opts._schema_map, False, _DATA_KEY_OPTS) + + if opts._encrypted_fields_map is None: + encrypted_fields_map = None + else: + encrypted_fields_map = _dict_to_bson(opts._encrypted_fields_map, False, _DATA_KEY_OPTS) + self._bypass_auto_encryption = opts._bypass_auto_encryption + self._internal_client = None + # parsing kms_ssl_contexts here so that parsing errors will be raised before internal clients are created + opts._kms_ssl_contexts(_IS_SYNC) + + def _get_internal_client( + encrypter: _Encrypter, mongo_client: MongoClient[_DocumentTypeArg] + ) -> MongoClient[_DocumentTypeArg]: + if mongo_client.options.pool_options.max_pool_size is None: + # Unlimited pool size, use the same client. + return mongo_client + # Else - limited pool size, use an internal client. + if encrypter._internal_client is not None: + return encrypter._internal_client + internal_client = mongo_client._duplicate(minPoolSize=0, auto_encryption_opts=None) + encrypter._internal_client = internal_client + return internal_client + + if opts._key_vault_client is not None: + key_vault_client = opts._key_vault_client + else: + key_vault_client = _get_internal_client(self, client) + + if opts._bypass_auto_encryption: + metadata_client = None + else: + metadata_client = _get_internal_client(self, client) + + db, coll = opts._key_vault_namespace.split(".", 1) + key_vault_coll = key_vault_client[db][coll] + + mongocryptd_client: MongoClient[Mapping[str, Any]] = MongoClient( + opts._mongocryptd_uri, connect=False, serverSelectionTimeoutMS=_MONGOCRYPTD_TIMEOUT_MS + ) + + io_callbacks = _EncryptionIO( # type:ignore[misc] + metadata_client, + key_vault_coll, # type:ignore[arg-type] + mongocryptd_client, + opts, + ) + self._auto_encrypter = AutoEncrypter( + io_callbacks, + _create_mongocrypt_options( + kms_providers=opts._kms_providers, + schema_map=schema_map, + crypt_shared_lib_path=opts._crypt_shared_lib_path, + crypt_shared_lib_required=opts._crypt_shared_lib_required, + bypass_encryption=opts._bypass_auto_encryption, + encrypted_fields_map=encrypted_fields_map, + bypass_query_analysis=opts._bypass_query_analysis, + key_expiration_ms=opts._key_expiration_ms, + ), + ) + self._closed = False + + def encrypt( + self, database: str, cmd: Mapping[str, Any], codec_options: CodecOptions[_DocumentTypeArg] + ) -> dict[str, Any]: + """Encrypt a MongoDB command. + + :param database: The database for this command. + :param cmd: A command document. + :param codec_options: The CodecOptions to use while encoding `cmd`. + + :return: The encrypted command to execute. + """ + self._check_closed() + encoded_cmd = _dict_to_bson(cmd, False, codec_options) + with _wrap_encryption_errors(): + encrypted_cmd = self._auto_encrypter.encrypt(database, encoded_cmd) + # TODO: PYTHON-1922 avoid decoding the encrypted_cmd. + return _inflate_bson(encrypted_cmd, DEFAULT_RAW_BSON_OPTIONS) + + def decrypt(self, response: bytes | memoryview) -> Optional[bytes]: + """Decrypt a MongoDB command response. + + :param response: A MongoDB command response as BSON. + + :return: The decrypted command response. 
+ """ + self._check_closed() + with _wrap_encryption_errors(): + return cast(bytes, self._auto_encrypter.decrypt(response)) + + def _check_closed(self) -> None: + if self._closed: + raise InvalidOperation("Cannot use MongoClient after close") + + def close(self) -> None: + """Cleanup resources.""" + self._closed = True + self._auto_encrypter.close() + if self._internal_client: + self._internal_client.close() + self._internal_client = None + + +class Algorithm(str, enum.Enum): + """An enum that defines the supported encryption algorithms.""" + + AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic = "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + """AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic.""" + AEAD_AES_256_CBC_HMAC_SHA_512_Random = "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + """AEAD_AES_256_CBC_HMAC_SHA_512_Random.""" + INDEXED = "Indexed" + """Indexed. + + .. versionadded:: 4.2 + """ + UNINDEXED = "Unindexed" + """Unindexed. + + .. versionadded:: 4.2 + """ + RANGE = "Range" + """Range. + + .. versionadded:: 4.9 + """ + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - RangePreview. + + .. note:: Support for RangePreview is deprecated. Use :attr:`Algorithm.RANGE` instead. + + .. versionadded:: 4.4 + """ + TEXTPREVIEW = "TextPreview" + """**BETA** - TextPreview. + + .. versionadded:: 4.15 + """ + + +class QueryType(str, enum.Enum): + """An enum that defines the supported values for explicit encryption query_type. + + .. versionadded:: 4.2 + """ + + EQUALITY = "equality" + """Used to encrypt a value for an equality query.""" + + RANGE = "range" + """Used to encrypt a value for a range query. + + .. versionadded:: 4.9 + """ + + RANGEPREVIEW = "RangePreview" + """**DEPRECATED** - Used to encrypt a value for a rangePreview query. + + .. note:: Support for RangePreview is deprecated. Use :attr:`QueryType.RANGE` instead. + + .. versionadded:: 4.4 + """ + + PREFIXPREVIEW = "prefixPreview" + """**BETA** - Used to encrypt a value for a prefixPreview query. + + .. versionadded:: 4.15 + """ + + SUFFIXPREVIEW = "suffixPreview" + """**BETA** - Used to encrypt a value for a suffixPreview query. + + .. versionadded:: 4.15 + """ + + SUBSTRINGPREVIEW = "substringPreview" + """**BETA** - Used to encrypt a value for a substringPreview query. + + .. versionadded:: 4.15 + """ + + +def _create_mongocrypt_options(**kwargs: Any) -> MongoCryptOptions: + # For compat with pymongocrypt <1.13, avoid setting the default key_expiration_ms. + if kwargs.get("key_expiration_ms") is None: + kwargs.pop("key_expiration_ms", None) + return MongoCryptOptions(**kwargs, enable_multiple_collinfo=True) + + +class ClientEncryption(Generic[_DocumentType]): + """Explicit client-side field level encryption.""" + + def __init__( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient[_DocumentTypeArg], + codec_options: CodecOptions[_DocumentTypeArg], + kms_tls_options: Optional[Mapping[str, Any]] = None, + key_expiration_ms: Optional[int] = None, + ) -> None: + """Explicit client-side field level encryption. + + The ClientEncryption class encapsulates explicit operations on a key + vault collection that cannot be done directly on a MongoClient. Similar + to configuring auto encryption on a MongoClient, it is constructed with + a MongoClient (to a MongoDB cluster containing the key vault + collection), KMS provider configuration, and keyVaultNamespace. It + provides an API for explicitly encrypting and decrypting values, and + creating data keys. 
It does not provide an API to query keys from the + key vault collection, as this can be done directly on the MongoClient. + + See `explicit client-side encryption `_ for an example. + + :param kms_providers: Map of KMS provider options. The `kms_providers` + map values differ by provider: + + - `aws`: Map with "accessKeyId" and "secretAccessKey" as strings. + These are the AWS access key ID and AWS secret access key used + to generate KMS messages. An optional "sessionToken" may be + included to support temporary AWS credentials. + - `azure`: Map with "tenantId", "clientId", and "clientSecret" as + strings. Additionally, "identityPlatformEndpoint" may also be + specified as a string (defaults to 'login.microsoftonline.com'). + These are the Azure Active Directory credentials used to + generate Azure Key Vault messages. + - `gcp`: Map with "email" as a string and "privateKey" + as `bytes` or a base64 encoded string. + Additionally, "endpoint" may also be specified as a string + (defaults to 'oauth2.googleapis.com'). These are the + credentials used to generate Google Cloud KMS messages. + - `kmip`: Map with "endpoint" as a host with required port. + For example: ``{"endpoint": "example.com:443"}``. + - `local`: Map with "key" as `bytes` (96 bytes in length) or + a base64 encoded string which decodes + to 96 bytes. "key" is the master key used to encrypt/decrypt + data keys. This key should be generated and stored as securely + as possible. + + KMS providers may be specified with an optional name suffix + separated by a colon, for example "kmip:name" or "aws:name". + Named KMS providers do not support `CSFLE on-demand credentials `_. + :param key_vault_namespace: The namespace for the key vault collection. + The key vault collection contains all data keys used for encryption + and decryption. Data keys are stored as documents in this MongoDB + collection. Data keys are protected with encryption by a KMS + provider. + :param key_vault_client: A MongoClient connected to a MongoDB cluster + containing the `key_vault_namespace` collection. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions` to use when encoding a + value for encryption and decoding the decrypted BSON value. This + should be the same CodecOptions instance configured on the + MongoClient, Database, or Collection used to access application + data. + :param kms_tls_options: A map of KMS provider names to TLS + options to use when creating secure connections to KMS providers. + Accepts the same TLS options as + :class:`pymongo.mongo_client.MongoClient`. For example, to + override the system default CA file:: + + kms_tls_options={'kmip': {'tlsCAFile': certifi.where()}} + + Or to supply a client certificate:: + + kms_tls_options={'kmip': {'tlsCertificateKeyFile': 'client.pem'}} + :param key_expiration_ms: The cache expiration time for data encryption keys. + Defaults to ``None`` which defers to libmongocrypt's default which is currently 60000. + Set to 0 to disable key expiration. + + .. versionchanged:: 4.12 + Added the `key_expiration_ms` parameter. + .. versionchanged:: 4.0 + Added the `kms_tls_options` parameter and the "kmip" KMS provider. + + .. 
versionadded:: 3.9
+        """
+        if not _HAVE_PYMONGOCRYPT:
+            raise ConfigurationError(
+                "client-side field level encryption requires the pymongocrypt "
+                "library: install a compatible version with: "
+                "python -m pip install --upgrade 'pymongo[encryption]'"
+            )
+
+        check_min_pymongocrypt()
+
+        if not isinstance(codec_options, CodecOptions):
+            raise TypeError(
+                f"codec_options must be an instance of bson.codec_options.CodecOptions, not {type(codec_options)}"
+            )
+
+        if not isinstance(key_vault_client, MongoClient):
+            # This is for compatibility with mocked and subclassed types, such as in Motor.
+            if not any(cls.__name__ == "MongoClient" for cls in type(key_vault_client).__mro__):
+                raise TypeError(f"MongoClient required but given {type(key_vault_client).__name__}")
+
+        self._kms_providers = kms_providers
+        self._key_vault_namespace = key_vault_namespace
+        self._key_vault_client = key_vault_client
+        self._codec_options = codec_options
+
+        db, coll = key_vault_namespace.split(".", 1)
+        key_vault_coll = key_vault_client[db][coll]
+
+        opts = AutoEncryptionOpts(
+            kms_providers,
+            key_vault_namespace,
+            kms_tls_options=kms_tls_options,
+            key_expiration_ms=key_expiration_ms,
+        )
+        self._kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC)
+        self._io_callbacks: Optional[_EncryptionIO] = _EncryptionIO(
+            None, key_vault_coll, None, opts
+        )
+        self._encryption = ExplicitEncrypter(
+            self._io_callbacks,
+            _create_mongocrypt_options(
+                kms_providers=kms_providers, schema_map=None, key_expiration_ms=key_expiration_ms
+            ),
+        )
+        # Use the same key vault collection as the callback.
+        assert self._io_callbacks.key_vault_coll is not None
+        self._key_vault_coll = self._io_callbacks.key_vault_coll
+
+    def create_encrypted_collection(
+        self,
+        database: Database[_DocumentTypeArg],
+        name: str,
+        encrypted_fields: Mapping[str, Any],
+        kms_provider: Optional[str] = None,
+        master_key: Optional[Mapping[str, Any]] = None,
+        **kwargs: Any,
+    ) -> tuple[Collection[_DocumentTypeArg], Mapping[str, Any]]:
+        """Create a collection with encryptedFields.
+
+        .. warning::
+            This function does not update the encryptedFieldsMap in the client's
+            AutoEncryptionOpts, so the user must create a new client, using the
+            returned encryptedFields, after calling this function.
+
+        Normally collection creation is automatic. This method should
+        only be used to specify options on
+        creation. :class:`~pymongo.errors.EncryptionError` will be
+        raised if the collection already exists.
+
+        :param database: the database to create the collection
+        :param name: the name of the collection to create
+        :param encrypted_fields: Document that describes the encrypted fields for
+            Queryable Encryption. The "keyId" may be set to ``None`` to auto-generate the data keys. For example:
+
+            .. code-block:: python
+
+                {
+                    "escCollection": "enxcol_.encryptedCollection.esc",
+                    "ecocCollection": "enxcol_.encryptedCollection.ecoc",
+                    "fields": [
+                        {
+                            "path": "firstName",
+                            "keyId": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')),
+                            "bsonType": "string",
+                            "queries": {"queryType": "equality"}
+                        },
+                        {
+                            "path": "ssn",
+                            "keyId": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')),
+                            "bsonType": "string"
+                        }
+                    ]
+                }
+
+        :param kms_provider: the KMS provider to be used
+        :param master_key: Identifies a KMS-specific key used to encrypt the
+            new data key. If the kmsProvider is "local" the `master_key` is
+            not applicable and may be omitted.
+ :param kwargs: additional keyword arguments are the same as "create_collection". + + All optional `create collection command`_ parameters should be passed + as keyword arguments to this method. + See the documentation for :meth:`~pymongo.database.Database.create_collection` for all valid options. + + :raises: - :class:`~pymongo.errors.EncryptedCollectionError`: When either data-key creation or creating the collection fails. + + .. versionadded:: 4.4 + + .. _create collection command: + https://mongodb.com/docs/manual/reference/command/create + + """ + if not isinstance(database, Database): + # This is for compatibility with mocked and subclassed types, such as in Motor. + if not any(cls.__name__ == "Database" for cls in type(database).__mro__): + raise TypeError(f"Database required but given {type(database).__name__}") + + encrypted_fields = deepcopy(encrypted_fields) + for i, field in enumerate(encrypted_fields["fields"]): + if isinstance(field, dict) and field.get("keyId") is None: + try: + encrypted_fields["fields"][i]["keyId"] = self.create_data_key( + kms_provider=kms_provider, # type:ignore[arg-type] + master_key=master_key, + ) + except EncryptionError as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + kwargs["encryptedFields"] = encrypted_fields + kwargs["check_exists"] = False + try: + return ( + database.create_collection(name=name, **kwargs), + encrypted_fields, + ) + except Exception as exc: + raise EncryptedCollectionError(exc, encrypted_fields) from exc + + def create_data_key( + self, + kms_provider: str, + master_key: Optional[Mapping[str, Any]] = None, + key_alt_names: Optional[Sequence[str]] = None, + key_material: Optional[bytes] = None, + ) -> Binary: + """Create and insert a new data key into the key vault collection. + + :param kms_provider: The KMS provider to use. Supported values are + "aws", "azure", "gcp", "kmip", "local", or a named provider like + "kmip:name". + :param master_key: Identifies a KMS-specific key used to encrypt the + new data key. If the kmsProvider is "local" the `master_key` is + not applicable and may be omitted. + + If the `kms_provider` type is "aws" it is required and has the + following fields:: + + - `region` (string): Required. The AWS region, e.g. "us-east-1". + - `key` (string): Required. The Amazon Resource Name (ARN) to + the AWS customer. + - `endpoint` (string): Optional. An alternate host to send KMS + requests to. May include port number, e.g. + "kms.us-east-1.amazonaws.com:443". + + If the `kms_provider` type is "azure" it is required and has the + following fields:: + + - `keyVaultEndpoint` (string): Required. Host with optional + port, e.g. "example.vault.azure.net". + - `keyName` (string): Required. Key name in the key vault. + - `keyVersion` (string): Optional. Version of the key to use. + + If the `kms_provider` type is "gcp" it is required and has the + following fields:: + + - `projectId` (string): Required. The Google cloud project ID. + - `location` (string): Required. The GCP location, e.g. "us-east1". + - `keyRing` (string): Required. Name of the key ring that contains + the key to use. + - `keyName` (string): Required. Name of the key to use. + - `keyVersion` (string): Optional. Version of the key to use. + - `endpoint` (string): Optional. Host with optional port. + Defaults to "cloudkms.googleapis.com". + + If the `kms_provider` type is "kmip" it is optional and has the + following fields:: + + - `keyId` (string): Optional. 
`keyId` is the KMIP Unique
+            Identifier to a 96 byte KMIP Secret Data managed object. If
+            keyId is omitted, the driver creates a random 96 byte KMIP
+            Secret Data managed object.
+          - `endpoint` (string): Optional. Host with optional
+            port, e.g. "example.vault.azure.net:".
+          - `delegated` (bool): Optional. If True (recommended), the
+            KMIP server will perform encryption and decryption. If
+            delegated is not provided, defaults to false.
+
+        :param key_alt_names: An optional list of string alternate
+            names used to reference a key. If a key is created with alternate
+            names, then encryption may refer to the key by the unique alternate
+            name instead of by ``key_id``. The following example shows creating
+            and referring to a data key by alternate name::
+
+              client_encryption.create_data_key("local", key_alt_names=["name1"])
+              # reference the key with the alternate name
+              client_encryption.encrypt("457-55-5462", key_alt_name="name1",
+                                        algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random)
+        :param key_material: Sets the custom key material to be used
+            by the data key for encryption and decryption.
+
+        :return: The ``_id`` of the created data key document as a
+            :class:`~bson.binary.Binary` with subtype
+            :data:`~bson.binary.UUID_SUBTYPE`.
+
+        .. versionchanged:: 4.2
+           Added the `key_material` parameter.
+        """
+        self._check_closed()
+        with _wrap_encryption_errors():
+            return cast(
+                Binary,
+                self._encryption.create_data_key(
+                    kms_provider,
+                    master_key=master_key,
+                    key_alt_names=key_alt_names,
+                    key_material=key_material,
+                ),
+            )
+
+    def _encrypt_helper(
+        self,
+        value: Any,
+        algorithm: str,
+        key_id: Optional[Union[Binary, uuid.UUID]] = None,
+        key_alt_name: Optional[str] = None,
+        query_type: Optional[str] = None,
+        contention_factor: Optional[int] = None,
+        range_opts: Optional[RangeOpts] = None,
+        is_expression: bool = False,
+        text_opts: Optional[TextOpts] = None,
+    ) -> Any:
+        self._check_closed()
+        if isinstance(key_id, uuid.UUID):
+            key_id = Binary.from_uuid(key_id)
+        if key_id is not None and not (
+            isinstance(key_id, Binary) and key_id.subtype == UUID_SUBTYPE
+        ):
+            raise TypeError("key_id must be a bson.binary.Binary with subtype 4")
+
+        doc = encode(
+            {"v": value},
+            codec_options=self._codec_options,
+        )
+        range_opts_bytes = None
+        if range_opts:
+            range_opts_bytes = encode(
+                range_opts.document,
+                codec_options=self._codec_options,
+            )
+        text_opts_bytes = None
+        if text_opts:
+            text_opts_bytes = encode(
+                text_opts.document,
+                codec_options=self._codec_options,
+            )
+        with _wrap_encryption_errors():
+            encrypted_doc = self._encryption.encrypt(
+                value=doc,
+                algorithm=algorithm,
+                key_id=key_id,
+                key_alt_name=key_alt_name,
+                query_type=query_type,
+                contention_factor=contention_factor,
+                range_opts=range_opts_bytes,
+                is_expression=is_expression,
+                # For compatibility with pymongocrypt < 1.16:
+                **{"text_opts": text_opts_bytes} if text_opts_bytes else {},
+            )
+        return decode(encrypted_doc)["v"]
+
+    def encrypt(
+        self,
+        value: Any,
+        algorithm: str,
+        key_id: Optional[Union[Binary, uuid.UUID]] = None,
+        key_alt_name: Optional[str] = None,
+        query_type: Optional[str] = None,
+        contention_factor: Optional[int] = None,
+        range_opts: Optional[RangeOpts] = None,
+        text_opts: Optional[TextOpts] = None,
+    ) -> Binary:
+        """Encrypt a BSON value with a given key and algorithm.
+
+        Note that exactly one of ``key_id`` or ``key_alt_name`` must be
+        provided.
+
+        :param value: The BSON value to encrypt.
+        :param algorithm: (string) The encryption algorithm to use. See
+            :class:`Algorithm` for some valid options.
+        :param key_id: Identifies a data key by ``_id`` which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: Identifies a key vault document by 'keyAltName'.
+        :param query_type: (str) The query type to execute. See :class:`QueryType` for valid options.
+        :param contention_factor: (int) The contention factor to use
+            when the algorithm is :attr:`Algorithm.INDEXED`. An integer value
+            *must* be given when the :attr:`Algorithm.INDEXED` algorithm is
+            used.
+        :param range_opts: Index options for `range` queries. See
+            :class:`RangeOpts` for some valid options.
+        :param text_opts: Index options for `textPreview` queries. See
+            :class:`TextOpts` for some valid options.
+
+        :return: The encrypted value, a :class:`~bson.binary.Binary` with subtype 6.
+
+        .. versionchanged:: 4.15
+           Added the `text_opts` parameter.
+
+        .. versionchanged:: 4.9
+           Added the `range_opts` parameter.
+
+        .. versionchanged:: 4.7
+           ``key_id`` can now be passed in as a :class:`uuid.UUID`.
+
+        .. versionchanged:: 4.2
+           Added the `query_type` and `contention_factor` parameters.
+        """
+        return cast(
+            Binary,
+            self._encrypt_helper(
+                value=value,
+                algorithm=algorithm,
+                key_id=key_id,
+                key_alt_name=key_alt_name,
+                query_type=query_type,
+                contention_factor=contention_factor,
+                range_opts=range_opts,
+                is_expression=False,
+                text_opts=text_opts,
+            ),
+        )
+
+    def encrypt_expression(
+        self,
+        expression: Mapping[str, Any],
+        algorithm: str,
+        key_id: Optional[Union[Binary, uuid.UUID]] = None,
+        key_alt_name: Optional[str] = None,
+        query_type: Optional[str] = None,
+        contention_factor: Optional[int] = None,
+        range_opts: Optional[RangeOpts] = None,
+    ) -> RawBSONDocument:
+        """Encrypt a BSON expression with a given key and algorithm.
+
+        Note that exactly one of ``key_id`` or ``key_alt_name`` must be
+        provided.
+
+        :param expression: The BSON aggregate or match expression to encrypt.
+        :param algorithm: (string) The encryption algorithm to use. See
+            :class:`Algorithm` for some valid options.
+        :param key_id: Identifies a data key by ``_id`` which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: Identifies a key vault document by 'keyAltName'.
+        :param query_type: (str) The query type to execute. See
+            :class:`QueryType` for valid options.
+        :param contention_factor: (int) The contention factor to use
+            when the algorithm is :attr:`Algorithm.INDEXED`. An integer value
+            *must* be given when the :attr:`Algorithm.INDEXED` algorithm is
+            used.
+        :param range_opts: Index options for `range` queries. See
+            :class:`RangeOpts` for some valid options.
+
+        :return: The encrypted expression, a :class:`~bson.RawBSONDocument`.
+
+        .. versionchanged:: 4.9
+           Added the `range_opts` parameter.
+
+        .. versionchanged:: 4.7
+           ``key_id`` can now be passed in as a :class:`uuid.UUID`.
+
+        .. versionadded:: 4.4
+        """
+        return cast(
+            RawBSONDocument,
+            self._encrypt_helper(
+                value=expression,
+                algorithm=algorithm,
+                key_id=key_id,
+                key_alt_name=key_alt_name,
+                query_type=query_type,
+                contention_factor=contention_factor,
+                range_opts=range_opts,
+                is_expression=True,
+            ),
+        )
+
+    def decrypt(self, value: Binary) -> Any:
+        """Decrypt an encrypted value.
+
+        :param value: (Binary) The encrypted value, a
+            :class:`~bson.binary.Binary` with subtype 6.
+
+        :return: The decrypted BSON value.
+        """
+        self._check_closed()
+        if not (isinstance(value, Binary) and value.subtype == 6):
+            raise TypeError("value to decrypt must be a bson.binary.Binary with subtype 6")
+
+        with _wrap_encryption_errors():
+            doc = encode({"v": value})
+            decrypted_doc = self._encryption.decrypt(doc)
+            return decode(decrypted_doc, codec_options=self._codec_options)["v"]
+
+    def get_key(self, id: Binary) -> Optional[RawBSONDocument]:
+        """Get a data key by id.
+
+        :param id: (Binary) The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+
+        :return: The key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return self._key_vault_coll.find_one({"_id": id})
+
+    def get_keys(self) -> Cursor[RawBSONDocument]:
+        """Get all of the data keys.
+
+        :return: An instance of :class:`~pymongo.cursor.Cursor` over the data key
+            documents.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return self._key_vault_coll.find({})
+
+    def delete_key(self, id: Binary) -> DeleteResult:
+        """Delete a key document in the key vault collection that has the given ``key_id``.
+
+        :param id: (Binary) The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+
+        :return: The delete result.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return self._key_vault_coll.delete_one({"_id": id})
+
+    def add_key_alt_name(self, id: Binary, key_alt_name: str) -> Any:
+        """Add ``key_alt_name`` to the set of alternate names in the key document with UUID ``key_id``.
+
+        :param id: The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: The key alternate name to add.
+
+        :return: The previous version of the key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        update = {"$addToSet": {"keyAltNames": key_alt_name}}
+        assert self._key_vault_coll is not None
+        return self._key_vault_coll.find_one_and_update({"_id": id}, update)
+
+    def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]:
+        """Get a key document in the key vault collection that has the given ``key_alt_name``.
+
+        :param key_alt_name: (str) The key alternate name of the key to get.
+
+        :return: The key document.
+
+        .. versionadded:: 4.2
+        """
+        self._check_closed()
+        assert self._key_vault_coll is not None
+        return self._key_vault_coll.find_one({"keyAltNames": key_alt_name})
+
+    def remove_key_alt_name(self, id: Binary, key_alt_name: str) -> Optional[RawBSONDocument]:
+        """Remove ``key_alt_name`` from the set of keyAltNames in the key document with UUID ``id``.
+
+        Also removes the ``keyAltNames`` field from the key document if it would otherwise be empty.
+
+        :param id: The UUID of a key, which must be a
+            :class:`~bson.binary.Binary` with subtype 4 (
+            :attr:`~bson.binary.UUID_SUBTYPE`).
+        :param key_alt_name: The key alternate name to remove.
+
+        :return: The previous version of the key document.
+
+        ..
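+
+        A round-trip sketch (``client_encryption`` and ``key_id`` are
+        assumed to already exist)::
+
+            token = client_encryption.encrypt(
+                "457-55-5462",
+                Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
+                key_id=key_id,
+            )
+            client_encryption.decrypt(token)  # returns "457-55-5462"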
versionadded:: 4.2 + """ + self._check_closed() + pipeline = [ + { + "$set": { + "keyAltNames": { + "$cond": [ + {"$eq": ["$keyAltNames", [key_alt_name]]}, + "$$REMOVE", + { + "$filter": { + "input": "$keyAltNames", + "cond": {"$ne": ["$$this", key_alt_name]}, + } + }, + ] + } + } + } + ] + assert self._key_vault_coll is not None + return self._key_vault_coll.find_one_and_update({"_id": id}, pipeline) + + def rewrap_many_data_key( + self, + filter: Mapping[str, Any], + provider: Optional[str] = None, + master_key: Optional[Mapping[str, Any]] = None, + ) -> RewrapManyDataKeyResult: + """Decrypts and encrypts all matching data keys in the key vault with a possibly new `master_key` value. + + :param filter: A document used to filter the data keys. + :param provider: The new KMS provider to use to encrypt the data keys, + or ``None`` to use the current KMS provider(s). + :param master_key: The master key fields corresponding to the new KMS + provider when ``provider`` is not ``None``. + + :return: A :class:`RewrapManyDataKeyResult`. + + This method allows you to re-encrypt all of your data-keys with a new CMK, or master key. + Note that this does *not* require re-encrypting any of the data in your encrypted collections, + but rather refreshes the key that protects the keys that encrypt the data: + + .. code-block:: python + + client_encryption.rewrap_many_data_key( + filter={"keyAltNames": "optional filter for which keys you want to update"}, + master_key={ + "provider": "azure", # replace with your cloud provider + "master_key": { + # put the rest of your master_key options here + "key": "" + }, + }, + ) + + .. versionadded:: 4.2 + """ + if master_key is not None and provider is None: + raise ConfigurationError("A provider must be given if a master_key is given") + self._check_closed() + with _wrap_encryption_errors(): + raw_result = self._encryption.rewrap_many_data_key(filter, provider, master_key) + if raw_result is None: + return RewrapManyDataKeyResult() + + raw_doc = RawBSONDocument(raw_result, DEFAULT_RAW_BSON_OPTIONS) + replacements = [] + for key in raw_doc["v"]: + update_model = { + "$set": {"keyMaterial": key["keyMaterial"], "masterKey": key["masterKey"]}, + "$currentDate": {"updateDate": True}, + } + op = UpdateOne({"_id": key["_id"]}, update_model) + replacements.append(op) + if not replacements: + return RewrapManyDataKeyResult() + assert self._key_vault_coll is not None + result = self._key_vault_coll.bulk_write(replacements) + return RewrapManyDataKeyResult(result) + + def __enter__(self) -> ClientEncryption[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + def _check_closed(self) -> None: + if self._encryption is None: + raise InvalidOperation("Cannot use closed ClientEncryption") + + def close(self) -> None: + """Release resources. + + Note that using this class in a with-statement will automatically call + :meth:`close`:: + + with ClientEncryption(...) as client_encryption: + encrypted = client_encryption.encrypt(value, ...) + decrypted = client_encryption.decrypt(encrypted) + + """ + if self._io_callbacks: + self._io_callbacks.close() + self._encryption.close() + self._io_callbacks = None + self._encryption = None diff --git a/pymongo/synchronous/helpers.py b/pymongo/synchronous/helpers.py new file mode 100644 index 0000000000..c1b75a3c95 --- /dev/null +++ b/pymongo/synchronous/helpers.py @@ -0,0 +1,86 @@ +# Copyright 2024-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Miscellaneous pieces that need to be synchronized.""" +from __future__ import annotations + +import asyncio +import socket +from typing import ( + Any, + Callable, + TypeVar, + cast, +) + +from pymongo.errors import ( + OperationFailure, +) +from pymongo.helpers_shared import _REAUTHENTICATION_REQUIRED_CODE + +_IS_SYNC = True + +# See https://mypy.readthedocs.io/en/stable/generics.html?#decorator-factories +F = TypeVar("F", bound=Callable[..., Any]) + + +def _handle_reauth(func: F) -> F: + def inner(*args: Any, **kwargs: Any) -> Any: + no_reauth = kwargs.pop("no_reauth", False) + from pymongo.message import _BulkWriteContext + from pymongo.synchronous.pool import Connection + + try: + return func(*args, **kwargs) + except OperationFailure as exc: + if no_reauth: + raise + if exc.code == _REAUTHENTICATION_REQUIRED_CODE: + # Look for an argument that either is a Connection + # or has a connection attribute, so we can trigger + # a reauth. + conn = None + for arg in args: + if isinstance(arg, Connection): + conn = arg + break + if isinstance(arg, _BulkWriteContext): + conn = arg.conn # type: ignore[assignment] + break + if conn: + conn.authenticate(reauthenticate=True) + else: + raise + return func(*args, **kwargs) + raise + + return cast(F, inner) + + +def _getaddrinfo( + host: Any, port: Any, **kwargs: Any +) -> list[ + tuple[ + socket.AddressFamily, + socket.SocketKind, + int, + str, + tuple[str, int] | tuple[str, int, int, int] | tuple[int, bytes], + ] +]: + if not _IS_SYNC: + loop = asyncio.get_running_loop() + return loop.getaddrinfo(host, port, **kwargs) # type: ignore[return-value] + else: + return socket.getaddrinfo(host, port, **kwargs) diff --git a/pymongo/synchronous/mongo_client.py b/pymongo/synchronous/mongo_client.py new file mode 100644 index 0000000000..6e716402f4 --- /dev/null +++ b/pymongo/synchronous/mongo_client.py @@ -0,0 +1,2968 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Tools for connecting to MongoDB. + +.. seealso:: `Read and Write Settings `_ for examples of connecting + to replica sets or sets of mongos servers. + +To get a :class:`~pymongo.database.Database` instance from a +:class:`MongoClient` use either dictionary-style or attribute-style +access: + +.. 
doctest:: + + >>> from pymongo import MongoClient + >>> c = MongoClient() + >>> c.test_database + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test_database') + >>> c["test-database"] + Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True), 'test-database') +""" +from __future__ import annotations + +import asyncio +import contextlib +import os +import warnings +import weakref +from collections import defaultdict +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Collection, + ContextManager, + FrozenSet, + Generator, + Generic, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Type, + TypeVar, + Union, + cast, +) + +from bson.codec_options import DEFAULT_CODEC_OPTIONS, CodecOptions, TypeRegistry +from bson.timestamp import Timestamp +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.client_options import ClientOptions +from pymongo.driver_info import DriverInfo +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ClientBulkWriteException, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, +) +from pymongo.lock import ( + _HAS_REGISTER_AT_FORK, + _create_lock, + _release_locks, +) +from pymongo.logger import ( + _CLIENT_LOGGER, + _COMMAND_LOGGER, + _debug_log, + _log_client_error, + _log_or_warn, +) +from pymongo.message import _CursorAddress, _GetMore, _Query +from pymongo.monitoring import ConnectionClosedReason, _EventListeners +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, + _Op, +) +from pymongo.read_preferences import ReadPreference, _ServerMode +from pymongo.results import ClientBulkWriteResult +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous import client_session, database, uri_parser +from pymongo.synchronous.change_stream import ChangeStream, ClusterChangeStream +from pymongo.synchronous.client_bulk import _ClientBulk +from pymongo.synchronous.client_session import _EmptyServerSession +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext +from pymongo.topology_description import TOPOLOGY_TYPE, TopologyDescription +from pymongo.typings import ( + ClusterTime, + _Address, + _CollationIn, + _DocumentType, + _DocumentTypeArg, + _Pipeline, +) +from pymongo.uri_parser_shared import ( + SRV_SCHEME, + _check_options, + _handle_option_deprecations, + _handle_security_options, + _normalize_options, + _validate_uri, + split_hosts, +) +from pymongo.write_concern import DEFAULT_WRITE_CONCERN, WriteConcern + +if TYPE_CHECKING: + from types import TracebackType + + from bson.objectid import ObjectId + from pymongo.read_concern import ReadConcern + from pymongo.response import Response + from pymongo.server_selectors import Selection + from pymongo.synchronous.bulk import _Bulk + from pymongo.synchronous.client_session import ClientSession, _ServerSession + from pymongo.synchronous.cursor import _ConnectionManager + from pymongo.synchronous.encryption import _Encrypter + from pymongo.synchronous.pool import Connection + from 
pymongo.synchronous.server import Server + + +T = TypeVar("T") + +_WriteCall = Callable[[Optional["ClientSession"], "Connection", bool], T] +_ReadCall = Callable[ + [Optional["ClientSession"], "Server", "Connection", _ServerMode], + T, +] + +_IS_SYNC = True + +_WriteOp = Union[ + InsertOne, # type: ignore[type-arg] + DeleteOne, + DeleteMany, + ReplaceOne, # type: ignore[type-arg] + UpdateOne, + UpdateMany, +] + + +class MongoClient(common.BaseObject, Generic[_DocumentType]): + HOST = "localhost" + PORT = 27017 + # Define order to retrieve options from ClientOptions for __repr__. + # No host/port; these are retrieved from TopologySettings. + _constructor_args = ("document_class", "tz_aware", "connect") + _clients: weakref.WeakValueDictionary = weakref.WeakValueDictionary() # type: ignore[type-arg] + + def __init__( + self, + host: Optional[Union[str, Sequence[str]]] = None, + port: Optional[int] = None, + document_class: Optional[Type[_DocumentType]] = None, + tz_aware: Optional[bool] = None, + connect: Optional[bool] = None, + type_registry: Optional[TypeRegistry] = None, + **kwargs: Any, + ) -> None: + """Client for a MongoDB instance, a replica set, or a set of mongoses. + + .. warning:: Starting in PyMongo 4.0, ``directConnection`` now has a default value of + False instead of None. + For more details, see the relevant section of the PyMongo 4.x migration guide: + :ref:`pymongo4-migration-direct-connection`. + + The client object is thread-safe and has connection-pooling built in. + If an operation fails because of a network error, + :class:`~pymongo.errors.ConnectionFailure` is raised and the client + reconnects in the background. Application code should handle this + exception (recognizing that the operation failed) and then continue to + execute. + + Best practice is to call :meth:`MongoClient.close` when the client is no longer needed, + or use the client in a with statement:: + + with MongoClient(url) as client: + # Use client here. + + The `host` parameter can be a full `mongodb URI + `_, in addition to + a simple hostname. It can also be a list of hostnames but no more + than one URI. Any port specified in the host string(s) will override + the `port` parameter. For username and + passwords reserved characters like ':', '/', '+' and '@' must be + percent encoded following RFC 2396:: + + from urllib.parse import quote_plus + + uri = "mongodb://%s:%s@%s" % ( + quote_plus(user), quote_plus(password), host) + client = MongoClient(uri) + + Unix domain sockets are also supported. The socket path must be percent + encoded in the URI:: + + uri = "mongodb://%s:%s@%s" % ( + quote_plus(user), quote_plus(password), quote_plus(socket_path)) + client = MongoClient(uri) + + But not when passed as a simple hostname:: + + client = MongoClient('/tmp/mongodb-27017.sock') + + Starting with version 3.6, PyMongo supports mongodb+srv:// URIs. The + URI must include one, and only one, hostname. The hostname will be + resolved to one or more DNS `SRV records + `_ which will be used + as the seed list for connecting to the MongoDB deployment. When using + SRV URIs, the `authSource` and `replicaSet` configuration options can + be specified using `TXT records + `_. See the + `Initial DNS Seedlist Discovery spec + `_ + for more details. Note that the use of SRV URIs implicitly enables + TLS support. Pass tls=false in the URI to override. + + .. note:: MongoClient creation will block waiting for answers from + DNS when mongodb+srv:// URIs are used. + + .. 
note:: Starting with version 3.0 the :class:`MongoClient` + constructor no longer blocks while connecting to the server or + servers, and it no longer raises + :class:`~pymongo.errors.ConnectionFailure` if they are + unavailable, nor :class:`~pymongo.errors.ConfigurationError` + if the user's credentials are wrong. Instead, the constructor + returns immediately and launches the connection process on + background threads. You can check if the server is available + like this:: + + from pymongo.errors import ConnectionFailure + client = MongoClient() + try: + # The ping command is cheap and does not require auth. + client.admin.command('ping') + except ConnectionFailure: + print("Server not available") + + .. warning:: When using PyMongo in a multiprocessing context, please + read `PyMongo multiprocessing `_ first. + + .. note:: Many of the following options can be passed using a MongoDB + URI or keyword parameters. If the same option is passed in a URI and + as a keyword parameter the keyword parameter takes precedence. + + :param host: hostname or IP address or Unix domain socket + path of a single mongod or mongos instance to connect to, or a + mongodb URI, or a list of hostnames (but no more than one mongodb + URI). If `host` is an IPv6 literal it must be enclosed in '[' + and ']' characters + following the RFC2732 URL syntax (e.g. '[::1]' for localhost). + Multihomed and round robin DNS addresses are **not** supported. + :param port: port number on which to connect + :param document_class: default class to use for + documents returned from queries on this client + :param tz_aware: if ``True``, + :class:`~datetime.datetime` instances returned as values + in a document by this :class:`MongoClient` will be timezone + aware (otherwise they will be naive) + :param connect: If ``True`` (the default), immediately + begin connecting to MongoDB in the background. Otherwise connect + on the first operation. The default value is ``False`` when + running in a Function-as-a-service environment. + :param type_registry: instance of + :class:`~bson.codec_options.TypeRegistry` to enable encoding + and decoding of custom types. + :param kwargs: **Additional optional parameters available as keyword arguments:** + + - `datetime_conversion` (optional): Specifies how UTC datetimes should be decoded + within BSON. Valid options include 'datetime_ms' to return as a + DatetimeMS, 'datetime' to return as a datetime.datetime and + raising a ValueError for out-of-range values, 'datetime_auto' to + return DatetimeMS objects when the underlying datetime is + out-of-range and 'datetime_clamp' to clamp to the minimum and + maximum possible datetimes. Defaults to 'datetime'. See + `handling out of range datetimes `_ for details. + - `directConnection` (optional): if ``True``, forces this client to + connect directly to the specified MongoDB host as a standalone. + If ``false``, the client connects to the entire replica set of + which the given MongoDB host(s) is a part. If this is ``True`` + and a mongodb+srv:// URI or a URI containing multiple seeds is + provided, an exception will be raised. + - `maxPoolSize` (optional): The maximum allowable number of + concurrent connections to each connected server. Requests to a + server will block if there are `maxPoolSize` outstanding + connections to the requested server. Defaults to 100. Can be + either 0 or None, in which case there is no limit on the number + of concurrent connections. 
+ - `minPoolSize` (optional): The minimum required number of concurrent + connections that the pool will maintain to each connected server. + Default is 0. + - `maxIdleTimeMS` (optional): The maximum number of milliseconds that + a connection can remain idle in the pool before being removed and + replaced. Defaults to `None` (no limit). + - `maxConnecting` (optional): The maximum number of connections that + each pool can establish concurrently. Defaults to `2`. + - `timeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait when executing an operation + (including retry attempts) before raising a timeout error. + ``0`` or ``None`` means no timeout. + - `socketTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait for a response after sending an + ordinary (non-monitoring) database operation before concluding that + a network error has occurred. ``0`` or ``None`` means no timeout. + Defaults to ``None`` (no timeout). + - `connectTimeoutMS`: (integer or None) Controls how long (in + milliseconds) the driver will wait during server monitoring when + connecting a new socket to a server before concluding the server + is unavailable. ``0`` or ``None`` means no timeout. + Defaults to ``20000`` (20 seconds). + - `server_selector`: (callable or None) Optional, user-provided + function that augments server selection rules. The function should + accept as an argument a list of + :class:`~pymongo.server_description.ServerDescription` objects and + return a list of server descriptions that should be considered + suitable for the desired operation. + - `serverSelectionTimeoutMS`: (integer) Controls how long (in + milliseconds) the driver will wait to find an available, + appropriate server to carry out a database operation; while it is + waiting, multiple server monitoring operations may be carried out, + each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30 + seconds). + - `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds) + a thread will wait for a socket from the pool if the pool has no + free sockets. Defaults to ``None`` (no timeout). + - `heartbeatFrequencyMS`: (optional) The number of milliseconds + between periodic server checks, or None to accept the default + frequency of 10 seconds. + - `serverMonitoringMode`: (optional) The server monitoring mode to use. + Valid values are the strings: "auto", "stream", "poll". Defaults to "auto". + - `appname`: (string or None) The name of the application that + created this MongoClient instance. The server will log this value + upon establishing each connection. It is also recorded in the slow + query log and profile collections. + - `driver`: (pair or None) A driver implemented on top of PyMongo can + pass a :class:`~pymongo.driver_info.DriverInfo` to add its name, + version, and platform to the message printed in the server log when + establishing a connection. + - `event_listeners`: a list or tuple of event listeners. See + :mod:`~pymongo.monitoring` for details. + - `retryWrites`: (boolean) Whether supported write operations + executed within this MongoClient will be retried once after a + network error. Defaults to ``True``. + The supported write operations are: + + - :meth:`~pymongo.collection.Collection.bulk_write`, as long as + :class:`~pymongo.operations.UpdateMany` or + :class:`~pymongo.operations.DeleteMany` are not included. 
+ - :meth:`~pymongo.collection.Collection.delete_one` + - :meth:`~pymongo.collection.Collection.insert_one` + - :meth:`~pymongo.collection.Collection.insert_many` + - :meth:`~pymongo.collection.Collection.replace_one` + - :meth:`~pymongo.collection.Collection.update_one` + - :meth:`~pymongo.collection.Collection.find_one_and_delete` + - :meth:`~pymongo.collection.Collection.find_one_and_replace` + - :meth:`~pymongo.collection.Collection.find_one_and_update` + + Unsupported write operations include, but are not limited to, + :meth:`~pymongo.collection.Collection.aggregate` using the ``$out`` + pipeline operator and any operation with an unacknowledged write + concern (e.g. {w: 0})). See + https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md + - `retryReads`: (boolean) Whether supported read operations + executed within this MongoClient will be retried once after a + network error. Defaults to ``True``. + The supported read operations are: + :meth:`~pymongo.collection.Collection.find`, + :meth:`~pymongo.collection.Collection.find_one`, + :meth:`~pymongo.collection.Collection.aggregate` without ``$out``, + :meth:`~pymongo.collection.Collection.distinct`, + :meth:`~pymongo.collection.Collection.count`, + :meth:`~pymongo.collection.Collection.estimated_document_count`, + :meth:`~pymongo.collection.Collection.count_documents`, + :meth:`pymongo.collection.Collection.watch`, + :meth:`~pymongo.collection.Collection.list_indexes`, + :meth:`pymongo.database.Database.watch`, + :meth:`~pymongo.database.Database.list_collections`, + :meth:`pymongo.mongo_client.MongoClient.watch`, + and :meth:`~pymongo.mongo_client.MongoClient.list_databases`. + + Unsupported read operations include, but are not limited to + :meth:`~pymongo.database.Database.command` and any getMore + operation on a cursor. + + Enabling retryable reads makes applications more resilient to + transient errors such as network failures, database upgrades, and + replica set failovers. For an exact definition of which errors + trigger a retry, see the `retryable reads specification + `_. + + - `compressors`: Comma separated list of compressors for wire + protocol compression. The list is used to negotiate a compressor + with the server. Currently supported options are "snappy", "zlib" + and "zstd". Support for snappy requires the + `python-snappy `_ package. + zlib support requires the Python standard library zlib module. zstd + requires the `zstandard `_ + package. By default no compression is used. Compression support + must also be enabled on the server. MongoDB 3.6+ supports snappy + and zlib compression. MongoDB 4.2+ adds support for zstd. + See `compress network traffic `_ for details. + - `zlibCompressionLevel`: (int) The zlib compression level to use + when zlib is used as the wire protocol compressor. Supported values + are -1 through 9. -1 tells the zlib library to use its default + compression level (usually 6). 0 means no compression. 1 is best + speed. 9 is best compression. Defaults to -1. + - `uuidRepresentation`: The BSON representation to use when encoding + from and decoding to instances of :class:`~uuid.UUID`. Valid + values are the strings: "standard", "pythonLegacy", "javaLegacy", + "csharpLegacy", and "unspecified" (the default). New applications + should consider setting this to "standard" for cross language + compatibility. See `handling UUID data `_ for details. 
+ - `unicode_decode_error_handler`: The error handler to apply when + a Unicode-related error occurs during BSON decoding that would + otherwise raise :exc:`UnicodeDecodeError`. Valid options include + 'strict', 'replace', 'backslashreplace', 'surrogateescape', and + 'ignore'. Defaults to 'strict'. + - `srvServiceName`: (string) The SRV service name to use for + "mongodb+srv://" URIs. Defaults to "mongodb". Use it like so:: + + MongoClient("mongodb+srv://example.com/?srvServiceName=customname") + - `srvMaxHosts`: (int) limits the number of mongos-like hosts a client will + connect to. More specifically, when a "mongodb+srv://" connection string + resolves to more than srvMaxHosts number of hosts, the client will randomly + choose an srvMaxHosts sized subset of hosts. + + + | **Write Concern options:** + | (Only set if passed. No default values.) + + - `w`: (integer or string) If this is a replica set, write operations + will block until they have been replicated to the specified number + or tagged set of servers. `w=` always includes the replica set + primary (e.g. w=3 means write to the primary and wait until + replicated to **two** secondaries). Passing w=0 **disables write + acknowledgement** and all other write concern options. + - `wTimeoutMS`: **DEPRECATED** (integer) Used in conjunction with `w`. + Specify a value in milliseconds to control how long to wait for write propagation + to complete. If replication does not complete in the given + timeframe, a timeout exception is raised. Passing wTimeoutMS=0 + will cause **write operations to wait indefinitely**. + - `journal`: If ``True`` block until write operations have been + committed to the journal. Cannot be used in combination with + `fsync`. Write operations will fail with an exception if this + option is used when the server is running without journaling. + - `fsync`: If ``True`` and the server is running without journaling, + blocks until the server has synced all data files to disk. If the + server is running with journaling, this acts the same as the `j` + option, blocking until write operations have been committed to the + journal. Cannot be used in combination with `j`. + + | **Replica set keyword arguments for connecting with a replica set + - either directly or via a mongos:** + + - `replicaSet`: (string or None) The name of the replica set to + connect to. The driver will verify that all servers it connects to + match this name. Implies that the hosts specified are a seed list + and the driver should attempt to find all members of the set. + Defaults to ``None``. + + | **Read Preference:** + + - `readPreference`: The replica set read preference for this client. + One of ``primary``, ``primaryPreferred``, ``secondary``, + ``secondaryPreferred``, or ``nearest``. Defaults to ``primary``. + - `readPreferenceTags`: Specifies a tag set as a comma-separated list + of colon-separated key-value pairs. For example ``dc:ny,rack:1``. + Defaults to ``None``. + - `maxStalenessSeconds`: (integer) The maximum estimated + length of time a replica set secondary can fall behind the primary + in replication before it will no longer be selected for operations. + Defaults to ``-1``, meaning no maximum. If maxStalenessSeconds + is set, it must be a positive integer greater than or equal to + 90 seconds. + + .. seealso:: `Customize Server Selection `_ + + | **Authentication:** + + - `username`: A string. + - `password`: A string. 
+
+ Although username and password must be percent-escaped in a MongoDB
+ URI, they must not be percent-escaped when passed as parameters. In
+ this example, both the space and slash special characters are passed
+ as-is::
+
+ MongoClient(username="user name", password="pass/word")
+
+ - `authSource`: The database to authenticate on. Defaults to the
+ database specified in the URI, if provided, or to "admin".
+ - `authMechanism`: See :data:`~pymongo.auth.MECHANISMS` for options.
+ If no mechanism is specified, PyMongo automatically negotiates the
+ mechanism to use (SCRAM-SHA-1 or SCRAM-SHA-256) with the MongoDB server.
+ - `authMechanismProperties`: Used to specify authentication mechanism
+ specific options. To specify the service name for GSSAPI
+ authentication pass authMechanismProperties='SERVICE_NAME:<service name>'.
+ To specify the session token for MONGODB-AWS authentication pass
+ ``authMechanismProperties='AWS_SESSION_TOKEN:<session token>'``.
+
+ .. seealso:: `Authentication `_
+
+ | **TLS/SSL configuration:**
+
+ - `tls`: (boolean) If ``True``, create the connection to the server
+ using transport layer security. Defaults to ``False``.
+ - `tlsInsecure`: (boolean) Specify whether TLS constraints should be
+ relaxed as much as possible. Setting ``tlsInsecure=True`` implies
+ ``tlsAllowInvalidCertificates=True`` and
+ ``tlsAllowInvalidHostnames=True``. Defaults to ``False``. Think
+ very carefully before setting this to ``True`` as it dramatically
+ reduces the security of TLS.
+ - `tlsAllowInvalidCertificates`: (boolean) If ``True``, continues
+ the TLS handshake regardless of the outcome of the certificate
+ verification process. If this is ``False``, and a value is not
+ provided for ``tlsCAFile``, PyMongo will attempt to load system
+ provided CA certificates. If the Python version in use does not
+ support loading system CA certificates then the ``tlsCAFile``
+ parameter must point to a file of CA certificates.
+ ``tlsAllowInvalidCertificates=False`` implies ``tls=True``.
+ Defaults to ``False``. Think very carefully before setting this
+ to ``True`` as that could make your application vulnerable to
+ on-path attackers.
+ - `tlsAllowInvalidHostnames`: (boolean) If ``True``, disables TLS
+ hostname verification. ``tlsAllowInvalidHostnames=False`` implies
+ ``tls=True``. Defaults to ``False``. Think very carefully before
+ setting this to ``True`` as that could make your application
+ vulnerable to on-path attackers.
+ - `tlsCAFile`: A file containing a single or a bundle of
+ "certification authority" certificates, which are used to validate
+ certificates passed from the other end of the connection.
+ Implies ``tls=True``. Defaults to ``None``.
+ - `tlsCertificateKeyFile`: A file containing the client certificate
+ and private key. Implies ``tls=True``. Defaults to ``None``.
+ - `tlsCRLFile`: A file containing a PEM or DER formatted
+ certificate revocation list. Implies ``tls=True``. Defaults to
+ ``None``.
+ - `tlsCertificateKeyFilePassword`: The password or passphrase for
+ decrypting the private key in ``tlsCertificateKeyFile``. Only
+ necessary if the private key is encrypted. Defaults to ``None``.
+ - `tlsDisableOCSPEndpointCheck`: (boolean) If ``True``, disables
+ certificate revocation status checking via the OCSP responder
+ specified on the server certificate.
+ ``tlsDisableOCSPEndpointCheck=False`` implies ``tls=True``.
+ Defaults to ``False``.
+ - `ssl`: (boolean) Alias for ``tls``.
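+
+ Putting the authentication and TLS options together, a hardened
+ client might be configured as in this sketch; the host, credentials,
+ and certificate paths are placeholders, not defaults::
+
+ client = MongoClient(
+ "mongodb://db.example.com:27017",
+ username="app user", # passed as-is, not percent-escaped
+ password="pass/word",
+ authSource="admin",
+ authMechanism="SCRAM-SHA-256",
+ tls=True,
+ tlsCAFile="/etc/ssl/mongo-ca.pem", # placeholder CA bundle
+ tlsCertificateKeyFile="/etc/ssl/client.pem", # optional client cert + key
+ )
+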
+
+ | **Read Concern options:**
+ | (If not set explicitly, this will use the server default)
+
+ - `readConcernLevel`: (string) The read concern level specifies the
+ level of isolation for read operations. For example, a read
+ operation using a read concern level of ``majority`` will only
+ return data that has been written to a majority of nodes. If the
+ level is left unspecified, the server default will be used.
+
+ | **Client side encryption options:**
+ | (If not set explicitly, client side encryption will not be enabled.)
+
+ - `auto_encryption_opts`: A
+ :class:`~pymongo.encryption_options.AutoEncryptionOpts` which
+ configures this client to automatically encrypt collection commands
+ and automatically decrypt results. See
+ `client-side field level encryption `_ for an example.
+ If a :class:`MongoClient` is configured with
+ ``auto_encryption_opts`` and a non-None ``maxPoolSize``, a
+ separate internal ``MongoClient`` is created if any of the
+ following are true:
+
+ - A ``key_vault_client`` is not passed to
+ :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+ - ``bypass_auto_encryption=False`` is passed to
+ :class:`~pymongo.encryption_options.AutoEncryptionOpts`
+
+ | **Stable API options:**
+ | (If not set explicitly, Stable API will not be enabled.)
+
+ - `server_api`: A
+ :class:`~pymongo.server_api.ServerApi` which configures this
+ client to use Stable API. See `versioned API `_ for
+ details.
+
+ .. seealso:: The MongoDB documentation on `connections `_.
+
+ .. versionchanged:: 4.5
+ Added the ``serverMonitoringMode`` keyword argument.
+
+ .. versionchanged:: 4.2
+ Added the ``timeoutMS`` keyword argument.
+
+ .. versionchanged:: 4.0
+
+ - Removed the fsync, unlock, is_locked, database_names, and
+ close_cursor methods.
+ See the :ref:`pymongo4-migration-guide`.
+ - Removed the ``waitQueueMultiple`` and ``socketKeepAlive``
+ keyword arguments.
+ - The default for `uuidRepresentation` was changed from
+ ``pythonLegacy`` to ``unspecified``.
+ - Added the ``srvServiceName``, ``maxConnecting``, and ``srvMaxHosts`` URI and
+ keyword arguments.
+
+ .. versionchanged:: 3.12
+ Added the ``server_api`` keyword argument.
+ The following keyword arguments were deprecated:
+
+ - ``ssl_certfile`` and ``ssl_keyfile`` were deprecated in favor
+ of ``tlsCertificateKeyFile``.
+
+ .. versionchanged:: 3.11
+ Added the following keyword arguments and URI options:
+
+ - ``tlsDisableOCSPEndpointCheck``
+ - ``directConnection``
+
+ .. versionchanged:: 3.9
+ Added the ``retryReads`` keyword argument and URI option.
+ Added the ``tlsInsecure`` keyword argument and URI option.
+ The following keyword arguments and URI options were deprecated:
+
+ - ``wTimeout`` was deprecated in favor of ``wTimeoutMS``.
+ - ``j`` was deprecated in favor of ``journal``.
+ - ``ssl_cert_reqs`` was deprecated in favor of
+ ``tlsAllowInvalidCertificates``.
+ - ``ssl_match_hostname`` was deprecated in favor of
+ ``tlsAllowInvalidHostnames``.
+ - ``ssl_ca_certs`` was deprecated in favor of ``tlsCAFile``.
+ - ``ssl_certfile`` was deprecated in favor of
+ ``tlsCertificateKeyFile``.
+ - ``ssl_crlfile`` was deprecated in favor of ``tlsCRLFile``.
+ - ``ssl_pem_passphrase`` was deprecated in favor of
+ ``tlsCertificateKeyFilePassword``.
+
+ .. versionchanged:: 3.9
+ ``retryWrites`` now defaults to ``True``.
+
+ .. versionchanged:: 3.8
+ Added the ``server_selector`` keyword argument.
+ Added the ``type_registry`` keyword argument.
+
+ .. versionchanged:: 3.7
+ Added the ``driver`` keyword argument.
+
+ ..
versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + Added the ``retryWrites`` keyword argument and URI option. + + .. versionchanged:: 3.5 + Add ``username`` and ``password`` options. Document the + ``authSource``, ``authMechanism``, and ``authMechanismProperties`` + options. + Deprecated the ``socketKeepAlive`` keyword argument and URI option. + ``socketKeepAlive`` now defaults to ``True``. + + .. versionchanged:: 3.0 + :class:`~pymongo.mongo_client.MongoClient` is now the one and only + client class for a standalone server, mongos, or replica set. + It includes the functionality that had been split into + :class:`~pymongo.mongo_client.MongoReplicaSetClient`: it can connect + to a replica set, discover all its members, and monitor the set for + stepdowns, elections, and reconfigs. + + The :class:`~pymongo.mongo_client.MongoClient` constructor no + longer blocks while connecting to the server or servers, and it no + longer raises :class:`~pymongo.errors.ConnectionFailure` if they + are unavailable, nor :class:`~pymongo.errors.ConfigurationError` + if the user's credentials are wrong. Instead, the constructor + returns immediately and launches the connection process on + background threads. + + Therefore the ``alive`` method is removed since it no longer + provides meaningful information; even if the client is disconnected, + it may discover a server in time to fulfill the next operation. + + In PyMongo 2.x, :class:`~pymongo.MongoClient` accepted a list of + standalone MongoDB servers and used the first it could connect to:: + + MongoClient(['host1.com:27017', 'host2.com:27017']) + + A list of multiple standalones is no longer supported; if multiple + servers are listed they must be members of the same replica set, or + mongoses in the same sharded cluster. + + The behavior for a list of mongoses is changed from "high + availability" to "load balancing". Before, the client connected to + the lowest-latency mongos in the list, and used it until a network + error prompted it to re-evaluate all mongoses' latencies and + reconnect to one of them. In PyMongo 3, the client monitors its + network latency to all the mongoses continuously, and distributes + operations evenly among those with the lowest latency. See + `load balancing `_ for more information. + + The ``connect`` option is added. + + The ``start_request``, ``in_request``, and ``end_request`` methods + are removed, as well as the ``auto_start_request`` option. + + The ``copy_database`` method is removed, see + `Copy and Clone Databases `_ for alternatives. + + The :meth:`MongoClient.disconnect` method is removed; it was a + synonym for :meth:`~pymongo.MongoClient.close`. + + :class:`~pymongo.mongo_client.MongoClient` no longer returns an + instance of :class:`~pymongo.database.Database` for attribute names + with leading underscores. You must use dict-style lookups instead:: + + client['__my_database__'] + + Not:: + + client.__my_database__ + + .. versionchanged:: 4.7 + Deprecated parameter ``wTimeoutMS``, use :meth:`~pymongo.timeout`. + + .. versionchanged:: 4.9 + The default value of ``connect`` is changed to ``False`` when running in a + Function-as-a-service environment. 
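+
+ Because the constructor returns immediately and connects in the
+ background, constructing a client does not by itself prove the
+ deployment is reachable. A common pattern, sketched here against an
+ assumed localhost deployment, is to bound server selection and issue
+ a ``ping``::
+
+ from pymongo.errors import ServerSelectionTimeoutError
+
+ client = MongoClient("mongodb://localhost:27017",
+ serverSelectionTimeoutMS=5000)
+ try:
+ # Forces a round trip; raises if no server is selectable in 5s.
+ client.admin.command("ping")
+ except ServerSelectionTimeoutError:
+ ... # handle an unreachable deployment
+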
+ """ + doc_class = document_class or dict + self._init_kwargs: dict[str, Any] = { + "host": host, + "port": port, + "document_class": doc_class, + "tz_aware": tz_aware, + "connect": connect, + "type_registry": type_registry, + **kwargs, + } + + if host is None: + host = self.HOST + if isinstance(host, str): + host = [host] + if port is None: + port = self.PORT + if not isinstance(port, int): + raise TypeError(f"port must be an instance of int, not {type(port)}") + self._host = host + self._port = port + self._topology: Topology = None # type: ignore[assignment] + self._timeout: float | None = None + self._topology_settings: TopologySettings = None # type: ignore[assignment] + self._event_listeners: _EventListeners | None = None + + # _pool_class, _monitor_class, and _condition_class are for deep + # customization of PyMongo, e.g. Motor. + pool_class = kwargs.pop("_pool_class", None) + monitor_class = kwargs.pop("_monitor_class", None) + condition_class = kwargs.pop("_condition_class", None) + + # Parse options passed as kwargs. + keyword_opts = common._CaseInsensitiveDictionary(kwargs) + keyword_opts["document_class"] = doc_class + self._resolve_srv_info: dict[str, Any] = {"keyword_opts": keyword_opts} + + self._seeds = set() + is_srv = False + username = None + password = None + dbase = None + opts = common._CaseInsensitiveDictionary() + fqdn = None + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + if len([h for h in self._host if "/" in h]) > 1: + raise ConfigurationError("host must not contain multiple MongoDB URIs") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + res = _validate_uri( + entity, + port, + validate=True, + warn=True, + normalize=False, + srv_max_hosts=srv_max_hosts, + ) + is_srv = entity.startswith(SRV_SCHEME) + self._seeds.update(res["nodelist"]) + username = res["username"] or username + password = res["password"] or password + dbase = res["database"] or dbase + opts = res["options"] + fqdn = res["fqdn"] + else: + self._seeds.update(split_hosts(entity, self._port)) + if not self._seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in self._seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + if type_registry is not None: + keyword_opts["type_registry"] = type_registry + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, self._seeds) + + # Username and password passed as kwargs override user info in URI. 
+ username = opts.get("username", username) + password = opts.get("password", password) + self._options = ClientOptions(username, password, dbase, opts, _IS_SYNC) + + self._default_database_name = dbase + self._lock = _create_lock() + self._kill_cursors_queue: list = [] # type: ignore[type-arg] + + self._encrypter: Optional[_Encrypter] = None + + self._resolve_srv_info.update( + { + "is_srv": is_srv, + "username": username, + "password": password, + "dbase": dbase, + "seeds": self._seeds, + "fqdn": fqdn, + "srv_service_name": srv_service_name, + "pool_class": pool_class, + "monitor_class": monitor_class, + "condition_class": condition_class, + } + ) + + super().__init__( + self._options.codec_options, + self._options.read_preference, + self._options.write_concern, + self._options.read_concern, + ) + + self._init_based_on_options(self._seeds, srv_max_hosts, srv_service_name) + + self._opened = False + self._closed = False + self._loop: Optional[asyncio.AbstractEventLoop] = None + if not is_srv: + self._init_background() + + if _IS_SYNC and connect: + self._get_topology() # type: ignore[unused-coroutine] + + def _resolve_srv(self) -> None: + keyword_opts = self._resolve_srv_info["keyword_opts"] + seeds = set() + opts = common._CaseInsensitiveDictionary() + srv_service_name = keyword_opts.get("srvservicename") + srv_max_hosts = keyword_opts.get("srvmaxhosts") + for entity in self._host: + # A hostname can only include a-z, 0-9, '-' and '.'. If we find a '/' + # it must be a URI, + # https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + if "/" in entity: + # Determine connection timeout from kwargs. + timeout = keyword_opts.get("connecttimeoutms") + if timeout is not None: + timeout = common.validate_timeout_or_none_or_zero( + keyword_opts.cased_key("connecttimeoutms"), timeout + ) + res = uri_parser._parse_srv( + entity, + self._port, + validate=True, + warn=True, + normalize=False, + connect_timeout=timeout, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + ) + seeds.update(res["nodelist"]) + opts = res["options"] + else: + seeds.update(split_hosts(entity, self._port)) + + if not seeds: + raise ConfigurationError("need to specify at least one host") + + for hostname in [node[0] for node in seeds]: + if _detect_external_db(hostname): + break + + # Add options with named keyword arguments to the parsed kwarg options. + tz_aware = keyword_opts["tz_aware"] + connect = keyword_opts["connect"] + if tz_aware is None: + tz_aware = opts.get("tz_aware", False) + if connect is None: + # Default to connect=True unless on a FaaS system, which might use fork. + from pymongo.pool_options import _is_faas + + connect = opts.get("connect", not _is_faas()) + keyword_opts["tz_aware"] = tz_aware + keyword_opts["connect"] = connect + + opts = self._validate_kwargs_and_update_opts(keyword_opts, opts) + + if srv_service_name is None: + srv_service_name = opts.get("srvServiceName", common.SRV_SERVICE_NAME) + + srv_max_hosts = srv_max_hosts or opts.get("srvmaxhosts") + opts = self._normalize_and_validate_options(opts, seeds) + + # Username and password passed as kwargs override user info in URI. 
+ username = opts.get("username", self._resolve_srv_info["username"]) + password = opts.get("password", self._resolve_srv_info["password"]) + self._options = ClientOptions( + username, password, self._resolve_srv_info["dbase"], opts, _IS_SYNC + ) + + self._init_based_on_options(seeds, srv_max_hosts, srv_service_name) + + def _init_based_on_options( + self, seeds: Collection[tuple[str, int]], srv_max_hosts: Any, srv_service_name: Any + ) -> None: + self._event_listeners = self._options.pool_options._event_listeners + self._topology_settings = TopologySettings( + seeds=seeds, + replica_set_name=self._options.replica_set_name, + pool_class=self._resolve_srv_info["pool_class"], + pool_options=self._options.pool_options, + monitor_class=self._resolve_srv_info["monitor_class"], + condition_class=self._resolve_srv_info["condition_class"], + local_threshold_ms=self._options.local_threshold_ms, + server_selection_timeout=self._options.server_selection_timeout, + server_selector=self._options.server_selector, + heartbeat_frequency=self._options.heartbeat_frequency, + fqdn=self._resolve_srv_info["fqdn"], + direct_connection=self._options.direct_connection, + load_balanced=self._options.load_balanced, + srv_service_name=srv_service_name, + srv_max_hosts=srv_max_hosts, + server_monitoring_mode=self._options.server_monitoring_mode, + topology_id=self._topology_settings._topology_id if self._topology_settings else None, + ) + if self._options.auto_encryption_opts: + from pymongo.synchronous.encryption import _Encrypter + + self._encrypter = _Encrypter(self, self._options.auto_encryption_opts) + self._timeout = self._options.timeout + + def _normalize_and_validate_options( + self, opts: common._CaseInsensitiveDictionary, seeds: set[tuple[str, int | None]] + ) -> common._CaseInsensitiveDictionary: + # Handle security-option conflicts in combined options. + opts = _handle_security_options(opts) + # Normalize combined options. + opts = _normalize_options(opts) + _check_options(seeds, opts) + return opts + + def _validate_kwargs_and_update_opts( + self, + keyword_opts: common._CaseInsensitiveDictionary, + opts: common._CaseInsensitiveDictionary, + ) -> common._CaseInsensitiveDictionary: + # Handle deprecated options in kwarg options. + keyword_opts = _handle_option_deprecations(keyword_opts) + # Validate kwarg options. + keyword_opts = common._CaseInsensitiveDictionary( + dict(common.validate(keyword_opts.cased_key(k), v) for k, v in keyword_opts.items()) + ) + # Override connection string options with kwarg options. + opts.update(keyword_opts) + return opts + + def _connect(self) -> None: + """Explicitly connect to MongoDB synchronously instead of on the first operation.""" + self._get_topology() + + def _init_background(self, old_pid: Optional[int] = None) -> None: + self._topology = Topology(self._topology_settings) + if _HAS_REGISTER_AT_FORK: + # Add this client to the list of weakly referenced items. + # This will be used later if we fork. + MongoClient._clients[self._topology._topology_id] = self + # Seed the topology with the old one's pid so we can detect clients + # that are opened before a fork and used after. + self._topology._pid = old_pid + + def target() -> bool: + client = self_ref() + if client is None: + return False # Stop the executor. 
+ MongoClient._process_periodic_tasks(client) + return True + + executor = periodic_executor.PeriodicExecutor( + interval=common.KILL_CURSOR_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_kill_cursors_thread", + ) + + # We strongly reference the executor and it weakly references us via + # this closure. When the client is freed, stop the executor soon. + self_ref: Any = weakref.ref(self, executor.close) + self._kill_cursors_executor = executor + self._opened = False + + def append_metadata(self, driver_info: DriverInfo) -> None: + """Appends the given metadata to existing driver metadata. + + :param driver_info: a :class:`~pymongo.driver_info.DriverInfo` + + .. versionadded:: 4.14 + """ + + if not isinstance(driver_info, DriverInfo): + raise TypeError( + f"driver_info must be an instance of DriverInfo, not {type(driver_info)}" + ) + self._options.pool_options._update_metadata(driver_info) + + def _should_pin_cursor(self, session: Optional[ClientSession]) -> Optional[bool]: + return self._options.load_balanced and not (session and session.in_transaction) + + def _after_fork(self) -> None: + """Resets topology in a child after successfully forking.""" + self._init_background(self._topology._pid) + # Reset the session pool to avoid duplicate sessions in the child process. + self._topology._session_pool.reset() + + def _duplicate(self, **kwargs: Any) -> MongoClient: # type: ignore[type-arg] + args = self._init_kwargs.copy() + args.update(kwargs) + return MongoClient(**args) + + def watch( + self, + pipeline: Optional[_Pipeline] = None, + full_document: Optional[str] = None, + resume_after: Optional[Mapping[str, Any]] = None, + max_await_time_ms: Optional[int] = None, + batch_size: Optional[int] = None, + collation: Optional[_CollationIn] = None, + start_at_operation_time: Optional[Timestamp] = None, + session: Optional[client_session.ClientSession] = None, + start_after: Optional[Mapping[str, Any]] = None, + comment: Optional[Any] = None, + full_document_before_change: Optional[str] = None, + show_expanded_events: Optional[bool] = None, + ) -> ChangeStream[_DocumentType]: + """Watch changes on this cluster. + + Performs an aggregation with an implicit initial ``$changeStream`` + stage and returns a + :class:`~pymongo.change_stream.ClusterChangeStream` cursor which + iterates over changes on all databases on this cluster. + + Introduced in MongoDB 4.0. + + .. code-block:: python + + with client.watch() as stream: + for change in stream: + print(change) + + The :class:`~pymongo.change_stream.ClusterChangeStream` iterable + blocks until the next change document is returned or an error is + raised. If the + :meth:`~pymongo.change_stream.ClusterChangeStream.next` method + encounters a network error when retrieving a batch from the server, + it will automatically attempt to recreate the cursor such that no + change events are missed. Any error encountered during the resume + attempt indicates there may be an outage and will be raised. + + .. code-block:: python + + try: + with client.watch([{"$match": {"operationType": "insert"}}]) as stream: + for insert_change in stream: + print(insert_change) + except pymongo.errors.PyMongoError: + # The ChangeStream encountered an unrecoverable error or the + # resume attempt failed to recreate the cursor. + logging.error("...") + + For a precise description of the resume process see the + `change streams specification`_. 
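+
+ To make the resume process concrete, here is a sketch of manually
+ persisting the stream's resume token across restarts; the
+ ``load_token``, ``save_token``, and ``process`` helpers are
+ hypothetical application code, not part of PyMongo:
+
+ .. code-block:: python
+
+ resume_token = load_token() # None on first run
+ with client.watch(resume_after=resume_token) as stream:
+ for change in stream:
+ process(change)
+ save_token(stream.resume_token) # persist for next restart
+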
+
+ :param pipeline: A list of aggregation pipeline stages to
+ append to an initial ``$changeStream`` stage. Not all
+ pipeline stages are valid after a ``$changeStream`` stage, see the
+ MongoDB documentation on change streams for the supported stages.
+ :param full_document: The fullDocument to pass as an option
+ to the ``$changeStream`` stage. Allowed values: 'updateLookup',
+ 'whenAvailable', 'required'. When set to 'updateLookup', the
+ change notification for partial updates will include both a delta
+ describing the changes to the document, as well as a copy of the
+ entire document that was changed from some time after the change
+ occurred.
+ :param full_document_before_change: Allowed values: 'whenAvailable'
+ and 'required'. Change events may now result in a
+ 'fullDocumentBeforeChange' response field.
+ :param resume_after: A resume token. If provided, the
+ change stream will start returning changes that occur directly
+ after the operation specified in the resume token. A resume token
+ is the _id value of a change document.
+ :param max_await_time_ms: The maximum time in milliseconds
+ for the server to wait for changes before responding to a getMore
+ operation.
+ :param batch_size: The maximum number of documents to return
+ per batch.
+ :param collation: The :class:`~pymongo.collation.Collation`
+ to use for the aggregation.
+ :param start_at_operation_time: If provided, the resulting
+ change stream will only return changes that occurred at or after
+ the specified :class:`~bson.timestamp.Timestamp`. Requires
+ MongoDB >= 4.0.
+ :param session: a
+ :class:`~pymongo.client_session.ClientSession`.
+ :param start_after: The same as `resume_after` except that
+ `start_after` can resume notifications after an invalidate event.
+ This option and `resume_after` are mutually exclusive.
+ :param comment: A user-provided comment to attach to this
+ command.
+ :param show_expanded_events: Include expanded events such as DDL events like `dropIndexes`.
+
+ :return: A :class:`~pymongo.change_stream.ClusterChangeStream` cursor.
+
+ .. versionchanged:: 4.3
+ Added `show_expanded_events` parameter.
+
+ .. versionchanged:: 4.2
+ Added ``full_document_before_change`` parameter.
+
+ .. versionchanged:: 4.1
+ Added ``comment`` parameter.
+
+ .. versionchanged:: 3.9
+ Added the ``start_after`` parameter.
+
+ .. versionadded:: 3.7
+
+ .. seealso:: The MongoDB documentation on `changeStreams `_.
+
+ .. _change streams specification:
+ https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.md
+ """
+ change_stream = ClusterChangeStream(
+ self.admin,
+ pipeline,
+ full_document,
+ resume_after,
+ max_await_time_ms,
+ batch_size,
+ collation,
+ start_at_operation_time,
+ session,
+ start_after,
+ comment,
+ full_document_before_change,
+ show_expanded_events=show_expanded_events,
+ )
+
+ change_stream._initialize_cursor()
+ return change_stream
+
+ @property
+ def topology_description(self) -> TopologyDescription:
+ """The description of the connected MongoDB deployment.
+
+ >>> client.topology_description
+ <TopologyDescription id: ..., topology_type: ReplicaSetWithPrimary, servers: [<ServerDescription ...>, <ServerDescription ...>, <ServerDescription ...>]>
+ >>> client.topology_description.topology_type_name
+ 'ReplicaSetWithPrimary'
+
+ Note that the description is periodically updated in the background
+ but the returned object itself is immutable. Access this property again
+ to get a more recent
+ :class:`~pymongo.topology_description.TopologyDescription`.
+
+ :return: An instance of
+ :class:`~pymongo.topology_description.TopologyDescription`.
+
+ ..
versionadded:: 4.0 + """ + if self._topology is None: + servers = {(host, port): ServerDescription((host, port)) for host, port in self._seeds} + return TopologyDescription( + TOPOLOGY_TYPE.Unknown, + servers, + None, + None, + None, + self._topology_settings, + ) + return self._topology.description + + @property + def nodes(self) -> FrozenSet[_Address]: + """Set of all currently connected servers. + + .. warning:: When connected to a replica set the value of :attr:`nodes` + can change over time as :class:`MongoClient`'s view of the replica + set changes. :attr:`nodes` can also be an empty set when + :class:`MongoClient` is first instantiated and hasn't yet connected + to any servers, or a network partition causes it to lose connection + to all servers. + """ + if self._topology is None: + return frozenset() + description = self._topology.description + return frozenset(s.address for s in description.known_servers) + + @property + def options(self) -> ClientOptions: + """The configuration options for this client. + + :return: An instance of :class:`~pymongo.client_options.ClientOptions`. + + .. versionadded:: 4.0 + """ + return self._options + + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + return ( + tuple(sorted(self._resolve_srv_info["seeds"])), + self._options.replica_set_name, + self._resolve_srv_info["fqdn"], + self._resolve_srv_info["srv_service_name"], + ) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self.eq_props()) + + def _repr_helper(self) -> str: + def option_repr(option: str, value: Any) -> str: + """Fix options whose __repr__ isn't usable in a constructor.""" + if option == "document_class": + if value is dict: + return "document_class=dict" + else: + return f"document_class={value.__module__}.{value.__name__}" + if option in common.TIMEOUT_OPTIONS and value is not None: + return f"{option}={int(value * 1000)}" + + return f"{option}={value!r}" + + # Host first... + if self._topology is None: + options = [f"host='mongodb+srv://{self._resolve_srv_info['fqdn']}'"] + else: + options = [ + "host=%r" + % [ + "%s:%d" % (host, port) if port is not None else host + for host, port in self._topology_settings.seeds + ] + ] + # ... then everything in self._constructor_args... + options.extend( + option_repr(key, self._options._options[key]) for key in self._constructor_args + ) + # ... then everything else. + options.extend( + option_repr(key, self._options._options[key]) + for key in self._options._options + if key not in set(self._constructor_args) and key != "username" and key != "password" + ) + return ", ".join(options) + + def __repr__(self) -> str: + return f"{type(self).__name__}({self._repr_helper()})" + + def __getattr__(self, name: str) -> database.Database[_DocumentType]: + """Get a database by name. + + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + if name.startswith("_"): + raise AttributeError( + f"{type(self).__name__} has no attribute {name!r}. To access the {name}" + f" database, use client[{name!r}]." + ) + return self.__getitem__(name) + + def __getitem__(self, name: str) -> database.Database[_DocumentType]: + """Get a database by name. 
+ + Raises :class:`~pymongo.errors.InvalidName` if an invalid + database name is used. + + :param name: the name of the database to get + """ + return database.Database(self, name) + + def __del__(self) -> None: + """Check that this MongoClient has been closed and issue a warning if not.""" + try: + if self._opened and not self._closed: + warnings.warn( + ( + f"Unclosed {type(self).__name__} opened at:\n{self._topology_settings._stack}" + f"Call {type(self).__name__}.close() to safely shut down your client and free up resources." + ), + ResourceWarning, + stacklevel=2, + ) + except (AttributeError, TypeError): + # Ignore errors at interpreter exit. + pass + + def _close_cursor_soon( + self, + cursor_id: int, + address: Optional[_CursorAddress], + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Request that a cursor and/or connection be cleaned up soon.""" + self._kill_cursors_queue.append((address, cursor_id, conn_mgr)) + + def _start_session(self, implicit: bool, **kwargs: Any) -> ClientSession: + server_session = _EmptyServerSession() + opts = client_session.SessionOptions(**kwargs) + return client_session.ClientSession(self, server_session, opts, implicit) + + def start_session( + self, + causal_consistency: Optional[bool] = None, + default_transaction_options: Optional[client_session.TransactionOptions] = None, + snapshot: Optional[bool] = False, + ) -> client_session.ClientSession: + """Start a logical session. + + This method takes the same parameters as + :class:`~pymongo.client_session.SessionOptions`. See the + :mod:`~pymongo.client_session` module for details and examples. + + A :class:`~pymongo.client_session.ClientSession` may only be used with + the MongoClient that started it. :class:`ClientSession` instances are + **not thread-safe or fork-safe**. They can only be used by one thread + or process at a time. A single :class:`ClientSession` cannot be used + to run multiple operations concurrently. + + :return: An instance of :class:`~pymongo.client_session.ClientSession`. + + .. versionadded:: 3.6 + """ + return self._start_session( + False, + causal_consistency=causal_consistency, + default_transaction_options=default_transaction_options, + snapshot=snapshot, + ) + + def _ensure_session(self, session: Optional[ClientSession] = None) -> Optional[ClientSession]: + """If provided session is None, lend a temporary session.""" + if session: + return session + + try: + # Don't make implicit sessions causally consistent. Applications + # should always opt-in. + return self._start_session(True, causal_consistency=False) + except (ConfigurationError, InvalidOperation): + # Sessions not supported. 
+ return None + + def _send_cluster_time( + self, command: MutableMapping[str, Any], session: Optional[ClientSession] + ) -> None: + topology_time = self._topology.max_cluster_time() + session_time = session.cluster_time if session else None + if topology_time and session_time: + if topology_time["clusterTime"] > session_time["clusterTime"]: + cluster_time: Optional[ClusterTime] = topology_time + else: + cluster_time = session_time + else: + cluster_time = topology_time or session_time + if cluster_time: + command["$clusterTime"] = cluster_time + + def get_default_database( + self, + default: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.Database[_DocumentType]: + """Get the database named in the MongoDB connection URI. + + >>> uri = 'mongodb://host/my_database' + >>> client = MongoClient(uri) + >>> db = client.get_default_database() + >>> assert db.name == 'my_database' + >>> db = client.get_database() + >>> assert db.name == 'my_database' + + Useful in scripts where you want to choose which database to use + based only on the URI in a configuration file. + + :param default: the database name to use if no database name + was provided in the URI. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + :param comment: A user-provided comment to attach to this + command. + + .. versionchanged:: 4.1 + Added ``comment`` parameter. + + .. versionchanged:: 3.8 + Undeprecated. Added the ``default``, ``codec_options``, + ``read_preference``, ``write_concern`` and ``read_concern`` + parameters. + + .. versionchanged:: 3.5 + Deprecated, use :meth:`get_database` instead. + """ + if self._default_database_name is None and default is None: + raise ConfigurationError("No default database name defined or provided.") + + name = cast(str, self._default_database_name or default) + return database.Database( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def get_database( + self, + name: Optional[str] = None, + codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, + read_preference: Optional[_ServerMode] = None, + write_concern: Optional[WriteConcern] = None, + read_concern: Optional[ReadConcern] = None, + ) -> database.Database[_DocumentType]: + """Get a :class:`~pymongo.database.Database` with the given name and + options. + + Useful for creating a :class:`~pymongo.database.Database` with + different codec options, read preference, and/or write concern from + this :class:`MongoClient`. + + >>> client.read_preference + Primary() + >>> db1 = client.test + >>> db1.read_preference + Primary() + >>> from pymongo import ReadPreference + >>> db2 = client.get_database( + ... 
'test', read_preference=ReadPreference.SECONDARY) + >>> db2.read_preference + Secondary(tag_sets=None) + + :param name: The name of the database - a string. If ``None`` + (the default) the database named in the MongoDB connection URI is + returned. + :param codec_options: An instance of + :class:`~bson.codec_options.CodecOptions`. If ``None`` (the + default) the :attr:`codec_options` of this :class:`MongoClient` is + used. + :param read_preference: The read preference to use. If + ``None`` (the default) the :attr:`read_preference` of this + :class:`MongoClient` is used. See :mod:`~pymongo.read_preferences` + for options. + :param write_concern: An instance of + :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the + default) the :attr:`write_concern` of this :class:`MongoClient` is + used. + :param read_concern: An instance of + :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the + default) the :attr:`read_concern` of this :class:`MongoClient` is + used. + + .. versionchanged:: 3.5 + The `name` parameter is now optional, defaulting to the database + named in the MongoDB connection URI. + """ + if name is None: + if self._default_database_name is None: + raise ConfigurationError("No default database defined") + name = self._default_database_name + + return database.Database( + self, name, codec_options, read_preference, write_concern, read_concern + ) + + def _database_default_options(self, name: str) -> database.Database: # type: ignore[type-arg] + """Get a Database instance with the default settings.""" + return self.get_database( + name, + codec_options=DEFAULT_CODEC_OPTIONS, + read_preference=ReadPreference.PRIMARY, + write_concern=DEFAULT_WRITE_CONCERN, + ) + + def __enter__(self) -> MongoClient[_DocumentType]: + return self + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + self.close() + + # See PYTHON-3084. + __iter__ = None + + def __next__(self) -> NoReturn: + raise TypeError("'MongoClient' object is not iterable") + + next = __next__ + + def _server_property(self, attr_name: str) -> Any: + """An attribute of the current server's description. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + Not threadsafe if used multiple times in a single method, since + the server may change. In such cases, store a local reference to a + ServerDescription first, then use its properties. + """ + server = (self._get_topology()).select_server(writable_server_selector, _Op.TEST) + + return getattr(server.description, attr_name) + + @property + def address(self) -> Optional[tuple[str, int]]: + """(host, port) of the current standalone, primary, or mongos, or None. + + Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if + the client is load-balancing among mongoses, since there is no single + address. Use :attr:`nodes` instead. + + If the client is not connected, this will block until a connection is + established or raise ServerSelectionTimeoutError if no server is + available. + + .. versionadded:: 3.0 + """ + if self._topology is None: + self._get_topology() + topology_type = self._topology._description.topology_type + if ( + topology_type == TOPOLOGY_TYPE.Sharded + and len(self.topology_description.server_descriptions()) > 1 + ): + raise InvalidOperation( + 'Cannot use "address" property when load balancing among' + ' mongoses, use "nodes" instead.' 
+ ) + return self._server_property("address") + + @property + def primary(self) -> Optional[tuple[str, int]]: + """The (host, port) of the current primary of the replica set. + + Returns ``None`` if this client is not connected to a replica set, + there is no primary, or this client was created without the + `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + if self._topology is None: + self._get_topology() + return self._topology.get_primary() # type: ignore[return-value] + + @property + def secondaries(self) -> set[_Address]: + """The secondary members known to this client. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no visible secondaries, or this + client was created without the `replicaSet` option. + + .. versionadded:: 3.0 + MongoClient gained this property in version 3.0. + """ + if self._topology is None: + self._get_topology() + return self._topology.get_secondaries() + + @property + def arbiters(self) -> set[_Address]: + """Arbiters in the replica set. + + A sequence of (host, port) pairs. Empty if this client is not + connected to a replica set, there are no arbiters, or this client was + created without the `replicaSet` option. + """ + if self._topology is None: + self._get_topology() + return self._topology.get_arbiters() + + @property + def is_primary(self) -> bool: + """If this client is connected to a server that can accept writes. + + True if the current server is a standalone, mongos, or the primary of + a replica set. If the client is not connected, this will block until a + connection is established or raise ServerSelectionTimeoutError if no + server is available. + """ + return self._server_property("is_writable") + + @property + def is_mongos(self) -> bool: + """If this client is connected to mongos. If the client is not + connected, this will block until a connection is established or raise + ServerSelectionTimeoutError if no server is available. + """ + return self._server_property("server_type") == SERVER_TYPE.Mongos + + def _end_sessions(self, session_ids: list[_ServerSession]) -> None: + """Send endSessions command(s) with the given session ids.""" + try: + # Use Connection.command directly to avoid implicitly creating + # another session. + with self._conn_for_reads( + ReadPreference.PRIMARY_PREFERRED, None, operation=_Op.END_SESSIONS + ) as ( + conn, + read_pref, + ): + if not conn.supports_sessions: + return + + for i in range(0, len(session_ids), common._MAX_END_SESSIONS): + spec = {"endSessions": session_ids[i : i + common._MAX_END_SESSIONS]} + conn.command("admin", spec, read_preference=read_pref, client=self) + except PyMongoError: + # Drivers MUST ignore any errors returned by the endSessions + # command. + pass + + def close(self) -> None: + """Cleanup client resources and disconnect from MongoDB. + + End all server sessions created by this client by sending one or more + endSessions commands. + + Close all sockets in the connection pools and stop the monitor threads. + + .. versionchanged:: 4.0 + Once closed, the client cannot be used again and any attempt will + raise :exc:`~pymongo.errors.InvalidOperation`. + + .. versionchanged:: 3.6 + End all server sessions created by this client. + """ + if self._topology is None: + return + session_ids = self._topology.pop_all_sessions() + if session_ids: + self._end_sessions(session_ids) + # Stop the periodic task thread and then send pending killCursor + # requests before closing the topology. 
+ self._kill_cursors_executor.close() + self._process_kill_cursors() + self._topology.close() + if self._encrypter: + # TODO: PYTHON-1921 Encrypted MongoClients cannot be re-opened. + self._encrypter.close() + self._closed = True + if not _IS_SYNC: + asyncio.gather( + self._topology.cleanup_monitors(), # type: ignore[func-returns-value] + self._kill_cursors_executor.join(), # type: ignore[func-returns-value] + return_exceptions=True, + ) + + if not _IS_SYNC: + # Add support for contextlib.closing. + close = close + + def _get_topology(self) -> Topology: + """Get the internal :class:`~pymongo.topology.Topology` object. + + If this client was created with "connect=False", calling _get_topology + launches the connection process in the background. + """ + if not _IS_SYNC: + if self._loop is None: + self._loop = asyncio.get_running_loop() + elif self._loop != asyncio.get_running_loop(): + raise RuntimeError( + "Cannot use MongoClient in different event loop. MongoClient uses low-level asyncio APIs that bind it to the event loop it was created on." + ) + if not self._opened: + if self._resolve_srv_info["is_srv"]: + self._resolve_srv() + self._init_background() + self._topology.open() + with self._lock: + self._kill_cursors_executor.open() + self._opened = True + return self._topology + + @contextlib.contextmanager + def _checkout( + self, server: Server, session: Optional[ClientSession] + ) -> Generator[Connection, None]: + in_txn = session and session.in_transaction + with _MongoClientErrorHandler(self, server, session) as err_handler: + # Reuse the pinned connection, if it exists. + if in_txn and session and session._pinned_connection: + err_handler.contribute_socket(session._pinned_connection) + yield session._pinned_connection + return + with server.checkout(handler=err_handler) as conn: + # Pin this session to the selected server or connection. + if ( + in_txn + and session + and server.description.server_type + in ( + SERVER_TYPE.Mongos, + SERVER_TYPE.LoadBalancer, + ) + ): + session._pin(server, conn) + err_handler.contribute_socket(conn) + if ( + self._encrypter + and not self._encrypter._bypass_auto_encryption + and conn.max_wire_version < 8 + ): + raise ConfigurationError( + "Auto-encryption requires a minimum MongoDB version of 4.2" + ) + yield conn + + def _select_server( + self, + server_selector: Callable[[Selection], Selection], + session: Optional[ClientSession], + operation: str, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Select a server to run an operation on this client. + + :Parameters: + - `server_selector`: The server selector to use if the session is + not pinned and no address is given. + - `session`: The ClientSession for the next operation, or None. May + be pinned to a mongos server address. + - `address` (optional): Address when sending a message + to a specific server, used for getMore. + """ + try: + topology = self._get_topology() + if session and not session.in_transaction: + session._transaction.reset() + if not address and session: + address = session._pinned_address + if address: + # We're running a getMore or this session is pinned to a mongos. 
+ server = topology.select_server_by_address( + address, operation, operation_id=operation_id + ) + if not server: + raise AutoReconnect("server %s:%s no longer available" % address) # noqa: UP031 + else: + server = topology.select_server( + server_selector, + operation, + deprioritized_servers=deprioritized_servers, + operation_id=operation_id, + ) + return server + except PyMongoError as exc: + # Server selection errors in a transaction are transient. + if session and session.in_transaction: + exc._add_error_label("TransientTransactionError") + session._unpin() + raise + + def _conn_for_writes( + self, session: Optional[ClientSession], operation: str + ) -> ContextManager[Connection]: + server = self._select_server(writable_server_selector, session, operation) + return self._checkout(server, session) + + @contextlib.contextmanager + def _conn_from_server( + self, read_preference: _ServerMode, server: Server, session: Optional[ClientSession] + ) -> Generator[tuple[Connection, _ServerMode], None]: + assert read_preference is not None, "read_preference must not be None" + # Get a connection for a server matching the read preference, and yield + # conn with the effective read preference. The Server Selection + # Spec says not to send any $readPreference to standalones and to + # always send primaryPreferred when directly connected to a repl set + # member. + # Thread safe: if the type is single it cannot change. + # NOTE: We already opened the Topology when selecting a server so there's no need + # to call _get_topology() again. + single = self._topology.description.topology_type == TOPOLOGY_TYPE.Single + with self._checkout(server, session) as conn: + if single: + if conn.is_repl and not (session and session.in_transaction): + # Use primary preferred to ensure any repl set member + # can handle the request. + read_preference = ReadPreference.PRIMARY_PREFERRED + elif conn.is_standalone: + # Don't send read preference to standalones. + read_preference = ReadPreference.PRIMARY + yield conn, read_preference + + def _conn_for_reads( + self, + read_preference: _ServerMode, + session: Optional[ClientSession], + operation: str, + ) -> ContextManager[tuple[Connection, _ServerMode]]: + assert read_preference is not None, "read_preference must not be None" + server = self._select_server(read_preference, session, operation) + return self._conn_from_server(read_preference, server, session) + + @_csot.apply + def _run_operation( + self, + operation: Union[_Query, _GetMore], + unpack_res: Callable, # type: ignore[type-arg] + address: Optional[_Address] = None, + ) -> Response: + """Run a _Query/_GetMore operation and return a Response. + + :param operation: a _Query or _GetMore object. + :param unpack_res: A callable that decodes the wire protocol response. + :param address: Optional address when sending a message + to a specific server, used for getMore. 
+ """ + if operation.conn_mgr: + server = self._select_server( + operation.read_preference, + operation.session, # type: ignore[arg-type] + operation.name, + address=address, + ) + + with operation.conn_mgr._lock: + with _MongoClientErrorHandler(self, server, operation.session) as err_handler: # type: ignore[arg-type] + err_handler.contribute_socket(operation.conn_mgr.conn) + return server.run_operation( + operation.conn_mgr.conn, + operation, + operation.read_preference, + self._event_listeners, + unpack_res, + self, + ) + + def _cmd( + _session: Optional[ClientSession], + server: Server, + conn: Connection, + read_preference: _ServerMode, + ) -> Response: + operation.reset() # Reset op in case of retry. + return server.run_operation( + conn, + operation, + read_preference, + self._event_listeners, + unpack_res, + self, + ) + + return self._retryable_read( + _cmd, + operation.read_preference, + operation.session, # type: ignore[arg-type] + address=address, + retryable=isinstance(operation, _Query), + operation=operation.name, + ) + + def _retry_with_session( + self, + retryable: bool, + func: _WriteCall[T], + session: Optional[ClientSession], + bulk: Optional[Union[_Bulk, _ClientBulk]], + operation: str, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with at most one consecutive retries + + Returns func()'s return value on success. On error retries the same + command. + + Re-raises any exception thrown by func(). + """ + # Ensure that the options supports retry_writes and there is a valid session not in + # transaction, otherwise, we will not support retry behavior for this txn. + retryable = bool( + retryable and self.options.retry_writes and session and not session.in_transaction + ) + return self._retry_internal( + func=func, + session=session, + bulk=bulk, + operation=operation, + retryable=retryable, + operation_id=operation_id, + ) + + @_csot.apply + def _retry_internal( + self, + func: _WriteCall[T] | _ReadCall[T], + session: Optional[ClientSession], + bulk: Optional[Union[_Bulk, _ClientBulk]], + operation: str, + is_read: bool = False, + address: Optional[_Address] = None, + read_pref: Optional[_ServerMode] = None, + retryable: bool = False, + operation_id: Optional[int] = None, + ) -> T: + """Internal retryable helper for all client transactions. + + :param func: Callback function we want to retry + :param session: Client Session on which the transaction should occur + :param bulk: Abstraction to handle bulk write operations + :param operation: The name of the operation that the server is being selected for + :param is_read: If this is an exclusive read transaction, defaults to False + :param address: Server Address, defaults to None + :param read_pref: Topology of read operation, defaults to None + :param retryable: If the operation should be retried once, defaults to None + + :return: Output of the calling func() + """ + return _ClientConnectionRetryable( + mongo_client=self, + func=func, + bulk=bulk, + operation=operation, + is_read=is_read, + session=session, + read_pref=read_pref, + address=address, + retryable=retryable, + operation_id=operation_id, + ).run() + + def _retryable_read( + self, + func: _ReadCall[T], + read_pref: _ServerMode, + session: Optional[ClientSession], + operation: str, + address: Optional[_Address] = None, + retryable: bool = True, + operation_id: Optional[int] = None, + ) -> T: + """Execute an operation with consecutive retries if possible + + Returns func()'s return value on success. On error retries the same + command. 
+
+ Re-raises any exception thrown by func().
+
+ :param func: Read call we want to execute
+ :param read_pref: Desired topology of read operation
+ :param session: Client session we should use to execute operation
+ :param operation: The name of the operation that the server is being selected for
+ :param address: Optional address when sending a message, defaults to None
+ :param retryable: if we should attempt retries
+ (may not always be supported even if supplied), defaults to True
+ """
+
+ # Ensure that the client supports retrying on reads and there is no session in
+ # transaction, otherwise, we will not support retry behavior for this call.
+ retryable = bool(
+ retryable and self.options.retry_reads and not (session and session.in_transaction)
+ )
+ with self._tmp_session(session) as s:
+ return self._retry_internal(
+ func,
+ s,
+ None,
+ operation,
+ is_read=True,
+ address=address,
+ read_pref=read_pref,
+ retryable=retryable,
+ operation_id=operation_id,
+ )
+
+ def _retryable_write(
+ self,
+ retryable: bool,
+ func: _WriteCall[T],
+ session: Optional[ClientSession],
+ operation: str,
+ bulk: Optional[Union[_Bulk, _ClientBulk]] = None,
+ operation_id: Optional[int] = None,
+ ) -> T:
+ """Execute an operation with consecutive retries if possible
+
+ Returns func()'s return value on success. On error retries the same
+ command.
+
+ Re-raises any exception thrown by func().
+
+ :param retryable: if we should attempt retries (may not always be supported)
+ :param func: write call we want to execute during a session
+ :param session: Client session we will use to execute write operation
+ :param operation: The name of the operation that the server is being selected for
+ :param bulk: bulk abstraction to execute operations in bulk, defaults to None
+ """
+ with self._tmp_session(session) as s:
+ return self._retry_with_session(retryable, func, s, bulk, operation, operation_id)
+
+ def _cleanup_cursor_no_lock(
+ self,
+ cursor_id: int,
+ address: Optional[_CursorAddress],
+ conn_mgr: _ConnectionManager,
+ session: Optional[ClientSession],
+ ) -> None:
+ """Cleanup a cursor from __del__ without locking.
+
+ This method handles cleanup for Cursors/CommandCursors including any
+ pinned connection attached at the time the cursor
+ was garbage collected.
+
+ :param cursor_id: The cursor id which may be 0.
+ :param address: The _CursorAddress.
+ :param conn_mgr: The _ConnectionManager for the pinned connection or None.
+ """
+ # The cursor will be closed later in a different session.
+ if cursor_id or conn_mgr:
+ self._close_cursor_soon(cursor_id, address, conn_mgr)
+ if session and session._implicit and not session._leave_alive:
+ session._end_implicit_session()
+
+ def _cleanup_cursor_lock(
+ self,
+ cursor_id: int,
+ address: Optional[_CursorAddress],
+ conn_mgr: _ConnectionManager,
+ session: Optional[ClientSession],
+ ) -> None:
+ """Cleanup a cursor from cursor.close() using a lock.
+
+ This method handles cleanup for Cursors/CommandCursors including any
+ pinned connection or implicit session attached at the time the cursor
+ was closed or garbage collected.
+
+ :param cursor_id: The cursor id which may be 0.
+ :param address: The _CursorAddress.
+ :param conn_mgr: The _ConnectionManager for the pinned connection or None.
+ :param session: The cursor's session.
+ """ + if cursor_id: + if conn_mgr and conn_mgr.more_to_come: + # If this is an exhaust cursor and we haven't completely + # exhausted the result set we *must* close the socket + # to stop the server from sending more data. + assert conn_mgr.conn is not None + conn_mgr.conn.close_conn(ConnectionClosedReason.ERROR) + else: + self._close_cursor_now(cursor_id, address, session=session, conn_mgr=conn_mgr) + if conn_mgr: + conn_mgr.close() + if session and session._implicit and not session._leave_alive: + session._end_implicit_session() + + def _close_cursor_now( + self, + cursor_id: int, + address: Optional[_CursorAddress], + session: Optional[ClientSession] = None, + conn_mgr: Optional[_ConnectionManager] = None, + ) -> None: + """Send a kill cursors message with the given id. + + The cursor is closed synchronously on the current thread. + """ + if not isinstance(cursor_id, int): + raise TypeError(f"cursor_id must be an instance of int, not {type(cursor_id)}") + + try: + if conn_mgr: + with conn_mgr._lock: + # Cursor is pinned to LB outside of a transaction. + assert address is not None + assert conn_mgr.conn is not None + self._kill_cursor_impl([cursor_id], address, session, conn_mgr.conn) + else: + self._kill_cursors([cursor_id], address, self._get_topology(), session) + except PyMongoError: + # Make another attempt to kill the cursor later. + self._close_cursor_soon(cursor_id, address) + + def _kill_cursors( + self, + cursor_ids: Sequence[int], + address: Optional[_CursorAddress], + topology: Topology, + session: Optional[ClientSession], + ) -> None: + """Send a kill cursors message with the given ids.""" + if address: + # address could be a tuple or _CursorAddress, but + # select_server_by_address needs (host, port). + server = topology.select_server_by_address(tuple(address), _Op.KILL_CURSORS) # type: ignore[arg-type] + else: + # Application called close_cursor() with no address. + server = topology.select_server(writable_server_selector, _Op.KILL_CURSORS) + + with self._checkout(server, session) as conn: + assert address is not None + self._kill_cursor_impl(cursor_ids, address, session, conn) + + def _kill_cursor_impl( + self, + cursor_ids: Sequence[int], + address: _CursorAddress, + session: Optional[ClientSession], + conn: Connection, + ) -> None: + namespace = address.namespace + db, coll = namespace.split(".", 1) + spec = {"killCursors": coll, "cursors": cursor_ids} + conn.command(db, spec, session=session, client=self) + + def _process_kill_cursors(self) -> None: + """Process any pending kill cursors requests.""" + address_to_cursor_ids = defaultdict(list) + pinned_cursors = [] + + # Other threads or the GC may append to the queue concurrently. + while True: + try: + address, cursor_id, conn_mgr = self._kill_cursors_queue.pop() + except IndexError: + break + + if conn_mgr: + pinned_cursors.append((address, cursor_id, conn_mgr)) + else: + address_to_cursor_ids[address].append(cursor_id) + + for address, cursor_id, conn_mgr in pinned_cursors: + try: + self._cleanup_cursor_lock(cursor_id, address, conn_mgr, None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + # Raise the exception when client is closed so that it + # can be caught in _process_periodic_tasks + raise + else: + _log_client_error() + + # Don't re-open topology if it's closed and there's no pending cursors. 
+ if address_to_cursor_ids: + topology = self._get_topology() + for address, cursor_ids in address_to_cursor_ids.items(): + try: + self._kill_cursors(cursor_ids, address, topology, session=None) + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + raise + else: + _log_client_error() + + # This method is run periodically by a background thread. + def _process_periodic_tasks(self) -> None: + """Process any pending kill cursors requests and + maintain connection pool parameters. + """ + try: + self._process_kill_cursors() + self._topology.update_pool() + except Exception as exc: + if isinstance(exc, InvalidOperation) and self._topology._closed: + return + else: + _log_client_error() + + def _return_server_session( + self, server_session: Union[_ServerSession, _EmptyServerSession] + ) -> None: + """Internal: return a _ServerSession to the pool.""" + if isinstance(server_session, _EmptyServerSession): + return None + return self._topology.return_server_session(server_session) + + @contextlib.contextmanager + def _tmp_session( + self, session: Optional[client_session.ClientSession] + ) -> Generator[Optional[client_session.ClientSession], None]: + """If provided session is None, lend a temporary session.""" + if session is not None: + if not isinstance(session, client_session.ClientSession): + raise ValueError( + f"'session' argument must be a ClientSession or None, not {type(session)}" + ) + # Don't call end_session. + yield session + return + + s = self._ensure_session(session) + if s: + try: + yield s + except Exception as exc: + if isinstance(exc, ConnectionFailure): + s._server_session.mark_dirty() + + # Always call end_session on error. + s.end_session() + raise + finally: + # Call end_session when we exit this scope. + if not s._attached_to_cursor: + s.end_session() + else: + yield None + + def _process_response(self, reply: Mapping[str, Any], session: Optional[ClientSession]) -> None: + self._topology.receive_cluster_time(reply.get("$clusterTime")) + if session is not None: + session._process_response(reply) + + def server_info(self, session: Optional[client_session.ClientSession] = None) -> dict[str, Any]: + """Get information about the MongoDB server we're connected to. + + :param session: a + :class:`~pymongo.client_session.ClientSession`. + + .. versionchanged:: 3.6 + Added ``session`` parameter. + """ + return cast( # type: ignore[redundant-cast] + dict[str, Any], + self.admin.command( + "buildinfo", read_preference=ReadPreference.PRIMARY, session=session + ), + ) + + def _list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[dict[str, Any]]: + cmd = {"listDatabases": 1} + cmd.update(kwargs) + if comment is not None: + cmd["comment"] = comment + admin = self._database_default_options("admin") + res = admin._retryable_read_command(cmd, session=session, operation=_Op.LIST_DATABASES) + # listDatabases doesn't return a cursor (yet). Fake one. + cursor = { + "id": 0, + "firstBatch": res["databases"], + "ns": "admin.$cmd", + } + return CommandCursor(admin["$cmd"], cursor, None, comment=comment) + + def list_databases( + self, + session: Optional[client_session.ClientSession] = None, + comment: Optional[Any] = None, + **kwargs: Any, + ) -> CommandCursor[dict[str, Any]]: + """Get a cursor over the databases of the connected server. + + Cursors are closed automatically when they are exhausted (the last batch of data is retrieved from the database). 
+        If a cursor is not exhausted, it will be closed automatically upon garbage collection,
+        which can leave resources open on the server for a potentially long period of time.
+        To avoid this, best practice is to call :meth:`Cursor.close` when the cursor is no longer needed,
+        or use the cursor in a with statement::
+
+            with client.list_databases() as cursor:
+                for database in cursor:
+                    print(database)
+
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+        :param kwargs: Optional parameters of the
+            `listDatabases command
+            <https://mongodb.com/docs/manual/reference/command/listDatabases/>`_
+            can be passed as keyword arguments to this method. The supported
+            options differ by server version.
+
+        :return: An instance of :class:`~pymongo.command_cursor.CommandCursor`.
+
+        .. versionadded:: 3.6
+        """
+        return self._list_databases(session, comment, **kwargs)
+
+    def list_database_names(
+        self,
+        session: Optional[client_session.ClientSession] = None,
+        comment: Optional[Any] = None,
+    ) -> list[str]:
+        """Get a list of the names of all databases on the connected server.
+
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionadded:: 3.6
+        """
+        res = self._list_databases(session, nameOnly=True, comment=comment)
+        return [doc["name"] for doc in res]
+
+    @_csot.apply
+    def drop_database(
+        self,
+        name_or_database: Union[str, database.Database[_DocumentTypeArg]],
+        session: Optional[client_session.ClientSession] = None,
+        comment: Optional[Any] = None,
+    ) -> None:
+        """Drop a database.
+
+        Raises :class:`TypeError` if `name_or_database` is not an instance of
+        :class:`str` or :class:`~pymongo.database.Database`.
+
+        :param name_or_database: the name of a database to drop, or a
+            :class:`~pymongo.database.Database` instance representing the
+            database to drop
+        :param session: a
+            :class:`~pymongo.client_session.ClientSession`.
+        :param comment: A user-provided comment to attach to this
+            command.
+
+        .. versionchanged:: 4.1
+           Added ``comment`` parameter.
+
+        .. versionchanged:: 3.6
+           Added ``session`` parameter.
+
+        .. note:: The :attr:`~pymongo.mongo_client.MongoClient.write_concern` of
+           this client is automatically applied to this operation.
+
+        .. versionchanged:: 3.4
+           Apply this client's write concern automatically to this operation
+           when connected to MongoDB >= 3.4.
+
+        """
+        name = name_or_database
+        if isinstance(name, database.Database):
+            name = name.name
+
+        if not isinstance(name, str):
+            raise TypeError(
+                f"name_or_database must be an instance of str or a Database, not {type(name)}"
+            )
+
+        with self._conn_for_writes(session, operation=_Op.DROP_DATABASE) as conn:
+            self[name]._command(
+                conn,
+                {"dropDatabase": 1, "comment": comment},
+                read_preference=ReadPreference.PRIMARY,
+                write_concern=self._write_concern_for(session),
+                parse_write_concern_error=True,
+                session=session,
+            )
+
+    @_csot.apply
+    def bulk_write(
+        self,
+        models: Sequence[_WriteOp],
+        session: Optional[ClientSession] = None,
+        ordered: bool = True,
+        verbose_results: bool = False,
+        bypass_document_validation: Optional[bool] = None,
+        comment: Optional[Any] = None,
+        let: Optional[Mapping[str, Any]] = None,
+        write_concern: Optional[WriteConcern] = None,
+    ) -> ClientBulkWriteResult:
+        """Send a batch of write operations, potentially across multiple namespaces, to the server.
+
+        Requests are passed as a list of write operation instances (
+        :class:`~pymongo.operations.InsertOne`,
+        :class:`~pymongo.operations.UpdateOne`,
+        :class:`~pymongo.operations.UpdateMany`,
+        :class:`~pymongo.operations.ReplaceOne`,
+        :class:`~pymongo.operations.DeleteOne`, or
+        :class:`~pymongo.operations.DeleteMany`).
+
+        >>> for doc in db.test.find({}):
+        ...     print(doc)
+        ...
+        {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')}
+        {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')}
+        ...
+        >>> for doc in db.coll.find({}):
+        ...     print(doc)
+        ...
+        {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')}
+        ...
+        >>> # DeleteMany, UpdateOne, and UpdateMany are also available.
+        >>> from pymongo import InsertOne, DeleteOne, ReplaceOne
+        >>> models = [InsertOne(namespace="db.test", document={'y': 1}),
+        ...           DeleteOne(namespace="db.test", filter={'x': 1}),
+        ...           InsertOne(namespace="db.coll", document={'y': 2}),
+        ...           ReplaceOne(namespace="db.test", filter={'w': 1}, replacement={'z': 1}, upsert=True)]
+        >>> result = client.bulk_write(models=models)
+        >>> result.inserted_count
+        2
+        >>> result.deleted_count
+        1
+        >>> result.modified_count
+        0
+        >>> result.upserted_count
+        1
+        >>> for doc in db.test.find({}):
+        ...     print(doc)
+        ...
+        {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')}
+        {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')}
+        {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')}
+        ...
+        >>> for doc in db.coll.find({}):
+        ...     print(doc)
+        ...
+        {'x': 2, '_id': ObjectId('507f1f77bcf86cd799439011')}
+        {'y': 2, '_id': ObjectId('507f1f77bcf86cd799439012')}
+
+        :param models: A list of write operation instances.
+        :param session: (optional) An instance of
+            :class:`~pymongo.client_session.ClientSession`.
+        :param ordered: If ``True`` (the default), requests will be
+            performed on the server serially, in the order provided. If an error
+            occurs, all remaining operations are aborted. If ``False``, requests
+            will still be performed on the server serially, in the order provided,
+            but all operations will be attempted even if any errors occur.
+        :param verbose_results: If ``True``, detailed results for each
+            successful operation will be included in the returned
+            :class:`~pymongo.results.ClientBulkWriteResult`. Default is ``False``.
+        :param bypass_document_validation: (optional) If ``True``, allows the
+            write to opt-out of document level validation. Default is ``False``.
+        :param comment: (optional) A user-provided comment to attach to this
+            command.
+        :param let: (optional) Map of parameter names and values. Values must be
+            constant or closed expressions that do not reference document
+            fields. Parameters can then be accessed as variables in an
+            aggregate expression context (e.g. "$$var").
+        :param write_concern: (optional) The write concern to use for this bulk write.
+
+        :return: An instance of :class:`~pymongo.results.ClientBulkWriteResult`.
+
+        .. seealso:: For more info, see `Client Bulk Write `_.
+
+        .. seealso:: `Writes and ids `_
+
+        .. note:: Requires MongoDB server version 8.0+.
+
+        .. versionadded:: 4.9
+        """
+        if self._options.auto_encryption_opts:
+            raise InvalidOperation(
+                "MongoClient.bulk_write does not currently support automatic encryption"
+            )
+
+        if session and session.in_transaction:
+            # Inherit the transaction write concern.
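+            # A transaction carries a single write concern chosen when it was
+            # started, so a per-call write_concern here is rejected as ambiguous.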
+            if write_concern:
+                raise InvalidOperation("Cannot set write concern after starting a transaction")
+            write_concern = session._transaction.opts.write_concern  # type: ignore[union-attr]
+        else:
+            # Inherit the client's write concern if none is provided.
+            if not write_concern:
+                write_concern = self.write_concern
+
+        if write_concern and not write_concern.acknowledged and verbose_results:
+            raise InvalidOperation(
+                "Cannot request unacknowledged write concern and verbose results"
+            )
+        elif write_concern and not write_concern.acknowledged and ordered:
+            raise InvalidOperation("Cannot request unacknowledged write concern and ordered writes")
+
+        common.validate_list("models", models)
+
+        blk = _ClientBulk(
+            self,
+            write_concern=write_concern,  # type: ignore[arg-type]
+            ordered=ordered,
+            bypass_document_validation=bypass_document_validation,
+            comment=comment,
+            let=let,
+            verbose_results=verbose_results,
+        )
+        for model in models:
+            try:
+                model._add_to_client_bulk(blk)
+            except AttributeError:
+                raise TypeError(f"{model!r} is not a valid request") from None
+
+        return blk.execute(session, _Op.BULK_WRITE)
+
+
+def _retryable_error_doc(exc: PyMongoError) -> Optional[Mapping[str, Any]]:
+    """Return the server response from PyMongo exception or None."""
+    if isinstance(exc, (BulkWriteError, ClientBulkWriteException)):
+        # Check the last writeConcernError to determine if this
+        # BulkWriteError is retryable.
+        wces = exc.details["writeConcernErrors"]
+        return wces[-1] if wces else None
+    if isinstance(exc, (NotPrimaryError, OperationFailure)):
+        return cast(Mapping[str, Any], exc.details)
+    return None
+
+
+def _add_retryable_write_error(exc: PyMongoError, max_wire_version: int, is_mongos: bool) -> None:
+    doc = _retryable_error_doc(exc)
+    if doc:
+        code = doc.get("code", 0)
+        # retryWrites on MMAPv1 should raise an actionable error.
+        if code == 20 and str(exc).startswith("Transaction numbers"):
+            errmsg = (
+                "This MongoDB deployment does not support "
+                "retryable writes. Please add retryWrites=false "
+                "to your connection string."
+            )
+            raise OperationFailure(errmsg, code, exc.details)  # type: ignore[attr-defined]
+        if max_wire_version >= 9:
+            # In MongoDB 4.4+, the server reports the error labels.
+            for label in doc.get("errorLabels", []):
+                exc._add_error_label(label)
+        else:
+            # Do not consult writeConcernError for pre-4.4 mongos.
+            if isinstance(exc, WriteConcernError) and is_mongos:
+                pass
+            elif code in helpers_shared._RETRYABLE_ERROR_CODES:
+                exc._add_error_label("RetryableWriteError")
+
+    # Connection errors are always retryable except NotPrimaryError and
+    # WaitQueueTimeoutError, which are handled above.
+    if isinstance(exc, ClientBulkWriteException):
+        exc_to_check = exc.error
+    else:
+        exc_to_check = exc
+    if isinstance(exc_to_check, ConnectionFailure) and not isinstance(
+        exc_to_check, (NotPrimaryError, WaitQueueTimeoutError)
+    ):
+        exc_to_check._add_error_label("RetryableWriteError")
+
+
+class _MongoClientErrorHandler:
+    """Handle errors raised when executing an operation."""
+
+    __slots__ = (
+        "client",
+        "server_address",
+        "session",
+        "max_wire_version",
+        "sock_generation",
+        "completed_handshake",
+        "service_id",
+        "handled",
+    )
+
+    def __init__(
+        self,
+        client: MongoClient,  # type: ignore[type-arg]
+        server: Server,
+        session: Optional[ClientSession],
+    ):
+        if not isinstance(client, MongoClient):
+            # This is for compatibility with mocked and subclassed types, such as in Motor.
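+            # Accept any type whose MRO contains a class literally named
+            # "MongoClient", since wrappers and mocks may not share the real
+            # class identity.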
+            if not any(cls.__name__ == "MongoClient" for cls in type(client).__mro__):
+                raise TypeError(f"MongoClient required but given {type(client).__name__}")
+
+        self.client = client
+        self.server_address = server.description.address
+        self.session = session
+        self.max_wire_version = common.MIN_WIRE_VERSION
+        # XXX: When get_socket fails, this generation could be out of date:
+        # "Note that when a network error occurs before the handshake
+        # completes then the error's generation number is the generation
+        # of the pool at the time the connection attempt was started."
+        self.sock_generation = server.pool.gen.get_overall()
+        self.completed_handshake = False
+        self.service_id: Optional[ObjectId] = None
+        self.handled = False
+
+    def contribute_socket(self, conn: Connection, completed_handshake: bool = True) -> None:
+        """Provide socket information to the error handler."""
+        self.max_wire_version = conn.max_wire_version
+        self.sock_generation = conn.generation
+        self.service_id = conn.service_id
+        self.completed_handshake = completed_handshake
+
+    def handle(
+        self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException]
+    ) -> None:
+        if self.handled or exc_val is None:
+            return
+        self.handled = True
+        if self.session:
+            if isinstance(exc_val, ClientBulkWriteException):
+                exc_val = exc_val.error
+            if isinstance(exc_val, ConnectionFailure):
+                if self.session.in_transaction:
+                    exc_val._add_error_label("TransientTransactionError")
+                self.session._server_session.mark_dirty()
+
+            if isinstance(exc_val, PyMongoError):
+                if exc_val.has_error_label("TransientTransactionError") or exc_val.has_error_label(
+                    "RetryableWriteError"
+                ):
+                    self.session._unpin()
+        err_ctx = _ErrorContext(
+            exc_val,  # type: ignore[arg-type]
+            self.max_wire_version,
+            self.sock_generation,
+            self.completed_handshake,
+            self.service_id,
+        )
+        assert self.client._topology is not None
+        self.client._topology.handle_error(self.server_address, err_ctx)
+
+    def __enter__(self) -> _MongoClientErrorHandler:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[Exception]],
+        exc_val: Optional[Exception],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        return self.handle(exc_type, exc_val)
+
+
+class _ClientConnectionRetryable(Generic[T]):
+    """Responsible for executing read or write operations with retries, selecting a new server and connection for each attempt."""
+
+    def __init__(
+        self,
+        mongo_client: MongoClient,  # type: ignore[type-arg]
+        func: _WriteCall[T] | _ReadCall[T],
+        bulk: Optional[Union[_Bulk, _ClientBulk]],
+        operation: str,
+        is_read: bool = False,
+        session: Optional[ClientSession] = None,
+        read_pref: Optional[_ServerMode] = None,
+        address: Optional[_Address] = None,
+        retryable: bool = False,
+        operation_id: Optional[int] = None,
+    ):
+        self._last_error: Optional[Exception] = None
+        self._retrying = False
+        self._multiple_retries = _csot.get_timeout() is not None
+        self._client = mongo_client
+
+        self._func = func
+        self._bulk = bulk
+        self._session = session
+        self._is_read = is_read
+        self._retryable = retryable
+        self._read_pref = read_pref
+        self._server_selector: Callable[[Selection], Selection] = (
+            read_pref if is_read else writable_server_selector  # type: ignore
+        )
+        self._address = address
+        self._server: Server = None  # type: ignore
+        self._deprioritized_servers: list[Server] = []
+        self._operation = operation
+        self._operation_id = operation_id
+        self._attempt_number = 0
+
+    def run(self) -> T:
+        """Runs the supplied func() and attempts a retry
+
+        :raises: self._last_error: Last exception raised
+
+        :return: Result of the func() call
+        """
+        # Increment the transaction id up front to ensure any retry attempt
+        # will use the proper txnNumber, even if server or socket selection
+        # fails before the command can be sent.
+        if self._is_session_state_retryable() and self._retryable and not self._is_read:
+            self._session._start_retryable_write()  # type: ignore
+            if self._bulk:
+                self._bulk.started_retryable_write = True
+
+        while True:
+            self._check_last_error(check_csot=True)
+            try:
+                return self._read() if self._is_read else self._write()
+            except ServerSelectionTimeoutError:
+                # The application may think the write was never attempted
+                # if we raise ServerSelectionTimeoutError on the retry
+                # attempt. Raise the original exception instead.
+                self._check_last_error()
+                # A ServerSelectionTimeoutError error indicates that there may
+                # be a persistent outage. Attempting to retry in this case will
+                # most likely be a waste of time.
+                raise
+            except PyMongoError as exc:
+                # Execute specialized catch on read
+                if self._is_read:
+                    if isinstance(exc, (ConnectionFailure, OperationFailure)):
+                        # ConnectionFailures do not supply a code property
+                        exc_code = getattr(exc, "code", None)
+                        if self._is_not_eligible_for_retry() or (
+                            isinstance(exc, OperationFailure)
+                            and exc_code not in helpers_shared._RETRYABLE_ERROR_CODES
+                        ):
+                            raise
+                        self._retrying = True
+                        self._last_error = exc
+                        self._attempt_number += 1
+                    else:
+                        raise
+
+                # Specialized catch on write operation
+                if not self._is_read:
+                    if not self._retryable:
+                        raise
+                    if isinstance(exc, ClientBulkWriteException) and exc.error:
+                        retryable_write_error_exc = isinstance(
+                            exc.error, PyMongoError
+                        ) and exc.error.has_error_label("RetryableWriteError")
+                    else:
+                        retryable_write_error_exc = exc.has_error_label("RetryableWriteError")
+                    if retryable_write_error_exc:
+                        assert self._session
+                        self._session._unpin()
+                    if not retryable_write_error_exc or self._is_not_eligible_for_retry():
+                        if exc.has_error_label("NoWritesPerformed") and self._last_error:
+                            raise self._last_error from exc
+                        else:
+                            raise
+                    self._attempt_number += 1
+                    if self._bulk:
+                        self._bulk.retrying = True
+                    else:
+                        self._retrying = True
+                    if not exc.has_error_label("NoWritesPerformed"):
+                        self._last_error = exc
+                    if self._last_error is None:
+                        self._last_error = exc
+
+                if self._client.topology_description.topology_type == TOPOLOGY_TYPE.Sharded:
+                    self._deprioritized_servers.append(self._server)
+
+    def _is_not_eligible_for_retry(self) -> bool:
+        """Checks if the exchange is not eligible for retry"""
+        return not self._retryable or (self._is_retrying() and not self._multiple_retries)
+
+    def _is_retrying(self) -> bool:
+        """Checks if the exchange is currently undergoing a retry"""
+        return self._bulk.retrying if self._bulk else self._retrying
+
+    def _is_session_state_retryable(self) -> bool:
+        """Checks if provided session is eligible for retry
+
+        reads: Make sure there is no ongoing transaction (if provided a session)
+        writes: Make sure there is a session without an active transaction
+        """
+        if self._is_read:
+            return not (self._session and self._session.in_transaction)
+        return bool(self._session and not self._session.in_transaction)
+
+    def _check_last_error(self, check_csot: bool = False) -> None:
+        """Checks if the ongoing client exchange experienced an exception previously.
+
+        If so, raise last error
+
+        :param check_csot: Checks CSOT to ensure we are retrying with time remaining, defaults to False
+        """
+        if self._is_retrying():
+            remaining = _csot.remaining()
+            if not check_csot or (remaining is not None and remaining <= 0):
+                assert self._last_error is not None
+                raise self._last_error
+
+    def _get_server(self) -> Server:
+        """Retrieves a server object based on provided object context
+
+        :return: Abstraction to connect to server
+        """
+        return self._client._select_server(
+            self._server_selector,
+            self._session,
+            self._operation,
+            address=self._address,
+            deprioritized_servers=self._deprioritized_servers,
+            operation_id=self._operation_id,
+        )
+
+    def _write(self) -> T:
+        """Wrapper method for write-type retryable client executions
+
+        :return: Output for func()'s call
+        """
+        try:
+            max_wire_version = 0
+            is_mongos = False
+            self._server = self._get_server()
+            with self._client._checkout(self._server, self._session) as conn:
+                max_wire_version = conn.max_wire_version
+                sessions_supported = (
+                    self._session
+                    and self._server.description.retryable_writes_supported
+                    and conn.supports_sessions
+                )
+                is_mongos = conn.is_mongos
+                if not sessions_supported:
+                    # A retry is not possible because this server does
+                    # not support sessions; raise the last error.
+                    self._check_last_error()
+                    self._retryable = False
+                if self._retrying:
+                    _debug_log(
+                        _COMMAND_LOGGER,
+                        message=f"Retrying write attempt number {self._attempt_number}",
+                        clientId=self._client._topology_settings._topology_id,
+                        commandName=self._operation,
+                        operationId=self._operation_id,
+                    )
+                return self._func(self._session, conn, self._retryable)  # type: ignore
+        except PyMongoError as exc:
+            if not self._retryable:
+                raise
+            # Add the RetryableWriteError label, if applicable.
+            _add_retryable_write_error(exc, max_wire_version, is_mongos)
+            raise
+
+    def _read(self) -> T:
+        """Wrapper method for read-type retryable client executions
+
+        :return: Output for func()'s call
+        """
+        self._server = self._get_server()
+        assert self._read_pref is not None, "Read Preference required on read calls"
+        with self._client._conn_from_server(self._read_pref, self._server, self._session) as (
+            conn,
+            read_pref,
+        ):
+            if self._retrying and not self._retryable:
+                self._check_last_error()
+            if self._retrying:
+                _debug_log(
+                    _COMMAND_LOGGER,
+                    message=f"Retrying read attempt number {self._attempt_number}",
+                    clientId=self._client._topology_settings._topology_id,
+                    commandName=self._operation,
+                    operationId=self._operation_id,
+                )
+            return self._func(self._session, self._server, conn, read_pref)  # type: ignore
+
+
+def _after_fork_child() -> None:
+    """Releases the locks in child process and resets the
+    topologies in all MongoClients.
+    """
+    # Reinitialize locks
+    _release_locks()
+
+    # Perform cleanup in clients (i.e. get rid of topology)
+    for _, client in MongoClient._clients.items():
+        client._after_fork()
+
+
+def _detect_external_db(entity: str) -> bool:
+    """Detects external database hosts and logs an informational message at the INFO level."""
+    entity = entity.lower()
+    cosmos_db_hosts = [".cosmos.azure.com"]
+    document_db_hosts = [".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"]
+
+    for host in cosmos_db_hosts:
+        if entity.endswith(host):
+            _log_or_warn(
+                _CLIENT_LOGGER,
+                "You appear to be connected to a CosmosDB cluster. 
For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb", + ) + return True + for host in document_db_hosts: + if entity.endswith(host): + _log_or_warn( + _CLIENT_LOGGER, + "You appear to be connected to a DocumentDB cluster. For more information regarding feature " + "compatibility and support please visit https://www.mongodb.com/supportability/documentdb", + ) + return True + return False + + +if _HAS_REGISTER_AT_FORK: + # This will run in the same thread as the fork was called. + # If we fork in a critical region on the same thread, it should break. + # This is fine since we would never call fork directly from a critical region. + os.register_at_fork(after_in_child=_after_fork_child) diff --git a/pymongo/synchronous/monitor.py b/pymongo/synchronous/monitor.py new file mode 100644 index 0000000000..f395588814 --- /dev/null +++ b/pymongo/synchronous/monitor.py @@ -0,0 +1,543 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Class to monitor a MongoDB server on a background thread.""" + +from __future__ import annotations + +import asyncio +import atexit +import logging +import time +import weakref +from typing import TYPE_CHECKING, Any, Optional + +from pymongo import common, periodic_executor +from pymongo._csot import MovingMinimum +from pymongo.errors import NetworkTimeout, _OperationCancelled +from pymongo.hello import Hello +from pymongo.lock import _create_lock +from pymongo.logger import _SDAM_LOGGER, _debug_log, _SDAMStatusMessage +from pymongo.periodic_executor import _shutdown_executors +from pymongo.pool_options import _is_faas +from pymongo.read_preferences import MovingAverage +from pymongo.server_description import ServerDescription +from pymongo.synchronous.srv_resolver import _SrvResolver + +if TYPE_CHECKING: + from pymongo.synchronous.pool import ( # type: ignore[attr-defined] + Connection, + Pool, + _CancellationContext, + ) + from pymongo.synchronous.settings import TopologySettings + from pymongo.synchronous.topology import Topology + +_IS_SYNC = True + + +def _sanitize(error: Exception) -> None: + """PYTHON-2433 Clear error traceback info.""" + error.__traceback__ = None + error.__context__ = None + error.__cause__ = None + + +def _monotonic_duration(start: float) -> float: + """Return the duration since the given start time. + + Accounts for buggy platforms where time.monotonic() is not monotonic. + See PYTHON-4600. + """ + return max(0.0, time.monotonic() - start) + + +class MonitorBase: + def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): + """Base class to do periodic work on a background thread. + + The background thread is signaled to stop when the Topology or + this instance is freed. + """ + + # We strongly reference the executor and it weakly references us via + # this closure. When the monitor is freed, stop the executor soon. 
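+        # target() holds only a weak reference to this monitor; returning
+        # False once the monitor has been freed tells the executor to stop.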
+ def target() -> bool: + monitor = self_ref() + if monitor is None: + return False # Stop the executor. + monitor._run() # type:ignore[attr-defined] + return True + + executor = periodic_executor.PeriodicExecutor( + interval=interval, min_interval=min_interval, target=target, name=name + ) + + self._executor = executor + + def _on_topology_gc(dummy: Optional[Topology] = None) -> None: + # This prevents GC from waiting 10 seconds for hello to complete + # See test_cleanup_executors_on_client_del. + monitor = self_ref() + if monitor: + monitor.gc_safe_close() + + # Avoid cycles. When self or topology is freed, stop executor soon. + self_ref = weakref.ref(self, executor.close) + self._topology = weakref.proxy(topology, _on_topology_gc) + _register(self) + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + self._executor.open() + + def gc_safe_close(self) -> None: + """GC safe close.""" + self._executor.close() + + def close(self) -> None: + """Close and stop monitoring. + + open() restarts the monitor after closing. + """ + self.gc_safe_close() + + def join(self) -> None: + """Wait for the monitor to stop.""" + self._executor.join() + + def request_check(self) -> None: + """If the monitor is sleeping, wake it soon.""" + self._executor.wake() + + +class Monitor(MonitorBase): + def __init__( + self, + server_description: ServerDescription, + topology: Topology, + pool: Pool, + topology_settings: TopologySettings, + ): + """Class to monitor a MongoDB server on a background thread. + + Pass an initial ServerDescription, a Topology, a Pool, and + TopologySettings. + + The Topology is weakly referenced. The Pool must be exclusive to this + Monitor. + """ + super().__init__( + topology, + "pymongo_server_monitor_thread", + topology_settings.heartbeat_frequency, + common.MIN_HEARTBEAT_INTERVAL, + ) + self._server_description = server_description + self._pool = pool + self._settings = topology_settings + self._listeners = self._settings._pool_options._event_listeners + self._publish = self._listeners is not None and self._listeners.enabled_for_server_heartbeat + self._cancel_context: Optional[_CancellationContext] = None + self._conn_id: Optional[int] = None + self._rtt_monitor = _RttMonitor( + topology, + topology_settings, + topology._create_pool_for_monitor(server_description.address), + ) + if topology_settings.server_monitoring_mode == "stream": + self._stream = True + elif topology_settings.server_monitoring_mode == "poll": + self._stream = False + else: + self._stream = not _is_faas() + + def cancel_check(self) -> None: + """Cancel any concurrent hello check. + + Note: this is called from a weakref.proxy callback and MUST NOT take + any locks. + """ + context = self._cancel_context + if context: + # Note: we cannot close the socket because doing so may cause + # concurrent reads/writes to hang until a timeout occurs + # (depending on the platform). + context.cancel() + + def _start_rtt_monitor(self) -> None: + """Start an _RttMonitor that periodically runs ping.""" + # If this monitor is closed directly before (or during) this open() + # call, the _RttMonitor will not be closed. Checking if this monitor + # was closed directly after resolves the race. 
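+        # Open unconditionally, then re-check _stopped: whichever of open()
+        # and a concurrent close() ran last, the _RttMonitor ends up closed
+        # whenever this monitor is closed.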
+ self._rtt_monitor.open() + if self._executor._stopped: + self._rtt_monitor.close() + + def gc_safe_close(self) -> None: + self._executor.close() + self._rtt_monitor.gc_safe_close() + self.cancel_check() + + def join(self) -> None: + asyncio.gather(self._executor.join(), self._rtt_monitor.join(), return_exceptions=True) # type: ignore[func-returns-value] + + def close(self) -> None: + self.gc_safe_close() + self._rtt_monitor.close() + # Increment the generation and maybe close the socket. If the executor + # thread has the socket checked out, it will be closed when checked in. + self._reset_connection() + + def _reset_connection(self) -> None: + # Clear our pooled connection. + self._pool.reset() + + def _run(self) -> None: + try: + prev_sd = self._server_description + try: + self._server_description = self._check_server() + except _OperationCancelled as exc: + _sanitize(exc) + # Already closed the connection, wait for the next check. + self._server_description = ServerDescription( + self._server_description.address, error=exc + ) + if prev_sd.is_server_type_known: + # Immediately retry since we've already waited 500ms to + # discover that we've been cancelled. + self._executor.skip_sleep() + return + + # Update the Topology and clear the server pool on error. + self._topology.on_change( + self._server_description, + reset_pool=self._server_description.error, + interrupt_connections=isinstance(self._server_description.error, NetworkTimeout), + ) + + if self._stream and ( + self._server_description.is_server_type_known + and self._server_description.topology_version + ): + self._start_rtt_monitor() + # Immediately check for the next streaming response. + self._executor.skip_sleep() + + if self._server_description.error and prev_sd.is_server_type_known: + # Immediately retry on network errors. + self._executor.skip_sleep() + except ReferenceError: + # Topology was garbage-collected. + self.close() + finally: + if self._executor._stopped: + self._rtt_monitor.close() + + def _check_server(self) -> ServerDescription: + """Call hello or read the next streaming response. + + Returns a ServerDescription. + """ + self._conn_id = None + start = time.monotonic() + try: + return self._check_once() + except ReferenceError: + raise + except Exception as error: + _sanitize(error) + sd = self._server_description + address = sd.address + duration = _monotonic_duration(start) + awaited = bool(self._stream and sd.is_server_type_known and sd.topology_version) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_failed(address, duration, error, awaited) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_FAIL, + topologyId=self._topology._topology_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=duration * 1000, + failure=error, + driverConnectionId=self._conn_id, + ) + self._reset_connection() + if isinstance(error, _OperationCancelled): + raise + self._rtt_monitor.reset() + # Server type defaults to Unknown. + return ServerDescription(address, error=error) + + def _check_once(self) -> ServerDescription: + """A single attempt to call hello. + + Returns a ServerDescription, or raises an exception. + """ + address = self._server_description.address + sd = self._server_description + + # XXX: "awaited" could be incorrectly set to True in the rare case + # the pool checkout closes and recreates a connection. 
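+        # "awaited" means this check reads the next streaming hello response
+        # instead of sending a new polling hello command.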
+ awaited = bool( + self._pool.conns and self._stream and sd.is_server_type_known and sd.topology_version + ) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_started(address, awaited) + + if self._cancel_context and self._cancel_context.cancelled: + self._reset_connection() + with self._pool.checkout() as conn: + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_START, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + ) + + self._cancel_context = conn.cancel_context + # Record the connection id so we can later attach it to the failed log message. + self._conn_id = conn.id + response, round_trip_time = self._check_with_socket(conn) + if not response.awaitable: + self._rtt_monitor.add_sample(round_trip_time) + + avg_rtt, min_rtt = self._rtt_monitor.get() + sd = ServerDescription(address, response, avg_rtt, min_round_trip_time=min_rtt) + if self._publish: + assert self._listeners is not None + self._listeners.publish_server_heartbeat_succeeded( + address, round_trip_time, response, response.awaitable + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.HEARTBEAT_SUCCESS, + topologyId=self._topology._topology_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=address[0], + serverPort=address[1], + awaited=awaited, + durationMS=round_trip_time * 1000, + reply=response.document, + ) + return sd + + def _check_with_socket(self, conn: Connection) -> tuple[Hello, float]: # type: ignore[type-arg] + """Return (Hello, round_trip_time). + + Can raise ConnectionFailure or OperationFailure. + """ + start = time.monotonic() + if conn.more_to_come: + # Read the next streaming hello (MongoDB 4.4+). + response = Hello(conn._next_reply(), awaitable=True) + elif ( + self._stream and conn.performed_handshake and self._server_description.topology_version + ): + # Initiate streaming hello (MongoDB 4.4+). + response = conn._hello( + self._server_description.topology_version, + self._settings.heartbeat_frequency, + ) + else: + # New connection handshake or polling hello (MongoDB <4.4). + response = conn._hello(None, None) + duration = _monotonic_duration(start) + return response, duration + + +class SrvMonitor(MonitorBase): + def __init__(self, topology: Topology, topology_settings: TopologySettings): + """Class to poll SRV records on a background thread. + + Pass a Topology and a TopologySettings. + + The Topology is weakly referenced. + """ + super().__init__( + topology, + "pymongo_srv_polling_thread", + common.MIN_SRV_RESCAN_INTERVAL, + topology_settings.heartbeat_frequency, + ) + self._settings = topology_settings + self._seedlist = self._settings._seeds + assert isinstance(self._settings.fqdn, str) + self._fqdn: str = self._settings.fqdn + self._startup_time = time.monotonic() + + def _run(self) -> None: + # Don't poll right after creation, wait 60 seconds first + if time.monotonic() < self._startup_time + common.MIN_SRV_RESCAN_INTERVAL: + return + seedlist = self._get_seedlist() + if seedlist: + self._seedlist = seedlist + try: + self._topology.on_srv_update(self._seedlist) + except ReferenceError: + # Topology was garbage-collected. + self.close() + + def _get_seedlist(self) -> Optional[list[tuple[str, Any]]]: + """Poll SRV records for a seedlist. 
+
+        Returns a list of (host, port) seeds, or None if the lookup failed.
+        """
+        try:
+            resolver = _SrvResolver(
+                self._fqdn,
+                self._settings.pool_options.connect_timeout,
+                self._settings.srv_service_name,
+            )
+            seedlist, ttl = resolver.get_hosts_and_min_ttl()
+            if len(seedlist) == 0:
+                # As per the spec: this should be treated as a failure.
+                raise Exception
+        except Exception as exc:
+            # As per the spec, upon encountering an error:
+            # - An error must not be raised
+            # - SRV records must be rescanned every heartbeatFrequencyMS
+            # - Topology must be left unchanged
+            self.request_check()
+            _debug_log(_SDAM_LOGGER, message="SRV monitor check failed", failure=repr(exc))
+            return None
+        else:
+            self._executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL))
+            return seedlist
+
+
+class _RttMonitor(MonitorBase):
+    def __init__(self, topology: Topology, topology_settings: TopologySettings, pool: Pool):
+        """Maintain round trip times for a server.
+
+        The Topology is weakly referenced.
+        """
+        super().__init__(
+            topology,
+            "pymongo_server_rtt_thread",
+            topology_settings.heartbeat_frequency,
+            common.MIN_HEARTBEAT_INTERVAL,
+        )
+
+        self._pool = pool
+        self._moving_average = MovingAverage()
+        self._moving_min = MovingMinimum()
+        self._lock = _create_lock()
+
+    def close(self) -> None:
+        self.gc_safe_close()
+        # Increment the generation and maybe close the socket. If the executor
+        # thread has the socket checked out, it will be closed when checked in.
+        self._pool.reset()
+
+    def add_sample(self, sample: float) -> None:
+        """Add an RTT sample."""
+        with self._lock:
+            self._moving_average.add_sample(sample)
+            self._moving_min.add_sample(sample)
+
+    def get(self) -> tuple[Optional[float], float]:
+        """Get the calculated average (or None if no samples yet) and the minimum."""
+        with self._lock:
+            return self._moving_average.get(), self._moving_min.get()
+
+    def reset(self) -> None:
+        """Reset the average RTT."""
+        with self._lock:
+            self._moving_average.reset()
+            self._moving_min.reset()
+
+    def _run(self) -> None:
+        try:
+            # NOTE: This thread is only run when using the streaming
+            # heartbeat protocol (MongoDB 4.4+).
+            # XXX: Skip check if the server is unknown?
+            rtt = self._ping()
+            self.add_sample(rtt)
+        except ReferenceError:
+            # Topology was garbage-collected.
+            self.close()
+        except Exception:
+            self._pool.reset()
+
+    def _ping(self) -> float:
+        """Run a "hello" command and return the RTT."""
+        with self._pool.checkout() as conn:
+            if self._executor._stopped:
+                raise Exception("_RttMonitor closed")
+            start = time.monotonic()
+            conn.hello()
+            return _monotonic_duration(start)
+
+
+# Close monitors to cancel any in progress streaming checks before joining
+# executor threads. For an explanation of how this works see the comment
+# about _EXECUTORS in periodic_executor.py.
+_MONITORS = set()
+
+
+def _register(monitor: MonitorBase) -> None:
+    ref = weakref.ref(monitor, _unregister)
+    _MONITORS.add(ref)
+
+
+def _unregister(monitor_ref: weakref.ReferenceType[MonitorBase]) -> None:
+    _MONITORS.remove(monitor_ref)
+
+
+def _shutdown_monitors() -> None:
+    if _MONITORS is None:
+        return
+
+    # Copy the set. Closing monitors removes them.
+    monitors = list(_MONITORS)
+
+    # Close all monitors.
+    for ref in monitors:
+        monitor = ref()
+        if monitor:
+            monitor.gc_safe_close()
+
+    monitor = None
+
+
+def _shutdown_resources() -> None:
+    # _shutdown_monitors/_shutdown_executors may already be GC'd at shutdown.
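+    # Bind each module-level function to a local before the truthiness check;
+    # at interpreter shutdown module globals may already have been set to None.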
+ shutdown = _shutdown_monitors + if shutdown: # type:ignore[truthy-function] + shutdown() + shutdown = _shutdown_executors + if shutdown: # type:ignore[truthy-function] + shutdown() + + +if _IS_SYNC: + atexit.register(_shutdown_resources) diff --git a/pymongo/synchronous/network.py b/pymongo/synchronous/network.py new file mode 100644 index 0000000000..7d9bca4d58 --- /dev/null +++ b/pymongo/synchronous/network.py @@ -0,0 +1,298 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Internal network layer helper methods.""" +from __future__ import annotations + +import datetime +import logging +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +from bson import _decode_all_selective +from pymongo import _csot, helpers_shared, message +from pymongo.compression_support import _NO_COMPRESSION +from pymongo.errors import ( + NotPrimaryError, + OperationFailure, +) +from pymongo.logger import _COMMAND_LOGGER, _CommandStatusMessage, _debug_log +from pymongo.message import _OpMsg +from pymongo.monitoring import _is_speculative_authenticate +from pymongo.network_layer import ( + receive_message, + sendall, +) + +if TYPE_CHECKING: + from bson import CodecOptions + from pymongo.compression_support import SnappyContext, ZlibContext, ZstdContext + from pymongo.monitoring import _EventListeners + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import _ServerMode + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection + from pymongo.typings import _Address, _CollationIn, _DocumentOut, _DocumentType + from pymongo.write_concern import WriteConcern + +_IS_SYNC = True + + +def command( + conn: Connection, + dbname: str, + spec: MutableMapping[str, Any], + is_mongos: bool, + read_preference: Optional[_ServerMode], + codec_options: CodecOptions[_DocumentType], + session: Optional[ClientSession], + client: Optional[MongoClient[Any]], + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + address: Optional[_Address] = None, + listeners: Optional[_EventListeners] = None, + max_bson_size: Optional[int] = None, + read_concern: Optional[ReadConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + compression_ctx: Union[SnappyContext, ZlibContext, ZstdContext, None] = None, + use_op_msg: bool = False, + unacknowledged: bool = False, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + write_concern: Optional[WriteConcern] = None, +) -> _DocumentType: + """Execute a command over the socket, or raise socket.error. + + :param conn: a Connection instance + :param dbname: name of the database on which to run the command + :param spec: a command document as an ordered dict type, eg SON. + :param is_mongos: are we connected to a mongos? 
+ :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param session: optional ClientSession instance. + :param client: optional MongoClient instance for updating $clusterTime. + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param address: the (host, port) of `conn` + :param listeners: An instance of :class:`~pymongo.monitoring.EventListeners` + :param max_bson_size: The maximum encoded bson size for this server + :param read_concern: The read concern for this command. + :param parse_write_concern_error: Whether to parse the ``writeConcernError`` + field in the command response. + :param collation: The collation for this command. + :param compression_ctx: optional compression Context. + :param use_op_msg: True if we should use OP_MSG. + :param unacknowledged: True if this is an unacknowledged command. + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + :param exhaust_allowed: True if we should enable OP_MSG exhaustAllowed. + """ + name = next(iter(spec)) + ns = dbname + ".$cmd" + speculative_hello = False + + # Publish the original command document, perhaps with lsid and $clusterTime. + orig = spec + if is_mongos and not use_op_msg: + assert read_preference is not None + spec = message._maybe_add_read_preference(spec, read_preference) + if read_concern and not (session and session.in_transaction): + if read_concern.level: + spec["readConcern"] = read_concern.document + if session: + session._update_read_concern(spec, conn) + if collation is not None: + spec["collation"] = collation + + publish = listeners is not None and listeners.enabled_for_commands + start = datetime.datetime.now() + if publish: + speculative_hello = _is_speculative_authenticate(name, spec) + + if compression_ctx and name.lower() in _NO_COMPRESSION: + compression_ctx = None + + if client and client._encrypter and not client._encrypter._bypass_auto_encryption: + spec = orig = client._encrypter.encrypt(dbname, spec, codec_options) + + # Support CSOT + if client: + conn.apply_timeout(client, spec) + _csot.apply_write_concern(spec, write_concern) + + if use_op_msg: + flags = _OpMsg.MORE_TO_COME if unacknowledged else 0 + flags |= _OpMsg.EXHAUST_ALLOWED if exhaust_allowed else 0 + request_id, msg, size, max_doc_size = message._op_msg( + flags, spec, dbname, read_preference, codec_options, ctx=compression_ctx + ) + # If this is an unacknowledged write then make sure the encoded doc(s) + # are small enough, otherwise rely on the server to return an error. 
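+        # With MORE_TO_COME set the server sends no reply at all, so an
+        # oversized document must be rejected on the client side.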
+ if unacknowledged and max_bson_size is not None and max_doc_size > max_bson_size: + message._raise_document_too_large(name, size, max_bson_size) + else: + request_id, msg, size = message._query( + 0, ns, 0, -1, spec, None, codec_options, compression_ctx + ) + + if max_bson_size is not None and size > max_bson_size + message._COMMAND_OVERHEAD: + message._raise_document_too_large(name, size, max_bson_size + message._COMMAND_OVERHEAD) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=spec, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_start( + orig, + dbname, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + sendall(conn.conn.get_conn, msg) + if use_op_msg and unacknowledged: + # Unacknowledged, fake a successful command response. + reply = None + response_doc: _DocumentOut = {"ok": 1} + else: + reply = receive_message(conn, request_id) + conn.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response( + codec_options=codec_options, user_fields=user_fields + ) + + response_doc = unpacked_docs[0] + if not conn.ready: + cluster_time = response_doc.get("$clusterTime") + if cluster_time: + conn._cluster_time = cluster_time + if client: + client._process_response(response_doc, session) + if check: + helpers_shared._check_command_response( + response_doc, + conn.max_wire_version, + allowable_errors, + parse_write_concern_error=parse_write_concern_error, + ) + except Exception as exc: + duration = datetime.datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = message._convert_exception(exc) + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_failure( + duration, + failure, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbname, + ) + raise + duration = datetime.datetime.now() - start + if client is not None: + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=response_doc, + commandName=next(iter(spec)), + databaseName=dbname, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + 
serviceId=conn.service_id, + speculative_authenticate="speculativeAuthenticate" in orig, + ) + if publish: + assert listeners is not None + assert address is not None + listeners.publish_command_success( + duration, + response_doc, + name, + request_id, + address, + conn.server_connection_id, + service_id=conn.service_id, + speculative_hello=speculative_hello, + database_name=dbname, + ) + + if client and client._encrypter and reply: + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + response_doc = cast( + "_DocumentOut", _decode_all_selective(decrypted, codec_options, user_fields)[0] + ) + + return response_doc # type: ignore[return-value] diff --git a/pymongo/synchronous/pool.py b/pymongo/synchronous/pool.py new file mode 100644 index 0000000000..66258fda18 --- /dev/null +++ b/pymongo/synchronous/pool.py @@ -0,0 +1,1475 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + +from __future__ import annotations + +import asyncio +import collections +import contextlib +import logging +import os +import sys +import time +import weakref +from typing import ( + TYPE_CHECKING, + Any, + Generator, + Mapping, + MutableMapping, + NoReturn, + Optional, + Sequence, + Union, +) + +from bson import DEFAULT_CODEC_OPTIONS +from pymongo import _csot, helpers_shared +from pymongo.common import ( + MAX_BSON_SIZE, + MAX_MESSAGE_SIZE, + MAX_WIRE_VERSION, + MAX_WRITE_BATCH_SIZE, + ORDERED_TYPES, +) +from pymongo.errors import ( # type:ignore[attr-defined] + AutoReconnect, + ConfigurationError, + DocumentTooLarge, + ExecutionTimeout, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _get_timeout_details, format_timeout_details +from pymongo.lock import ( + _cond_wait, + _create_condition, + _create_lock, +) +from pymongo.logger import ( + _CONNECTION_LOGGER, + _ConnectionStatusMessage, + _debug_log, + _verbose_connection_error_reason, +) +from pymongo.monitoring import ( + ConnectionCheckOutFailedReason, + ConnectionClosedReason, +) +from pymongo.network_layer import NetworkingInterface, receive_message, sendall +from pymongo.pool_options import PoolOptions +from pymongo.pool_shared import ( + SSLErrors, + _CancellationContext, + _configured_socket_interface, + _raise_connection_failure, +) +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import _add_to_command +from pymongo.server_type import SERVER_TYPE +from pymongo.socket_checker import SocketChecker +from pymongo.synchronous.client_session import _validate_session_write_concern +from pymongo.synchronous.helpers import _handle_reauth +from pymongo.synchronous.network import command + +if TYPE_CHECKING: + from bson import CodecOptions + from bson.objectid import ObjectId + from pymongo.compression_support import ( + SnappyContext, + ZlibContext, + ZstdContext, + ) + from pymongo.message import _OpMsg, _OpReply + from pymongo.read_concern 
import ReadConcern
+    from pymongo.read_preferences import _ServerMode
+    from pymongo.synchronous.auth import _AuthContext
+    from pymongo.synchronous.client_session import ClientSession
+    from pymongo.synchronous.mongo_client import MongoClient, _MongoClientErrorHandler
+    from pymongo.typings import _Address, _CollationIn
+    from pymongo.write_concern import WriteConcern
+
+try:
+    from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl
+
+    def _set_non_inheritable_non_atomic(fd: int) -> None:
+        """Set the close-on-exec flag on the given file descriptor."""
+        flags = fcntl(fd, F_GETFD)
+        fcntl(fd, F_SETFD, flags | FD_CLOEXEC)
+
+except ImportError:
+    # Windows, various platforms we don't claim to support
+    # (Jython, IronPython, ..), systems that don't provide
+    # everything we need from fcntl, etc.
+    def _set_non_inheritable_non_atomic(fd: int) -> None:  # noqa: ARG001
+        """Dummy function for platforms that don't provide fcntl."""
+
+
+_IS_SYNC = True
+
+
+class Connection:
+    """Store a connection with some metadata.
+
+    :param conn: a raw connection object
+    :param pool: a Pool instance
+    :param address: the server's (host, port)
+    :param id: the id of this socket in its pool
+    :param is_sdam: SDAM connections do not call hello on creation
+    """
+
+    def __init__(
+        self,
+        conn: NetworkingInterface,
+        pool: Pool,
+        address: tuple[str, int],
+        id: int,
+        is_sdam: bool,
+    ):
+        self.pool_ref = weakref.ref(pool)
+        self.conn = conn
+        self.address = address
+        self.id = id
+        self.is_sdam = is_sdam
+        self.closed = False
+        self.last_checkin_time = time.monotonic()
+        self.performed_handshake = False
+        self.is_writable: bool = False
+        self.max_wire_version = MAX_WIRE_VERSION
+        self.max_bson_size = MAX_BSON_SIZE
+        self.max_message_size = MAX_MESSAGE_SIZE
+        self.max_write_batch_size = MAX_WRITE_BATCH_SIZE
+        self.supports_sessions = False
+        self.hello_ok: bool = False
+        self.is_mongos = False
+        self.op_msg_enabled = False
+        self.listeners = pool.opts._event_listeners
+        self.enabled_for_cmap = pool.enabled_for_cmap
+        self.enabled_for_logging = pool.enabled_for_logging
+        self.compression_settings = pool.opts._compression_settings
+        self.compression_context: Union[SnappyContext, ZlibContext, ZstdContext, None] = None
+        self.socket_checker: SocketChecker = SocketChecker()
+        self.oidc_token_gen_id: Optional[int] = None
+        # Support for mechanism negotiation on the initial handshake.
+        self.negotiated_mechs: Optional[list[str]] = None
+        self.auth_ctx: Optional[_AuthContext] = None
+
+        # The pool's generation changes with each reset() so we can close
+        # sockets created before the last reset.
+        self.pool_gen = pool.gen
+        self.generation = self.pool_gen.get_overall()
+        self.ready = False
+        self.cancel_context: _CancellationContext = _CancellationContext()
+        self.opts = pool.opts
+        self.more_to_come: bool = False
+        # For load balancer support.
+        self.service_id: Optional[ObjectId] = None
+        self.server_connection_id: Optional[int] = None
+        # When executing a transaction in load balancing mode, this flag is
+        # set to true to indicate that the session now owns the connection.
+        self.pinned_txn = False
+        self.pinned_cursor = False
+        self.active = False
+        self.last_timeout = self.opts.socket_timeout
+        self.connect_rtt = 0.0
+        self._client_id = pool._client_id
+        self.creation_time = time.monotonic()
+        # For gossiping $clusterTime from the connection handshake to the client.
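+        # network.command() records $clusterTime here from replies received
+        # before the connection is marked ready (i.e. during the handshake).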
+ self._cluster_time = None + + def set_conn_timeout(self, timeout: Optional[float]) -> None: + """Cache last timeout to avoid duplicate calls to conn.settimeout.""" + if timeout == self.last_timeout: + return + self.last_timeout = timeout + self.conn.get_conn.settimeout(timeout) + + def apply_timeout( + self, client: MongoClient[Any], cmd: Optional[MutableMapping[str, Any]] + ) -> Optional[float]: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + # Reset the socket timeout unless we're performing a streaming monitor check. + if not self.more_to_come: + self.set_conn_timeout(self.opts.socket_timeout) + return None + # RTT validation. + rtt = _csot.get_rtt() + if rtt is None: + rtt = self.connect_rtt + max_time_ms = timeout - rtt + if max_time_ms < 0: + timeout_details = _get_timeout_details(self.opts) + formatted = format_timeout_details(timeout_details) + # CSOT: raise an error without running the command since we know it will time out. + errmsg = f"operation would exceed time limit, remaining timeout:{timeout:.5f} <= network round trip time:{rtt:.5f} {formatted}" + raise ExecutionTimeout( + errmsg, + 50, + {"ok": 0, "errmsg": errmsg, "code": 50}, + self.max_wire_version, + ) + if cmd is not None: + cmd["maxTimeMS"] = int(max_time_ms * 1000) + self.set_conn_timeout(timeout) + return timeout + + def pin_txn(self) -> None: + self.pinned_txn = True + assert not self.pinned_cursor + + def pin_cursor(self) -> None: + self.pinned_cursor = True + assert not self.pinned_txn + + def unpin(self) -> None: + pool = self.pool_ref() + if pool: + pool.checkin(self) + else: + self.close_conn(ConnectionClosedReason.STALE) + + def hello_cmd(self) -> dict[str, Any]: + # Handshake spec requires us to use OP_MSG+hello command for the + # initial handshake in load balanced or stable API mode. + if self.opts.server_api or self.hello_ok or self.opts.load_balanced: + self.op_msg_enabled = True + return {HelloCompat.CMD: 1} + else: + return {HelloCompat.LEGACY_CMD: 1, "helloOk": True} + + def hello(self) -> Hello[dict[str, Any]]: + return self._hello(None, None) + + def _hello( + self, + topology_version: Optional[Any], + heartbeat_frequency: Optional[int], + ) -> Hello[dict[str, Any]]: + cmd = self.hello_cmd() + performing_handshake = not self.performed_handshake + awaitable = False + if performing_handshake: + self.performed_handshake = True + cmd["client"] = self.opts.metadata + if self.compression_settings: + cmd["compression"] = self.compression_settings.compressors + if self.opts.load_balanced: + cmd["loadBalanced"] = True + elif topology_version is not None: + cmd["topologyVersion"] = topology_version + assert heartbeat_frequency is not None + cmd["maxAwaitTimeMS"] = int(heartbeat_frequency * 1000) + awaitable = True + # If connect_timeout is None there is no timeout. + if self.opts.connect_timeout: + self.set_conn_timeout(self.opts.connect_timeout + heartbeat_frequency) + + creds = self.opts._credentials + if creds: + if creds.mechanism == "DEFAULT" and creds.username: + cmd["saslSupportedMechs"] = creds.source + "." 
+ creds.username + from pymongo.synchronous import auth + + auth_ctx = auth._AuthContext.from_credentials(creds, self.address) + if auth_ctx: + speculative_authenticate = auth_ctx.speculate_command() + if speculative_authenticate is not None: + cmd["speculativeAuthenticate"] = speculative_authenticate + else: + auth_ctx = None + + if performing_handshake: + start = time.monotonic() + doc = self.command("admin", cmd, publish_events=False, exhaust_allowed=awaitable) + if performing_handshake: + self.connect_rtt = time.monotonic() - start + hello = Hello(doc, awaitable=awaitable) + self.is_writable = hello.is_writable + self.max_wire_version = hello.max_wire_version + self.max_bson_size = hello.max_bson_size + self.max_message_size = hello.max_message_size + self.max_write_batch_size = hello.max_write_batch_size + self.supports_sessions = ( + hello.logical_session_timeout_minutes is not None and hello.is_readable + ) + self.logical_session_timeout_minutes: Optional[int] = hello.logical_session_timeout_minutes + self.hello_ok = hello.hello_ok + self.is_repl = hello.server_type in ( + SERVER_TYPE.RSPrimary, + SERVER_TYPE.RSSecondary, + SERVER_TYPE.RSArbiter, + SERVER_TYPE.RSOther, + SERVER_TYPE.RSGhost, + ) + self.is_standalone = hello.server_type == SERVER_TYPE.Standalone + self.is_mongos = hello.server_type == SERVER_TYPE.Mongos + if performing_handshake and self.compression_settings: + ctx = self.compression_settings.get_compression_context(hello.compressors) + self.compression_context = ctx + + self.op_msg_enabled = True + self.server_connection_id = hello.connection_id + if creds: + self.negotiated_mechs = hello.sasl_supported_mechs + if auth_ctx: + auth_ctx.parse_response(hello) # type:ignore[arg-type] + if auth_ctx.speculate_succeeded(): + self.auth_ctx = auth_ctx + if self.opts.load_balanced: + if not hello.service_id: + raise ConfigurationError( + "Driver attempted to initialize in load balancing mode," + " but the server does not support this mode" + ) + self.service_id = hello.service_id + self.generation = self.pool_gen.get(self.service_id) + return hello + + def _next_reply(self) -> dict[str, Any]: + reply = self.receive_message(None) + self.more_to_come = reply.more_to_come + unpacked_docs = reply.unpack_response() + response_doc = unpacked_docs[0] + helpers_shared._check_command_response(response_doc, self.max_wire_version) + return response_doc + + @_handle_reauth + def command( + self, + dbname: str, + spec: MutableMapping[str, Any], + read_preference: _ServerMode = ReadPreference.PRIMARY, + codec_options: CodecOptions[Mapping[str, Any]] = DEFAULT_CODEC_OPTIONS, # type: ignore[assignment] + check: bool = True, + allowable_errors: Optional[Sequence[Union[str, int]]] = None, + read_concern: Optional[ReadConcern] = None, + write_concern: Optional[WriteConcern] = None, + parse_write_concern_error: bool = False, + collation: Optional[_CollationIn] = None, + session: Optional[ClientSession] = None, + client: Optional[MongoClient[Any]] = None, + retryable_write: bool = False, + publish_events: bool = True, + user_fields: Optional[Mapping[str, Any]] = None, + exhaust_allowed: bool = False, + ) -> dict[str, Any]: + """Execute a command or raise an error. 
+ + :param dbname: name of the database on which to run the command + :param spec: a command document as a dict, SON, or mapping object + :param read_preference: a read preference + :param codec_options: a CodecOptions instance + :param check: raise OperationFailure if there are errors + :param allowable_errors: errors to ignore if `check` is True + :param read_concern: The read concern for this command. + :param write_concern: The write concern for this command. + :param parse_write_concern_error: Whether to parse the + ``writeConcernError`` field in the command response. + :param collation: The collation for this command. + :param session: optional ClientSession instance. + :param client: optional MongoClient for gossipping $clusterTime. + :param retryable_write: True if this command is a retryable write. + :param publish_events: Should we publish events for this command? + :param user_fields: Response fields that should be decoded + using the TypeDecoders from codec_options, passed to + bson._decode_all_selective. + """ + self.validate_session(client, session) + session = _validate_session_write_concern(session, write_concern) + + # Ensure command name remains in first place. + if not isinstance(spec, ORDERED_TYPES): # type:ignore[arg-type] + spec = dict(spec) + + if not (write_concern is None or write_concern.acknowledged or collation is None): + raise ConfigurationError("Collation is unsupported for unacknowledged writes.") + + self.add_server_api(spec) + if session: + session._apply_to(spec, retryable_write, read_preference, self) + self.send_cluster_time(spec, session, client) + listeners = self.listeners if publish_events else None + unacknowledged = bool(write_concern and not write_concern.acknowledged) + if self.op_msg_enabled: + self._raise_if_not_writable(unacknowledged) + try: + return command( + self, + dbname, + spec, + self.is_mongos, + read_preference, + codec_options, # type: ignore[arg-type] + session, + client, + check, + allowable_errors, + self.address, + listeners, + self.max_bson_size, + read_concern, + parse_write_concern_error=parse_write_concern_error, + collation=collation, + compression_ctx=self.compression_context, + use_op_msg=self.op_msg_enabled, + unacknowledged=unacknowledged, + user_fields=user_fields, + exhaust_allowed=exhaust_allowed, + write_concern=write_concern, + ) + except (OperationFailure, NotPrimaryError): + raise + # Catch socket.error, KeyboardInterrupt, CancelledError, etc. and close ourselves. + except BaseException as error: + self._raise_connection_failure(error) + + def send_message(self, message: bytes, max_doc_size: int) -> None: + """Send a raw BSON message or raise ConnectionFailure. + + If a network exception is raised, the socket is closed. + """ + if self.max_bson_size is not None and max_doc_size > self.max_bson_size: + raise DocumentTooLarge( + "BSON document too large (%d bytes) - the connected server " + "supports BSON document sizes up to %d bytes." % (max_doc_size, self.max_bson_size) + ) + + try: + sendall(self.conn.get_conn, message) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException as error: + self._raise_connection_failure(error) + + def receive_message(self, request_id: Optional[int]) -> Union[_OpReply, _OpMsg]: + """Receive a raw BSON message or raise ConnectionFailure. + + If any exception is raised, the socket is closed. + """ + try: + return receive_message(self, request_id, self.max_message_size) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. 
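+        # _raise_connection_failure() (defined below) closes this connection
+        # and converts network errors into ConnectionFailure subclasses.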
+ except BaseException as error: + self._raise_connection_failure(error) + + def _raise_if_not_writable(self, unacknowledged: bool) -> None: + """Raise NotPrimaryError on unacknowledged write if this socket is not + writable. + """ + if unacknowledged and not self.is_writable: + # Write won't succeed, bail as if we'd received a not primary error. + raise NotPrimaryError("not primary", {"ok": 0, "errmsg": "not primary", "code": 10107}) + + def unack_write(self, msg: bytes, max_doc_size: int) -> None: + """Send unack OP_MSG. + + Can raise ConnectionFailure or InvalidDocument. + + :param msg: bytes, an OP_MSG message. + :param max_doc_size: size in bytes of the largest document in `msg`. + """ + self._raise_if_not_writable(True) + self.send_message(msg, max_doc_size) + + def write_command( + self, request_id: int, msg: bytes, codec_options: CodecOptions[Mapping[str, Any]] + ) -> dict[str, Any]: + """Send "insert" etc. command, returning response as a dict. + + Can raise ConnectionFailure or OperationFailure. + + :param request_id: an int. + :param msg: bytes, the command message. + """ + self.send_message(msg, 0) + reply = self.receive_message(request_id) + result = reply.command_response(codec_options) + + # Raises NotPrimaryError or OperationFailure. + helpers_shared._check_command_response(result, self.max_wire_version) + return result + + def authenticate(self, reauthenticate: bool = False) -> None: + """Authenticate to the server if needed. + + Can raise ConnectionFailure or OperationFailure. + """ + # CMAP spec says to publish the ready event only after authenticating + # the connection. + if reauthenticate: + if self.performed_handshake: + # Existing auth_ctx is stale, remove it. + self.auth_ctx = None + self.ready = False + if not self.ready: + creds = self.opts._credentials + if creds: + from pymongo.synchronous import auth + + auth.authenticate(creds, self, reauthenticate=reauthenticate) + self.ready = True + duration = time.monotonic() - self.creation_time + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_ready(self.address, self.id, duration) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_READY, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + durationMS=duration, + ) + + def validate_session( + self, client: Optional[MongoClient[Any]], session: Optional[ClientSession] + ) -> None: + """Validate this session before use with client. + + Raises error if the client is not the one that created the session. 
+ """ + if session: + if session._client is not client: + raise InvalidOperation("Can only use session with the MongoClient that started it") + + def close_conn(self, reason: Optional[str]) -> None: + """Close this connection with a reason.""" + if self.closed: + return + self._close_conn() + if reason: + if self.enabled_for_cmap: + assert self.listeners is not None + self.listeners.publish_connection_closed(self.address, self.id, reason) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=self.id, + reason=_verbose_connection_error_reason(reason), + error=reason, + ) + + def _close_conn(self) -> None: + """Close this connection.""" + if self.closed: + return + self.closed = True + self.cancel_context.cancel() + # Note: We catch exceptions to avoid spurious errors on interpreter + # shutdown. + try: + self.conn.close() + except Exception: # noqa: S110 + pass + + def conn_closed(self) -> bool: + """Return True if we know socket has been closed, False otherwise.""" + if _IS_SYNC: + return self.socket_checker.socket_closed(self.conn.get_conn) + else: + return self.conn.is_closing() + + def send_cluster_time( + self, + command: MutableMapping[str, Any], + session: Optional[ClientSession], + client: Optional[MongoClient[Any]], + ) -> None: + """Add $clusterTime.""" + if client: + client._send_cluster_time(command, session) + + def add_server_api(self, command: MutableMapping[str, Any]) -> None: + """Add server_api parameters.""" + if self.opts.server_api: + _add_to_command(command, self.opts.server_api) + + def update_last_checkin_time(self) -> None: + self.last_checkin_time = time.monotonic() + + def update_is_writable(self, is_writable: bool) -> None: + self.is_writable = is_writable + + def idle_time_seconds(self) -> float: + """Seconds since this socket was last checked into its pool.""" + return time.monotonic() - self.last_checkin_time + + def _raise_connection_failure(self, error: BaseException) -> NoReturn: + # Catch *all* exceptions from socket methods and close the socket. In + # regular Python, socket operations only raise socket.error, even if + # the underlying cause was a Ctrl-C: a signal raised during socket.recv + # is expressed as an EINTR error from poll. See internal_select_ex() in + # socketmodule.c. All error codes from poll become socket.error at + # first. Eventually in PyEval_EvalFrameEx the interpreter checks for + # signals and throws KeyboardInterrupt into the current frame on the + # main thread. + # + # But in Gevent, the polling mechanism (epoll, kqueue, + # ..) is called in Python code, which experiences the signal as a + # KeyboardInterrupt from the start, rather than as an initial + # socket.error, so we catch that, close the socket, and reraise it. + # + # The connection closed event will be emitted later in checkin. + if self.ready: + reason = None + else: + reason = ConnectionClosedReason.ERROR + self.close_conn(reason) + # SSLError from PyOpenSSL inherits directly from Exception. 
+ if isinstance(error, (IOError, OSError, *SSLErrors)): + details = _get_timeout_details(self.opts) + _raise_connection_failure(self.address, error, timeout_details=details) + else: + raise + + def __eq__(self, other: Any) -> bool: + return self.conn == other.conn + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __hash__(self) -> int: + return hash(self.conn) + + def __repr__(self) -> str: + return "Connection({}){} at {}".format( + repr(self.conn), + self.closed and " CLOSED" or "", + id(self), + ) + + +class _PoolClosedError(PyMongoError): + """Internal error raised when a thread tries to get a connection from a + closed pool. + """ + + +class _PoolGeneration: + def __init__(self) -> None: + # Maps service_id to generation. + self._generations: dict[ObjectId, int] = collections.defaultdict(int) + # Overall pool generation. + self._generation = 0 + + def get(self, service_id: Optional[ObjectId]) -> int: + """Get the generation for the given service_id.""" + if service_id is None: + return self._generation + return self._generations[service_id] + + def get_overall(self) -> int: + """Get the Pool's overall generation.""" + return self._generation + + def inc(self, service_id: Optional[ObjectId]) -> None: + """Increment the generation for the given service_id.""" + self._generation += 1 + if service_id is None: + for service_id in self._generations: + self._generations[service_id] += 1 + else: + self._generations[service_id] += 1 + + def stale(self, gen: int, service_id: Optional[ObjectId]) -> bool: + """Return if the given generation for a given service_id is stale.""" + return gen != self.get(service_id) + + +class PoolState: + PAUSED = 1 + READY = 2 + CLOSED = 3 + + +# Do *not* explicitly inherit from object or Jython won't call __del__ +# https://bugs.jython.org/issue1057 +class Pool: + def __init__( + self, + address: _Address, + options: PoolOptions, + is_sdam: bool = False, + client_id: Optional[ObjectId] = None, + ): + """ + :param address: a (hostname, port) tuple + :param options: a PoolOptions instance + :param is_sdam: whether to call hello for each new Connection + """ + if options.pause_enabled: + self.state = PoolState.PAUSED + else: + self.state = PoolState.READY + # Check a socket's health with socket_closed() every once in a while. + # Can override for testing: 0 to always check, None to never check. + self._check_interval_seconds = 1 + # LIFO pool. Sockets are ordered on idle time. Sockets claimed + # and returned to pool from the left side. Stale sockets removed + # from the right side. + self.conns: collections.deque[Connection] = collections.deque() + self.active_contexts: set[_CancellationContext] = set() + self.lock = _create_lock() + self._max_connecting_cond = _create_condition(self.lock) + self.active_sockets = 0 + # Monotonically increasing connection ID required for CMAP Events. + self.next_connection_id = 1 + # Track whether the sockets in this pool are writeable or not. + self.is_writable: Optional[bool] = None + + # Keep track of resets, so we notice sockets created before the most + # recent reset and close them. + # self.generation = 0 + self.gen = _PoolGeneration() + self.pid = os.getpid() + self.address = address + self.opts = options + self.is_sdam = is_sdam + # Don't publish events or logs in Monitor pools. 
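+        # CMAP events additionally require CMAP-enabled listeners; debug
+        # logging only requires that this is not an SDAM (monitor) pool.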
+ self.enabled_for_cmap = ( + not self.is_sdam + and self.opts._event_listeners is not None + and self.opts._event_listeners.enabled_for_cmap + ) + self.enabled_for_logging = not self.is_sdam + + # The first portion of the wait queue. + # Enforces: maxPoolSize + # Also used for: clearing the wait queue + self.size_cond = _create_condition(self.lock) + self.requests = 0 + self.max_pool_size = self.opts.max_pool_size + if not self.max_pool_size: + self.max_pool_size = float("inf") + # The second portion of the wait queue. + # Enforces: maxConnecting + # Also used for: clearing the wait queue + self._max_connecting_cond = _create_condition(self.lock) + self._max_connecting = self.opts.max_connecting + self._pending = 0 + self._client_id = client_id + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_created( + self.address, self.opts.non_default_options + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CREATED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + **self.opts.non_default_options, + ) + # Similar to active_sockets but includes threads in the wait queue. + self.operation_count: int = 0 + # Retain references to pinned connections to prevent the CPython GC + # from thinking that a cursor's pinned connection can be GC'd when the + # cursor is GC'd (see PYTHON-2751). + self.__pinned_sockets: set[Connection] = set() + self.ncursors = 0 + self.ntxns = 0 + + def ready(self) -> None: + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + self.state = PoolState.READY + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_pool_ready(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_READY, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + + @property + def closed(self) -> bool: + return self.state == PoolState.CLOSED + + def _reset( + self, + close: bool, + pause: bool = True, + service_id: Optional[ObjectId] = None, + interrupt_connections: bool = False, + ) -> None: + old_state = self.state + with self.size_cond: + if self.closed: + return + if self.opts.pause_enabled and pause and not self.opts.load_balanced: + old_state, self.state = self.state, PoolState.PAUSED + self.gen.inc(service_id) + newpid = os.getpid() + if self.pid != newpid: + self.pid = newpid + self.active_sockets = 0 + self.operation_count = 0 + if service_id is None: + sockets, self.conns = self.conns, collections.deque() + else: + discard: collections.deque = collections.deque() # type: ignore[type-arg] + keep: collections.deque = collections.deque() # type: ignore[type-arg] + for conn in self.conns: + if conn.service_id == service_id: + discard.append(conn) + else: + keep.append(conn) + sockets = discard + self.conns = keep + + if close: + self.state = PoolState.CLOSED + # Clear the wait queue + self._max_connecting_cond.notify_all() + self.size_cond.notify_all() + + if interrupt_connections: + for context in self.active_contexts: + context.cancel() + + listeners = self.opts._event_listeners + # CMAP spec says that close() MUST close sockets before publishing the + # PoolClosedEvent but that reset() SHOULD close 
sockets *after* + # publishing the PoolClearedEvent. + if close: + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.POOL_CLOSED) for conn in sockets], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in sockets: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_closed(self.address) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + ) + else: + if old_state != PoolState.PAUSED: + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_pool_cleared( + self.address, + service_id=service_id, + interrupt_connections=interrupt_connections, + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.POOL_CLEARED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + serviceId=service_id, + ) + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.STALE) for conn in sockets], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in sockets: + conn.close_conn(ConnectionClosedReason.STALE) + + def update_is_writable(self, is_writable: Optional[bool]) -> None: + """Updates the is_writable attribute on all sockets currently in the + Pool. + """ + self.is_writable = is_writable + with self.lock: + for _socket in self.conns: + _socket.update_is_writable(self.is_writable) # type: ignore[arg-type] + + def reset( + self, service_id: Optional[ObjectId] = None, interrupt_connections: bool = False + ) -> None: + self._reset(close=False, service_id=service_id, interrupt_connections=interrupt_connections) + + def reset_without_pause(self) -> None: + self._reset(close=False, pause=False) + + def close(self) -> None: + self._reset(close=True) + + def stale_generation(self, gen: int, service_id: Optional[ObjectId]) -> bool: + return self.gen.stale(gen, service_id) + + def remove_stale_sockets(self, reference_generation: int) -> None: + """Removes stale sockets then adds new ones if pool is too small and + has not been reset. The `reference_generation` argument specifies the + `generation` at the point in time this operation was requested on the + pool. + """ + # Take the lock to avoid the race condition described in PYTHON-2699. + with self.lock: + if self.state != PoolState.READY: + return + + if self.opts.max_idle_time_seconds is not None: + close_conns = [] + with self.lock: + while ( + self.conns + and self.conns[-1].idle_time_seconds() > self.opts.max_idle_time_seconds + ): + close_conns.append(self.conns.pop()) + if not _IS_SYNC: + asyncio.gather( + *[conn.close_conn(ConnectionClosedReason.IDLE) for conn in close_conns], # type: ignore[func-returns-value] + return_exceptions=True, + ) + else: + for conn in close_conns: + conn.close_conn(ConnectionClosedReason.IDLE) + + while True: + with self.size_cond: + # There are enough sockets in the pool. 
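+                # Idle plus checked-out connections already satisfy
+                # minPoolSize, so there is nothing to do.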
+ if len(self.conns) + self.active_sockets >= self.opts.min_pool_size: + return + if self.requests >= self.opts.min_pool_size: + return + self.requests += 1 + incremented = False + try: + with self._max_connecting_cond: + # If maxConnecting connections are already being created + # by this pool then try again later instead of waiting. + if self._pending >= self._max_connecting: + return + self._pending += 1 + incremented = True + conn = self.connect() + close_conn = False + with self.lock: + # Close connection and return if the pool was reset during + # socket creation or while acquiring the pool lock. + if self.gen.get_overall() != reference_generation: + close_conn = True + if not close_conn: + self.conns.appendleft(conn) + self.active_contexts.discard(conn.cancel_context) + if close_conn: + conn.close_conn(ConnectionClosedReason.STALE) + return + finally: + if incremented: + # Notify after adding the socket to the pool. + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + + with self.size_cond: + self.requests -= 1 + self.size_cond.notify() + + def connect(self, handler: Optional[_MongoClientErrorHandler] = None) -> Connection: + """Connect to Mongo and return a new Connection. + + Can raise ConnectionFailure. + + Note that the pool does not keep a reference to the socket -- you + must call checkin() when you're done with it. + """ + with self.lock: + conn_id = self.next_connection_id + self.next_connection_id += 1 + # Use a temporary context so that interrupt_connections can cancel creating the socket. + tmp_context = _CancellationContext() + self.active_contexts.add(tmp_context) + + listeners = self.opts._event_listeners + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_created(self.address, conn_id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CREATED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn_id, + ) + + try: + networking_interface = _configured_socket_interface(self.address, self.opts) + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. 
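+        # On failure, discard the temporary cancellation context and emit the
+        # CMAP connection-closed event/log before re-raising.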
+        except BaseException as error:
+            with self.lock:
+                self.active_contexts.discard(tmp_context)
+            if self.enabled_for_cmap:
+                assert listeners is not None
+                listeners.publish_connection_closed(
+                    self.address, conn_id, ConnectionClosedReason.ERROR
+                )
+            if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+                _debug_log(
+                    _CONNECTION_LOGGER,
+                    message=_ConnectionStatusMessage.CONN_CLOSED,
+                    clientId=self._client_id,
+                    serverHost=self.address[0],
+                    serverPort=self.address[1],
+                    driverConnectionId=conn_id,
+                    reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR),
+                    error=ConnectionClosedReason.ERROR,
+                )
+            if isinstance(error, (IOError, OSError, *SSLErrors)):
+                details = _get_timeout_details(self.opts)
+                _raise_connection_failure(self.address, error, timeout_details=details)
+
+            raise
+
+        conn = Connection(networking_interface, self, self.address, conn_id, self.is_sdam)  # type: ignore[arg-type]
+        with self.lock:
+            self.active_contexts.add(conn.cancel_context)
+            self.active_contexts.discard(tmp_context)
+        if tmp_context.cancelled:
+            conn.cancel_context.cancel()
+        try:
+            if not self.is_sdam:
+                conn.hello()
+                self.is_writable = conn.is_writable
+                if handler:
+                    handler.contribute_socket(conn, completed_handshake=False)
+
+            conn.authenticate()
+        # Catch KeyboardInterrupt, CancelledError, etc. and cleanup.
+        except BaseException:
+            with self.lock:
+                self.active_contexts.discard(conn.cancel_context)
+            conn.close_conn(ConnectionClosedReason.ERROR)
+            raise
+
+        if handler:
+            handler.client._topology.receive_cluster_time(conn._cluster_time)
+
+        return conn
+
+    @contextlib.contextmanager
+    def checkout(
+        self, handler: Optional[_MongoClientErrorHandler] = None
+    ) -> Generator[Connection, None]:
+        """Get a connection from the pool. Use with a "with" statement.
+
+        Returns a :class:`Connection` object wrapping a connected
+        :class:`socket.socket`.
+
+        This method should always be used in a with-statement::
+
+            with pool.checkout() as connection:
+                connection.send_message(msg)
+                data = connection.receive_message(op_code, request_id)
+
+        Can raise ConnectionFailure or OperationFailure.
+
+        :param handler: A _MongoClientErrorHandler.
+        """
+        listeners = self.opts._event_listeners
+        checkout_started_time = time.monotonic()
+        if self.enabled_for_cmap:
+            assert listeners is not None
+            listeners.publish_connection_check_out_started(self.address)
+        if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+            _debug_log(
+                _CONNECTION_LOGGER,
+                message=_ConnectionStatusMessage.CHECKOUT_STARTED,
+                clientId=self._client_id,
+                serverHost=self.address[0],
+                serverPort=self.address[1],
+            )
+
+        conn = self._get_conn(checkout_started_time, handler=handler)
+
+        duration = time.monotonic() - checkout_started_time
+        if self.enabled_for_cmap:
+            assert listeners is not None
+            listeners.publish_connection_checked_out(self.address, conn.id, duration)
+        if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG):
+            _debug_log(
+                _CONNECTION_LOGGER,
+                message=_ConnectionStatusMessage.CHECKOUT_SUCCEEDED,
+                clientId=self._client_id,
+                serverHost=self.address[0],
+                serverPort=self.address[1],
+                driverConnectionId=conn.id,
+                durationMS=duration,
+            )
+        try:
+            with self.lock:
+                self.active_contexts.add(conn.cancel_context)
+            yield conn
+        # Catch KeyboardInterrupt, CancelledError, etc. and cleanup.
+        except BaseException:
+            # Exception in caller. Ensure the connection gets returned.
+ # Note that when pinned is True, the session owns the + # connection and it is responsible for checking the connection + # back into the pool. + pinned = conn.pinned_txn or conn.pinned_cursor + if handler: + # Perform SDAM error handling rules while the connection is + # still checked out. + exc_type, exc_val, _ = sys.exc_info() + handler.handle(exc_type, exc_val) + if not pinned and conn.active: + self.checkin(conn) + raise + if conn.pinned_txn: + with self.lock: + self.__pinned_sockets.add(conn) + self.ntxns += 1 + elif conn.pinned_cursor: + with self.lock: + self.__pinned_sockets.add(conn) + self.ncursors += 1 + elif conn.active: + self.checkin(conn) + + def _raise_if_not_ready(self, checkout_started_time: float, emit_event: bool) -> None: + if self.state != PoolState.READY: + if emit_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + + details = _get_timeout_details(self.opts) + _raise_connection_failure( + self.address, AutoReconnect("connection pool paused"), timeout_details=details + ) + + def _get_conn( + self, checkout_started_time: float, handler: Optional[_MongoClientErrorHandler] = None + ) -> Connection: + """Get or create a Connection. Can raise ConnectionFailure.""" + # We use the pid here to avoid issues with fork / multiprocessing. + # See test.test_client:TestClient.test_fork for an example of + # what could go wrong otherwise + if self.pid != os.getpid(): + self.reset_without_pause() + + if self.closed: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.POOL_CLOSED, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Connection pool was closed", + error=ConnectionCheckOutFailedReason.POOL_CLOSED, + durationMS=duration, + ) + raise _PoolClosedError( + "Attempted to check out a connection from closed connection pool" + ) + + with self.lock: + self.operation_count += 1 + + # Get a free socket or create one. + if _csot.get_timeout(): + deadline = _csot.get_deadline() + elif self.opts.wait_queue_timeout: + deadline = time.monotonic() + self.opts.wait_queue_timeout + else: + deadline = None + + with self.size_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=True) + while not (self.requests < self.max_pool_size): + timeout = deadline - time.monotonic() if deadline else None + if not _cond_wait(self.size_cond, timeout): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. 
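+                    # Without this re-notify, a wakeup delivered by checkin()
+                    # could be swallowed by a waiter that has already timed out.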
+ if self.requests < self.max_pool_size: + self.size_cond.notify() + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=True) + self.requests += 1 + + # We've now acquired the semaphore and must release it on error. + conn = None + incremented = False + emitted_event = False + try: + with self.lock: + self.active_sockets += 1 + incremented = True + while conn is None: + # CMAP: we MUST wait for either maxConnecting OR for a socket + # to be checked back into the pool. + with self._max_connecting_cond: + self._raise_if_not_ready(checkout_started_time, emit_event=False) + while not (self.conns or self._pending < self._max_connecting): + timeout = deadline - time.monotonic() if deadline else None + if not _cond_wait(self._max_connecting_cond, timeout): + # Timed out, notify the next thread to ensure a + # timeout doesn't consume the condition. + if self.conns or self._pending < self._max_connecting: + self._max_connecting_cond.notify() + emitted_event = True + self._raise_wait_queue_timeout(checkout_started_time) + self._raise_if_not_ready(checkout_started_time, emit_event=False) + + try: + conn = self.conns.popleft() + except IndexError: + self._pending += 1 + if conn: # We got a socket from the pool + if self._perished(conn): + conn = None + continue + else: # We need to create a new connection + try: + conn = self.connect(handler=handler) + finally: + with self._max_connecting_cond: + self._pending -= 1 + self._max_connecting_cond.notify() + # Catch KeyboardInterrupt, CancelledError, etc. and cleanup. + except BaseException: + if conn: + # We checked out a socket but authentication failed. + conn.close_conn(ConnectionClosedReason.ERROR) + with self.size_cond: + self.requests -= 1 + if incremented: + self.active_sockets -= 1 + self.size_cond.notify() + + if not emitted_event: + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert self.opts._event_listeners is not None + self.opts._event_listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.CONN_ERROR, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="An error occurred while trying to establish a new connection", + error=ConnectionCheckOutFailedReason.CONN_ERROR, + durationMS=duration, + ) + raise + + conn.active = True + return conn + + def checkin(self, conn: Connection) -> None: + """Return the connection to the pool, or if it's closed discard it. + + :param conn: The connection to check into the pool. 
+ """ + txn = conn.pinned_txn + cursor = conn.pinned_cursor + conn.active = False + conn.pinned_txn = False + conn.pinned_cursor = False + self.__pinned_sockets.discard(conn) + listeners = self.opts._event_listeners + with self.lock: + self.active_contexts.discard(conn.cancel_context) + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_checked_in(self.address, conn.id) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKEDIN, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + ) + if self.pid != os.getpid(): + self.reset_without_pause() + else: + if self.closed: + conn.close_conn(ConnectionClosedReason.POOL_CLOSED) + elif conn.closed: + # CMAP requires the closed event be emitted after the check in. + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_closed( + self.address, conn.id, ConnectionClosedReason.ERROR + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CONN_CLOSED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + driverConnectionId=conn.id, + reason=_verbose_connection_error_reason(ConnectionClosedReason.ERROR), + error=ConnectionClosedReason.ERROR, + ) + else: + close_conn = False + with self.lock: + # Hold the lock to ensure this section does not race with + # Pool.reset(). + if self.stale_generation(conn.generation, conn.service_id): + close_conn = True + else: + conn.update_last_checkin_time() + conn.update_is_writable(bool(self.is_writable)) + self.conns.appendleft(conn) + # Notify any threads waiting to create a connection. + self._max_connecting_cond.notify() + if close_conn: + conn.close_conn(ConnectionClosedReason.STALE) + + with self.size_cond: + if txn: + self.ntxns -= 1 + elif cursor: + self.ncursors -= 1 + self.requests -= 1 + self.active_sockets -= 1 + self.operation_count -= 1 + self.size_cond.notify() + + def _perished(self, conn: Connection) -> bool: + """Return True and close the connection if it is "perished". + + This side-effecty function checks if this socket has been idle for + for longer than the max idle time, or if the socket has been closed by + some external network error, or if the socket's generation is outdated. + + Checking sockets lets us avoid seeing *some* + :class:`~pymongo.errors.AutoReconnect` exceptions on server + hiccups, etc. We only check if the socket was closed by an external + error if it has been > 1 second since the socket was checked into the + pool, to keep performance reasonable - we can't avoid AutoReconnects + completely anyway. + """ + idle_time_seconds = conn.idle_time_seconds() + # If socket is idle, open a new one. 
+ if ( + self.opts.max_idle_time_seconds is not None + and idle_time_seconds > self.opts.max_idle_time_seconds + ): + conn.close_conn(ConnectionClosedReason.IDLE) + return True + + if self._check_interval_seconds is not None and ( + self._check_interval_seconds == 0 or idle_time_seconds > self._check_interval_seconds + ): + if conn.conn_closed(): + conn.close_conn(ConnectionClosedReason.ERROR) + return True + + if self.stale_generation(conn.generation, conn.service_id): + conn.close_conn(ConnectionClosedReason.STALE) + return True + + return False + + def _raise_wait_queue_timeout(self, checkout_started_time: float) -> NoReturn: + listeners = self.opts._event_listeners + duration = time.monotonic() - checkout_started_time + if self.enabled_for_cmap: + assert listeners is not None + listeners.publish_connection_check_out_failed( + self.address, ConnectionCheckOutFailedReason.TIMEOUT, duration + ) + if self.enabled_for_logging and _CONNECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _CONNECTION_LOGGER, + message=_ConnectionStatusMessage.CHECKOUT_FAILED, + clientId=self._client_id, + serverHost=self.address[0], + serverPort=self.address[1], + reason="Wait queue timeout elapsed without a connection becoming available", + error=ConnectionCheckOutFailedReason.TIMEOUT, + durationMS=duration, + ) + timeout = _csot.get_timeout() or self.opts.wait_queue_timeout + if self.opts.load_balanced: + other_ops = self.active_sockets - self.ncursors - self.ntxns + raise WaitQueueTimeoutError( + "Timeout waiting for connection from the connection pool. " + "maxPoolSize: {}, connections in use by cursors: {}, " + "connections in use by transactions: {}, connections in use " + "by other operations: {}, timeout: {}".format( + self.opts.max_pool_size, + self.ncursors, + self.ntxns, + other_ops, + timeout, + ) + ) + raise WaitQueueTimeoutError( + "Timed out while checking out a connection from connection pool. " + f"maxPoolSize: {self.opts.max_pool_size}, timeout: {timeout}" + ) + + def __del__(self) -> None: + # Avoid ResourceWarnings in Python 3 + # Close all sockets without calling reset() or close() because it is + # not safe to acquire a lock in __del__. + if _IS_SYNC: + for conn in self.conns: + conn.close_conn(None) # type: ignore[unused-coroutine] diff --git a/pymongo/synchronous/server.py b/pymongo/synchronous/server.py new file mode 100644 index 0000000000..f57420918b --- /dev/null +++ b/pymongo/synchronous/server.py @@ -0,0 +1,383 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Communicate with one MongoDB server in a topology.""" +from __future__ import annotations + +import logging +from datetime import datetime +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ContextManager, + Optional, + Union, +) + +from bson import _decode_all_selective +from pymongo.errors import NotPrimaryError, OperationFailure +from pymongo.helpers_shared import _check_command_response +from pymongo.logger import ( + _COMMAND_LOGGER, + _SDAM_LOGGER, + _CommandStatusMessage, + _debug_log, + _SDAMStatusMessage, +) +from pymongo.message import _convert_exception, _GetMore, _OpMsg, _Query +from pymongo.response import PinnedResponse, Response +from pymongo.synchronous.helpers import _handle_reauth + +if TYPE_CHECKING: + from queue import Queue + from weakref import ReferenceType + + from bson.objectid import ObjectId + from pymongo.monitoring import _EventListeners + from pymongo.read_preferences import _ServerMode + from pymongo.server_description import ServerDescription + from pymongo.synchronous.mongo_client import MongoClient, _MongoClientErrorHandler + from pymongo.synchronous.monitor import Monitor + from pymongo.synchronous.pool import Connection, Pool + from pymongo.typings import _DocumentOut + +_IS_SYNC = True + +_CURSOR_DOC_FIELDS = {"cursor": {"firstBatch": 1, "nextBatch": 1}} + + +class Server: + def __init__( + self, + server_description: ServerDescription, + pool: Pool, + monitor: Monitor, + topology_id: Optional[ObjectId] = None, + listeners: Optional[_EventListeners] = None, + events: Optional[ReferenceType[Queue[Any]]] = None, + ) -> None: + """Represent one MongoDB server.""" + self._description = server_description + self._pool = pool + self._monitor = monitor + self._topology_id = topology_id + self._publish = listeners is not None and listeners.enabled_for_server + self._listener = listeners + self._events = None + if self._publish: + self._events = events() # type: ignore[misc] + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + Multiple calls have no effect. + """ + if not self._pool.opts.load_balanced: + self._monitor.open() + + def reset(self, service_id: Optional[ObjectId] = None) -> None: + """Clear the connection pool.""" + self.pool.reset(service_id) + + def close(self) -> None: + """Clear the connection pool and stop the monitor. + + Reconnect with open(). 
+ """ + if self._publish: + assert self._listener is not None + assert self._events is not None + self._events.put( + ( + self._listener.publish_server_closed, + (self._description.address, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.STOP_SERVER, + topologyId=self._topology_id, + serverHost=self._description.address[0], + serverPort=self._description.address[1], + ) + + self._monitor.close() + self._pool.close() + + def request_check(self) -> None: + """Check the server's state soon.""" + self._monitor.request_check() + + def operation_to_command( + self, operation: Union[_Query, _GetMore], conn: Connection, apply_timeout: bool = False + ) -> tuple[dict[str, Any], str]: + cmd, db = operation.as_command(conn, apply_timeout) + # Support auto encryption + if operation.client._encrypter and not operation.client._encrypter._bypass_auto_encryption: + cmd = operation.client._encrypter.encrypt( # type: ignore[misc, assignment] + operation.db, cmd, operation.codec_options + ) + operation.update_command(cmd) + + return cmd, db + + @_handle_reauth + def run_operation( + self, + conn: Connection, + operation: Union[_Query, _GetMore], + read_preference: _ServerMode, + listeners: Optional[_EventListeners], + unpack_res: Callable[..., list[_DocumentOut]], + client: MongoClient[Any], + ) -> Response: + """Run a _Query or _GetMore operation and return a Response object. + + This method is used only to run _Query/_GetMore operations from + cursors. + Can raise ConnectionFailure, OperationFailure, etc. + + :param conn: A Connection instance. + :param operation: A _Query or _GetMore object. + :param read_preference: The read preference to use. + :param listeners: Instance of _EventListeners or None. + :param unpack_res: A callable that decodes the wire protocol response. + :param client: A MongoClient instance. + """ + assert listeners is not None + publish = listeners.enabled_for_commands + start = datetime.now() + + use_cmd = operation.use_command(conn) + more_to_come = operation.conn_mgr and operation.conn_mgr.more_to_come + cmd, dbn = self.operation_to_command(operation, conn, use_cmd) + if more_to_come: + request_id = 0 + else: + message = operation.get_message(read_preference, conn, use_cmd) + request_id, data, max_doc_size = self._split_message(message) + + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.STARTED, + clientId=client._topology_settings._topology_id, + command=cmd, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + + if publish: + if "$db" not in cmd: + cmd["$db"] = dbn + assert listeners is not None + listeners.publish_command_start( + cmd, + dbn, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + ) + + try: + if more_to_come: + reply = conn.receive_message(None) + else: + conn.send_message(data, max_doc_size) + reply = conn.receive_message(request_id) + + # Unpack and check for command errors. 
+ if use_cmd: + user_fields = _CURSOR_DOC_FIELDS + legacy_response = False + else: + user_fields = None + legacy_response = True + docs = unpack_res( + reply, + operation.cursor_id, + operation.codec_options, + legacy_response=legacy_response, + user_fields=user_fields, + ) + if use_cmd: + first = docs[0] + operation.client._process_response(first, operation.session) # type: ignore[misc, arg-type] + _check_command_response(first, conn.max_wire_version, pool_opts=conn.opts) # type:ignore[has-type] + except Exception as exc: + duration = datetime.now() - start + if isinstance(exc, (NotPrimaryError, OperationFailure)): + failure: _DocumentOut = exc.details # type: ignore[assignment] + else: + failure = _convert_exception(exc) + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.FAILED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + failure=failure, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + isServerSideError=isinstance(exc, OperationFailure), + ) + if publish: + assert listeners is not None + listeners.publish_command_failure( + duration, + failure, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + raise + duration = datetime.now() - start + # Must publish in find / getMore / explain command response + # format. + if use_cmd: + res = docs[0] + elif operation.name == "explain": + res = docs[0] if docs else {} + else: + res = {"cursor": {"id": reply.cursor_id, "ns": operation.namespace()}, "ok": 1} # type: ignore[union-attr] + if operation.name == "find": + res["cursor"]["firstBatch"] = docs + else: + res["cursor"]["nextBatch"] = docs + if _COMMAND_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _COMMAND_LOGGER, + message=_CommandStatusMessage.SUCCEEDED, + clientId=client._topology_settings._topology_id, + durationMS=duration, + reply=res, + commandName=next(iter(cmd)), + databaseName=dbn, + requestId=request_id, + operationId=request_id, + driverConnectionId=conn.id, + serverConnectionId=conn.server_connection_id, + serverHost=conn.address[0], + serverPort=conn.address[1], + serviceId=conn.service_id, + ) + if publish: + assert listeners is not None + listeners.publish_command_success( + duration, + res, + operation.name, + request_id, + conn.address, + conn.server_connection_id, + service_id=conn.service_id, + database_name=dbn, + ) + + # Decrypt response. + client = operation.client # type: ignore[assignment] + if client and client._encrypter: + if use_cmd: + decrypted = client._encrypter.decrypt(reply.raw_command_response()) + docs = _decode_all_selective(decrypted, operation.codec_options, user_fields) + + response: Response + + if client._should_pin_cursor(operation.session) or operation.exhaust: # type: ignore[arg-type] + conn.pin_cursor() + if isinstance(reply, _OpMsg): + # In OP_MSG, the server keeps sending only if the + # more_to_come flag is set. + more_to_come = reply.more_to_come + else: + # In OP_REPLY, the server keeps sending until cursor_id is 0. 
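+                # so exhaust streaming continues only while the cursor id is
+                # nonzero.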
+ more_to_come = bool(operation.exhaust and reply.cursor_id) + if operation.conn_mgr: + operation.conn_mgr.update_exhaust(more_to_come) + response = PinnedResponse( + data=reply, + address=self._description.address, + conn=conn, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + more_to_come=more_to_come, + ) + else: + response = Response( + data=reply, + address=self._description.address, + duration=duration, + request_id=request_id, + from_command=use_cmd, + docs=docs, + ) + + return response + + def checkout( + self, handler: Optional[_MongoClientErrorHandler] = None + ) -> ContextManager[Connection]: + return self.pool.checkout(handler) + + @property + def description(self) -> ServerDescription: + return self._description + + @description.setter + def description(self, server_description: ServerDescription) -> None: + assert server_description.address == self._description.address + self._description = server_description + + @property + def pool(self) -> Pool: + return self._pool + + def _split_message( + self, message: Union[tuple[int, Any], tuple[int, Any, int]] + ) -> tuple[int, Any, int]: + """Return request_id, data, max_doc_size. + + :param message: (request_id, data, max_doc_size) or (request_id, data) + """ + if len(message) == 3: + return message # type: ignore[return-value] + else: + # get_more and kill_cursors messages don't include BSON documents. + request_id, data = message # type: ignore[misc] + return request_id, data, 0 + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self._description!r}>" diff --git a/pymongo/synchronous/settings.py b/pymongo/synchronous/settings.py new file mode 100644 index 0000000000..61b86fa18d --- /dev/null +++ b/pymongo/synchronous/settings.py @@ -0,0 +1,175 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Represent MongoClient's configuration.""" +from __future__ import annotations + +import threading +import traceback +from typing import Any, Collection, Optional, Type, Union + +from bson.objectid import ObjectId +from pymongo import common +from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT +from pymongo.errors import ConfigurationError +from pymongo.pool_options import PoolOptions +from pymongo.server_description import ServerDescription +from pymongo.synchronous import monitor, pool +from pymongo.synchronous.pool import Pool +from pymongo.topology_description import TOPOLOGY_TYPE, _ServerSelector + +_IS_SYNC = True + + +class TopologySettings: + def __init__( + self, + seeds: Optional[Collection[tuple[str, int]]] = None, + replica_set_name: Optional[str] = None, + pool_class: Optional[Type[Pool]] = None, + pool_options: Optional[PoolOptions] = None, + monitor_class: Optional[Type[monitor.Monitor]] = None, + condition_class: Optional[Type[threading.Condition]] = None, + local_threshold_ms: int = LOCAL_THRESHOLD_MS, + server_selection_timeout: int = SERVER_SELECTION_TIMEOUT, + heartbeat_frequency: int = common.HEARTBEAT_FREQUENCY, + server_selector: Optional[_ServerSelector] = None, + fqdn: Optional[str] = None, + direct_connection: Optional[bool] = False, + load_balanced: Optional[bool] = None, + srv_service_name: str = common.SRV_SERVICE_NAME, + srv_max_hosts: int = 0, + server_monitoring_mode: str = common.SERVER_MONITORING_MODE, + topology_id: Optional[ObjectId] = None, + ): + """Represent MongoClient's configuration. + + Take a list of (host, port) pairs and optional replica set name. + """ + if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL: + raise ConfigurationError( + "heartbeatFrequencyMS cannot be less than %d" + % (common.MIN_HEARTBEAT_INTERVAL * 1000,) + ) + + self._seeds: Collection[tuple[str, int]] = seeds or [("localhost", 27017)] + self._replica_set_name = replica_set_name + self._pool_class: Type[Pool] = pool_class or pool.Pool + self._pool_options: PoolOptions = pool_options or PoolOptions() + self._monitor_class: Type[monitor.Monitor] = monitor_class or monitor.Monitor + self._condition_class: Type[threading.Condition] = condition_class or threading.Condition + self._local_threshold_ms = local_threshold_ms + self._server_selection_timeout = server_selection_timeout + self._server_selector = server_selector + self._fqdn = fqdn + self._heartbeat_frequency = heartbeat_frequency + self._direct = direct_connection + self._load_balanced = load_balanced + self._srv_service_name = srv_service_name + self._srv_max_hosts = srv_max_hosts or 0 + self._server_monitoring_mode = server_monitoring_mode + if topology_id is not None: + self._topology_id = topology_id + else: + self._topology_id = ObjectId() + # Store the allocation traceback to catch unclosed clients in the + # test suite. 
+ self._stack = "".join(traceback.format_stack()[:-2]) + + @property + def seeds(self) -> Collection[tuple[str, int]]: + """List of server addresses.""" + return self._seeds + + @property + def replica_set_name(self) -> Optional[str]: + return self._replica_set_name + + @property + def pool_class(self) -> Type[Pool]: + return self._pool_class + + @property + def pool_options(self) -> PoolOptions: + return self._pool_options + + @property + def monitor_class(self) -> Type[monitor.Monitor]: + return self._monitor_class + + @property + def condition_class(self) -> Type[threading.Condition]: + return self._condition_class + + @property + def local_threshold_ms(self) -> int: + return self._local_threshold_ms + + @property + def server_selection_timeout(self) -> int: + return self._server_selection_timeout + + @property + def server_selector(self) -> Optional[_ServerSelector]: + return self._server_selector + + @property + def heartbeat_frequency(self) -> int: + return self._heartbeat_frequency + + @property + def fqdn(self) -> Optional[str]: + return self._fqdn + + @property + def direct(self) -> Optional[bool]: + """Connect directly to a single server, or use a set of servers? + + True if there is one seed and no replica_set_name. + """ + return self._direct + + @property + def load_balanced(self) -> Optional[bool]: + """True if the client was configured to connect to a load balancer.""" + return self._load_balanced + + @property + def srv_service_name(self) -> str: + """The srvServiceName.""" + return self._srv_service_name + + @property + def srv_max_hosts(self) -> int: + """The srvMaxHosts.""" + return self._srv_max_hosts + + @property + def server_monitoring_mode(self) -> str: + """The serverMonitoringMode.""" + return self._server_monitoring_mode + + def get_topology_type(self) -> int: + if self.load_balanced: + return TOPOLOGY_TYPE.LoadBalanced + elif self.direct: + return TOPOLOGY_TYPE.Single + elif self.replica_set_name is not None: + return TOPOLOGY_TYPE.ReplicaSetNoPrimary + else: + return TOPOLOGY_TYPE.Unknown + + def get_server_descriptions(self) -> dict[Union[tuple[str, int], Any], ServerDescription]: + """Initial dict of (address, ServerDescription) for all seeds.""" + return {address: ServerDescription(address) for address in self.seeds} diff --git a/pymongo/synchronous/srv_resolver.py b/pymongo/synchronous/srv_resolver.py new file mode 100644 index 0000000000..4802310698 --- /dev/null +++ b/pymongo/synchronous/srv_resolver.py @@ -0,0 +1,155 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Support for resolving hosts and options from mongodb+srv:// URIs.""" +from __future__ import annotations + +import ipaddress +import random +from typing import TYPE_CHECKING, Any, Optional, Union + +from pymongo.common import CONNECT_TIMEOUT +from pymongo.errors import ConfigurationError + +if TYPE_CHECKING: + from dns import resolver + +_IS_SYNC = True + + +def _have_dnspython() -> bool: + try: + import dns # noqa: F401 + + return True + except ImportError: + return False + + +# dnspython can return bytes or str from various parts +# of its API depending on version. We always want str. +def maybe_decode(text: Union[str, bytes]) -> str: + if isinstance(text, bytes): + return text.decode() + return text + + +# PYTHON-2667 Lazily call dns.resolver methods for compatibility with eventlet. +def _resolve(*args: Any, **kwargs: Any) -> resolver.Answer: + if _IS_SYNC: + from dns import resolver + + return resolver.resolve(*args, **kwargs) + else: + from dns import asyncresolver + + return asyncresolver.resolve(*args, **kwargs) # type:ignore[return-value] + + +_INVALID_HOST_MSG = ( + "Invalid URI host: %s is not a valid hostname for 'mongodb+srv://'. " + "Did you mean to use 'mongodb://'?" +) + + +class _SrvResolver: + def __init__( + self, + fqdn: str, + connect_timeout: Optional[float], + srv_service_name: str, + srv_max_hosts: int = 0, + ): + self.__fqdn = fqdn + self.__srv = srv_service_name + self.__connect_timeout = connect_timeout or CONNECT_TIMEOUT + self.__srv_max_hosts = srv_max_hosts or 0 + # Validate the fully qualified domain name. + try: + ipaddress.ip_address(fqdn) + raise ConfigurationError(_INVALID_HOST_MSG % ("an IP address",)) + except ValueError: + pass + try: + split_fqdn = self.__fqdn.split(".") + self.__plist = split_fqdn[1:] if len(split_fqdn) > 2 else split_fqdn + except Exception: + raise ConfigurationError(_INVALID_HOST_MSG % (fqdn,)) from None + self.__slen = len(self.__plist) + self.nparts = len(split_fqdn) + + def get_options(self) -> Optional[str]: + from dns import resolver + + try: + results = _resolve(self.__fqdn, "TXT", lifetime=self.__connect_timeout) + except (resolver.NoAnswer, resolver.NXDOMAIN): + # No TXT records + return None + except Exception as exc: + raise ConfigurationError(str(exc)) from exc + if len(results) > 1: + raise ConfigurationError("Only one TXT record is supported") + return (b"&".join([b"".join(res.strings) for res in results])).decode("utf-8") # type: ignore[attr-defined] + + def _resolve_uri(self, encapsulate_errors: bool) -> resolver.Answer: + try: + results = _resolve( + "_" + self.__srv + "._tcp." + self.__fqdn, "SRV", lifetime=self.__connect_timeout + ) + except Exception as exc: + if not encapsulate_errors: + # Raise the original error. + raise + # Else, raise all errors as ConfigurationError. 
+ raise ConfigurationError(str(exc)) from exc + return results + + def _get_srv_response_and_hosts( + self, encapsulate_errors: bool + ) -> tuple[resolver.Answer, list[tuple[str, Any]]]: + results = self._resolve_uri(encapsulate_errors) + + # Construct address tuples + nodes = [ + (maybe_decode(res.target.to_text(omit_final_dot=True)), res.port) # type: ignore[attr-defined] + for res in results + ] + + # Validate hosts + for node in nodes: + srv_host = node[0].lower() + if self.__fqdn == srv_host and self.nparts < 3: + raise ConfigurationError( + "Invalid SRV host: return address is identical to SRV hostname" + ) + try: + nlist = srv_host.split(".")[1:][-self.__slen :] + except Exception as exc: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") from exc + if self.__plist != nlist: + raise ConfigurationError(f"Invalid SRV host: {node[0]}") + if self.__srv_max_hosts: + nodes = random.sample(nodes, min(self.__srv_max_hosts, len(nodes))) + return results, nodes + + def get_hosts(self) -> list[tuple[str, Any]]: + _, nodes = self._get_srv_response_and_hosts(True) + return nodes + + def get_hosts_and_min_ttl(self) -> tuple[list[tuple[str, Any]], int]: + results, nodes = self._get_srv_response_and_hosts(False) + rrset = results.rrset + ttl = rrset.ttl if rrset else 0 + return nodes, ttl diff --git a/pymongo/synchronous/topology.py b/pymongo/synchronous/topology.py new file mode 100644 index 0000000000..a4ca0e6e0f --- /dev/null +++ b/pymongo/synchronous/topology.py @@ -0,0 +1,1125 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. 
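The host validation in `_get_srv_response_and_hosts` above rejects SRV targets that don't share the queried FQDN's parent domain, which prevents a DNS response from redirecting the client to an unrelated host. A simplified sketch of just that parent-domain check, assuming the FQDN already passed the constructor's validation; the hostnames are hypothetical:

```python
def shares_parent_domain(fqdn: str, srv_host: str) -> bool:
    # Mirror of the __plist/__slen comparison above: keep the FQDN's
    # parent labels (all labels for a two-label FQDN), then require the
    # SRV target's trailing labels, minus its first label, to match.
    parts = fqdn.lower().split(".")
    plist = parts[1:] if len(parts) > 2 else parts
    return srv_host.lower().split(".")[1:][-len(plist):] == plist


assert shares_parent_domain("cluster0.example.com", "shard0.example.com")
assert not shares_parent_domain("cluster0.example.com", "shard0.attacker.net")
```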
+ +"""Internal class to monitor a topology of one or more servers.""" + +from __future__ import annotations + +import asyncio +import logging +import os +import queue +import random +import sys +import time +import warnings +import weakref +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Mapping, Optional, cast + +from pymongo import _csot, common, helpers_shared, periodic_executor +from pymongo.errors import ( + ConnectionFailure, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteError, +) +from pymongo.hello import Hello +from pymongo.lock import ( + _cond_wait, + _create_condition, + _create_lock, +) +from pymongo.logger import ( + _SDAM_LOGGER, + _SERVER_SELECTION_LOGGER, + _debug_log, + _SDAMStatusMessage, + _ServerSelectionStatusMessage, +) +from pymongo.pool_options import PoolOptions +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import ( + Selection, + any_server_selector, + arbiter_server_selector, + secondary_server_selector, + writable_server_selector, +) +from pymongo.synchronous.client_session import _ServerSession, _ServerSessionPool +from pymongo.synchronous.monitor import MonitorBase, SrvMonitor +from pymongo.synchronous.pool import Pool +from pymongo.synchronous.server import Server +from pymongo.topology_description import ( + SRV_POLLING_TOPOLOGIES, + TOPOLOGY_TYPE, + TopologyDescription, + _updated_topology_description_srv_polling, + updated_topology_description, +) + +if TYPE_CHECKING: + from bson import ObjectId + from pymongo.synchronous.settings import TopologySettings + from pymongo.typings import ClusterTime, _Address + +_IS_SYNC = True + +_pymongo_dir = str(Path(__file__).parent) + + +def process_events_queue(queue_ref: weakref.ReferenceType[queue.Queue]) -> bool: # type: ignore[type-arg] + q = queue_ref() + if not q: + return False # Cancel PeriodicExecutor. + + while True: + try: + event = q.get_nowait() + except queue.Empty: + break + else: + fn, args = event + fn(*args) + + return True # Continue PeriodicExecutor. + + +class Topology: + """Monitor a topology of one or more servers.""" + + def __init__(self, topology_settings: TopologySettings): + self._topology_id = topology_settings._topology_id + self._listeners = topology_settings._pool_options._event_listeners + self._publish_server = self._listeners is not None and self._listeners.enabled_for_server + self._publish_tp = self._listeners is not None and self._listeners.enabled_for_topology + + # Create events queue if there are publishers. 
+ self._events = None + self.__events_executor: Any = None + + if self._publish_server or self._publish_tp: + self._events = queue.Queue(maxsize=100) + + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.START_TOPOLOGY, + topologyId=self._topology_id, + ) + + if self._publish_tp: + assert self._events is not None + self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) + self._settings = topology_settings + topology_description = TopologyDescription( + topology_settings.get_topology_type(), + topology_settings.get_server_descriptions(), + topology_settings.replica_set_name, + None, + None, + topology_settings, + ) + + self._description = topology_description + initial_td = TopologyDescription( + TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings + ) + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (initial_td, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(initial_td), + newDescription=repr(self._description), + ) + + for seed in topology_settings.seeds: + if self._publish_server: + assert self._events is not None + self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.START_SERVER, + topologyId=self._topology_id, + serverHost=seed[0], + serverPort=seed[1], + ) + + # Store the seed list to help diagnose errors in _error_message(). + self._seed_addresses = list(topology_description.server_descriptions()) + self._opened = False + self._closed = False + self._lock = _create_lock() + self._condition = _create_condition( + self._lock, self._settings.condition_class if _IS_SYNC else None + ) + self._servers: dict[_Address, Server] = {} + self._pid: Optional[int] = None + self._max_cluster_time: Optional[ClusterTime] = None + self._session_pool = _ServerSessionPool() + + if self._publish_server or self._publish_tp: + assert self._events is not None + weak: weakref.ReferenceType[queue.Queue[Any]] + + def target() -> bool: + return process_events_queue(weak) + + executor = periodic_executor.PeriodicExecutor( + interval=common.EVENTS_QUEUE_FREQUENCY, + min_interval=common.MIN_HEARTBEAT_INTERVAL, + target=target, + name="pymongo_events_thread", + ) + + # We strongly reference the executor and it weakly references + # the queue via this closure. When the topology is freed, stop + # the executor soon. + weak = weakref.ref(self._events, executor.close) + self.__events_executor = executor + executor.open() + + self._srv_monitor = None + if self._settings.fqdn is not None and not self._settings.load_balanced: + self._srv_monitor = SrvMonitor(self, self._settings) + + # Stores all monitor tasks that need to be joined on close or server selection + self._monitor_tasks: list[MonitorBase] = [] + + def open(self) -> None: + """Start monitoring, or restart after a fork. + + No effect if called multiple times. + + .. warning:: Topology is shared among multiple threads and is protected + by mutual exclusion. Using Topology from a process other than the one + that initialized it will emit a warning and may result in deadlock. To + prevent this from happening, MongoClient must be created after any + forking. 
+ + """ + pid = os.getpid() + if self._pid is None: + self._pid = pid + elif pid != self._pid: + self._pid = pid + if sys.version_info[:2] >= (3, 12): + kwargs = {"skip_file_prefixes": (_pymongo_dir,)} + else: + kwargs = {"stacklevel": 6} + # Ignore B028 warning for missing stacklevel. + warnings.warn( # type: ignore[call-overload] # noqa: B028 + "MongoClient opened before fork. May not be entirely fork-safe, " + "proceed with caution. See PyMongo's documentation for details: " + "https://dochub.mongodb.org/core/pymongo-fork-deadlock", + **kwargs, + ) + with self._lock: + # Close servers and clear the pools. + for server in self._servers.values(): + server.close() + # Reset the session pool to avoid duplicate sessions in + # the child process. + self._session_pool.reset() + + with self._lock: + self._ensure_opened() + + def get_server_selection_timeout(self) -> float: + # CSOT: use remaining timeout when set. + timeout = _csot.remaining() + if timeout is None: + return self._settings.server_selection_timeout + return timeout + + def select_servers( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + operation_id: Optional[int] = None, + ) -> list[Server]: + """Return a list of Servers matching selector, or time out. + + :param selector: function that takes a list of Servers and returns + a subset of them. + :param operation: The name of the operation that the server is being selected for. + :param server_selection_timeout: maximum seconds to wait. + If not provided, the default value common.SERVER_SELECTION_TIMEOUT + is used. + :param address: optional server address to select. + + Calls self.open() if needed. + + Raises exc:`ServerSelectionTimeoutError` after + `server_selection_timeout` if no matching servers are found. + """ + if server_selection_timeout is None: + server_timeout = self.get_server_selection_timeout() + else: + server_timeout = server_selection_timeout + + # Cleanup any completed monitor tasks safely + if not _IS_SYNC and self._monitor_tasks: + self.cleanup_monitors() + + with self._lock: + server_descriptions = self._select_servers_loop( + selector, server_timeout, operation, operation_id, address + ) + + return [ + cast(Server, self.get_server_by_address(sd.address)) for sd in server_descriptions + ] + + def _select_servers_loop( + self, + selector: Callable[[Selection], Selection], + timeout: float, + operation: str, + operation_id: Optional[int], + address: Optional[_Address], + ) -> list[ServerDescription]: + """select_servers() guts. Hold the lock when calling this.""" + now = time.monotonic() + end_time = now + timeout + logged_waiting = False + + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.STARTED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + ) + + server_descriptions = self._description.apply_selector( + selector, address, custom_selector=self._settings.server_selector + ) + + while not server_descriptions: + # No suitable servers. 
+ if timeout == 0 or now > end_time: + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.FAILED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + failure=self._error_message(selector), + ) + raise ServerSelectionTimeoutError( + f"{self._error_message(selector)}, Timeout: {timeout}s, Topology Description: {self.description!r}" + ) + + if not logged_waiting: + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.WAITING, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + remainingTimeMS=int(1000 * (end_time - time.monotonic())), + ) + logged_waiting = True + + self._ensure_opened() + self._request_check_all() + + # Release the lock and wait for the topology description to + # change, or for a timeout. We won't miss any changes that + # came after our most recent apply_selector call, since we've + # held the lock until now. + _cond_wait(self._condition, common.MIN_HEARTBEAT_INTERVAL) + self._description.check_compatible() + now = time.monotonic() + server_descriptions = self._description.apply_selector( + selector, address, custom_selector=self._settings.server_selector + ) + + self._description.check_compatible() + return server_descriptions + + def _select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + servers = self.select_servers( + selector, operation, server_selection_timeout, address, operation_id + ) + servers = _filter_servers(servers, deprioritized_servers) + if len(servers) == 1: + return servers[0] + server1, server2 = random.sample(servers, 2) + if server1.pool.operation_count <= server2.pool.operation_count: + return server1 + else: + return server2 + + def select_server( + self, + selector: Callable[[Selection], Selection], + operation: str, + server_selection_timeout: Optional[float] = None, + address: Optional[_Address] = None, + deprioritized_servers: Optional[list[Server]] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Like select_servers, but choose a random server if several match.""" + server = self._select_server( + selector, + operation, + server_selection_timeout, + address, + deprioritized_servers, + operation_id=operation_id, + ) + if _csot.get_timeout(): + _csot.set_rtt(server.description.min_round_trip_time) + if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SERVER_SELECTION_LOGGER, + message=_ServerSelectionStatusMessage.SUCCEEDED, + selector=selector, + operation=operation, + operationId=operation_id, + topologyDescription=self.description, + clientId=self.description._topology_settings._topology_id, + serverHost=server.description.address[0], + serverPort=server.description.address[1], + ) + return server + + def select_server_by_address( + self, + address: _Address, + operation: str, + server_selection_timeout: Optional[int] = None, + operation_id: Optional[int] = None, + ) -> Server: + """Return a Server for "address", reconnecting if necessary. + + If the server's type is not known, request an immediate check of all + servers. 
Time out after "server_selection_timeout" if the server + cannot be reached. + + :param address: A (host, port) pair. + :param operation: The name of the operation that the server is being selected for. + :param server_selection_timeout: maximum seconds to wait. + If not provided, the default value + common.SERVER_SELECTION_TIMEOUT is used. + :param operation_id: The unique id of the current operation being performed. Defaults to None if not provided. + + Calls self.open() if needed. + + Raises exc:`ServerSelectionTimeoutError` after + `server_selection_timeout` if no matching servers are found. + """ + return self.select_server( + any_server_selector, + operation, + server_selection_timeout, + address, + operation_id=operation_id, + ) + + def _process_change( + self, + server_description: ServerDescription, + reset_pool: bool = False, + interrupt_connections: bool = False, + ) -> None: + """Process a new ServerDescription on an opened topology. + + Hold the lock when calling this. + """ + td_old = self._description + sd_old = td_old._server_descriptions[server_description.address] + if _is_stale_server_description(sd_old, server_description): + # This is a stale hello response. Ignore it. + return + + new_td = updated_topology_description(self._description, server_description) + # CMAP: Ensure the pool is "ready" when the server is selectable. + if server_description.is_readable or ( + server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single + ): + server = self._servers.get(server_description.address) + if server: + server.pool.ready() + + suppress_event = sd_old == server_description + if self._publish_server and not suppress_event: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_server_description_changed, + (sd_old, server_description, server_description.address, self._topology_id), + ) + ) + + self._description = new_td + self._update_servers() + + if self._publish_tp and not suppress_event: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG) and not suppress_event: + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(td_old), + newDescription=repr(self._description), + ) + + # Shutdown SRV polling for unsupported cluster types. + # This is only applicable if the old topology was Unknown, and the + # new one is something other than Unknown or Sharded. + if self._srv_monitor and ( + td_old.topology_type == TOPOLOGY_TYPE.Unknown + and self._description.topology_type not in SRV_POLLING_TOPOLOGIES + ): + self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) + + # Wake anything waiting in select_servers(). + self._condition.notify_all() + + def on_change( + self, + server_description: ServerDescription, + reset_pool: bool = False, + interrupt_connections: bool = False, + ) -> None: + """Process a new ServerDescription after an hello call completes.""" + # We do no I/O holding the lock. + with self._lock: + # Monitors may continue working on hello calls for some time + # after a call to Topology.close, so this method may be called at + # any time. Ensure the topology is open before processing the + # change. + # Any monitored server was definitely in the topology description + # once. 
Check if it's still in the description or if some state- + # change removed it. E.g., we got a host list from the primary + # that didn't include this server. + if self._opened and self._description.has_server(server_description.address): + self._process_change(server_description, reset_pool, interrupt_connections) + # Clear the pool from a failed heartbeat, done outside the lock to avoid blocking on connection close. + if reset_pool: + server = self._servers.get(server_description.address) + if server: + server.pool.reset(interrupt_connections=interrupt_connections) + + def _process_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new seedlist on an opened topology. + Hold the lock when calling this. + """ + td_old = self._description + if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: + return + self._description = _updated_topology_description_srv_polling(self._description, seedlist) + + self._update_servers() + + if self._publish_tp: + assert self._events is not None + self._events.put( + ( + self._listeners.publish_topology_description_changed, + (td_old, self._description, self._topology_id), + ) + ) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(td_old), + newDescription=repr(self._description), + ) + + def on_srv_update(self, seedlist: list[tuple[str, Any]]) -> None: + """Process a new list of nodes obtained from scanning SRV records.""" + # We do no I/O holding the lock. + with self._lock: + if self._opened: + self._process_srv_update(seedlist) + + def get_server_by_address(self, address: _Address) -> Optional[Server]: + """Get a Server or None. + + Returns the current version of the server immediately, even if it's + Unknown or absent from the topology. Only use this in unittests. + In driver code, use select_server_by_address, since then you're + assured a recent view of the server's type and wire protocol version. + """ + return self._servers.get(address) + + def has_server(self, address: _Address) -> bool: + return address in self._servers + + def get_primary(self) -> Optional[_Address]: + """Return primary's address or None.""" + # Implemented here in Topology instead of MongoClient, so it can lock. + with self._lock: + topology_type = self._description.topology_type + if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary: + return None + + return writable_server_selector(self._new_selection())[0].address + + def _get_replica_set_members(self, selector: Callable[[Selection], Selection]) -> set[_Address]: + """Return set of replica set member addresses.""" + # Implemented here in Topology instead of MongoClient, so it can lock. 
+ with self._lock: + topology_type = self._description.topology_type + if topology_type not in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ): + return set() + + return {sd.address for sd in iter(selector(self._new_selection()))} + + def get_secondaries(self) -> set[_Address]: + """Return set of secondary addresses.""" + return self._get_replica_set_members(secondary_server_selector) + + def get_arbiters(self) -> set[_Address]: + """Return set of arbiter addresses.""" + return self._get_replica_set_members(arbiter_server_selector) + + def max_cluster_time(self) -> Optional[ClusterTime]: + """Return a document, the highest seen $clusterTime.""" + return self._max_cluster_time + + def _receive_cluster_time_no_lock(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + # Driver Sessions Spec: "Whenever a driver receives a cluster time from + # a server it MUST compare it to the current highest seen cluster time + # for the deployment. If the new cluster time is higher than the + # highest seen cluster time it MUST become the new highest seen cluster + # time. Two cluster times are compared using only the BsonTimestamp + # value of the clusterTime embedded field." + if cluster_time: + # ">" uses bson.timestamp.Timestamp's comparison operator. + if ( + not self._max_cluster_time + or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] + ): + self._max_cluster_time = cluster_time + + def receive_cluster_time(self, cluster_time: Optional[Mapping[str, Any]]) -> None: + with self._lock: + self._receive_cluster_time_no_lock(cluster_time) + + def request_check_all(self, wait_time: int = 5) -> None: + """Wake all monitors, wait for at least one to check its server.""" + with self._lock: + self._request_check_all() + _cond_wait(self._condition, wait_time) + + def data_bearing_servers(self) -> list[ServerDescription]: + """Return a list of all data-bearing servers. + + This includes any server that might be selected for an operation. + """ + if self._description.topology_type == TOPOLOGY_TYPE.Single: + return self._description.known_servers + return self._description.readable_servers + + def update_pool(self) -> None: + # Remove any stale sockets and add new sockets if pool is too small. + servers = [] + with self._lock: + # Only update pools for data-bearing servers. + for sd in self.data_bearing_servers(): + server = self._servers[sd.address] + servers.append((server, server.pool.gen.get_overall())) + + for server, generation in servers: + try: + server.pool.remove_stale_sockets(generation) + except PyMongoError as exc: + ctx = _ErrorContext(exc, 0, generation, False, None) + self.handle_error(server.description.address, ctx) + raise + + def close(self) -> None: + """Clear pools and terminate monitors. Topology does not reopen on + demand. Any further operations will raise + :exc:`~.errors.InvalidOperation`. + """ + with self._lock: + old_td = self._description + for server in self._servers.values(): + server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) + + # Mark all servers Unknown. + self._description = self._description.reset() + for address, sd in self._description.server_descriptions().items(): + if address in self._servers: + self._servers[address].description = sd + + # Stop SRV polling thread. + if self._srv_monitor: + self._srv_monitor.close() + if not _IS_SYNC: + self._monitor_tasks.append(self._srv_monitor) + + self._opened = False + self._closed = True + + # Publish only after releasing the lock. 
+ if self._publish_tp: + assert self._events is not None + self._description = TopologyDescription( + TOPOLOGY_TYPE.Unknown, + {}, + self._description.replica_set_name, + self._description.max_set_version, + self._description.max_election_id, + self._description._topology_settings, + ) + self._events.put( + ( + self._listeners.publish_topology_description_changed, + ( + old_td, + self._description, + self._topology_id, + ), + ) + ) + self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) + if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): + _debug_log( + _SDAM_LOGGER, + message=_SDAMStatusMessage.TOPOLOGY_CHANGE, + topologyId=self._topology_id, + previousDescription=repr(old_td), + newDescription=repr(self._description), + ) + _debug_log( + _SDAM_LOGGER, message=_SDAMStatusMessage.STOP_TOPOLOGY, topologyId=self._topology_id + ) + + if self._publish_server or self._publish_tp: + # Make sure the events executor thread is fully closed before publishing the remaining events + self.__events_executor.close() + self.__events_executor.join(1) + process_events_queue(weakref.ref(self._events)) # type: ignore[arg-type] + + @property + def description(self) -> TopologyDescription: + return self._description + + def pop_all_sessions(self) -> list[_ServerSession]: + """Pop all session ids from the pool.""" + return self._session_pool.pop_all() + + def get_server_session(self, session_timeout_minutes: Optional[int]) -> _ServerSession: + """Start or resume a server session, or raise ConfigurationError.""" + return self._session_pool.get_server_session(session_timeout_minutes) + + def return_server_session(self, server_session: _ServerSession) -> None: + self._session_pool.return_server_session(server_session) + + def _new_selection(self) -> Selection: + """A Selection object, initially including all known servers. + + Hold the lock when calling this. + """ + return Selection.from_topology_description(self._description) + + def _ensure_opened(self) -> None: + """Start monitors, or restart after a fork. + + Hold the lock when calling this. + """ + if self._closed: + raise InvalidOperation("Cannot use MongoClient after close") + + if not self._opened: + self._opened = True + self._update_servers() + + # Start or restart the events publishing thread. + if self._publish_tp or self._publish_server: + self.__events_executor.open() + + # Start the SRV polling thread. + if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): + self._srv_monitor.open() + + if self._settings.load_balanced: + # Emit initial SDAM events for load balancer mode. + self._process_change( + ServerDescription( + self._seed_addresses[0], + Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), + ) + ) + + # Ensure that the monitors are open. + for server in self._servers.values(): + server.open() + + def _is_stale_error(self, address: _Address, err_ctx: _ErrorContext) -> bool: + server = self._servers.get(address) + if server is None: + # Another thread removed this server from the topology. + return True + + if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): + # This is an outdated error from a previous pool version. 
+ return True + + # topologyVersion check, ignore error when cur_tv >= error_tv: + cur_tv = server.description.topology_version + error = err_ctx.error + error_tv = None + if error and hasattr(error, "details"): + if isinstance(error.details, dict): + error_tv = error.details.get("topologyVersion") + + return _is_stale_error_topology_version(cur_tv, error_tv) + + def _handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + if self._is_stale_error(address, err_ctx): + return + + server = self._servers[address] + error = err_ctx.error + service_id = err_ctx.service_id + + # Ignore a handshake error if the server is behind a load balancer but + # the service ID is unknown. This indicates that the error happened + # when dialing the connection or during the MongoDB handshake, so we + # don't know the service ID to use for clearing the pool. + if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: + return + + if isinstance(error, NetworkTimeout) and err_ctx.completed_handshake: + # The socket has been closed. Don't reset the server. + # Server Discovery And Monitoring Spec: "When an application + # operation fails because of any network error besides a socket + # timeout...." + return + elif isinstance(error, WriteError): + # Ignore writeErrors. + return + elif isinstance(error, (NotPrimaryError, OperationFailure)): + # As per the SDAM spec if: + # - the server sees a "not primary" error, and + # - the server is not shutting down, and + # - the server version is >= 4.2, then + # we keep the existing connection pool, but mark the server type + # as Unknown and request an immediate check of the server. + # Otherwise, we clear the connection pool, mark the server as + # Unknown and request an immediate check of the server. + if hasattr(error, "code"): + err_code = error.code + else: + # Default error code if one does not exist. + default = 10107 if isinstance(error, NotPrimaryError) else None + err_code = error.details.get("code", default) # type: ignore[union-attr] + if err_code in helpers_shared._NOT_PRIMARY_CODES: + is_shutting_down = err_code in helpers_shared._SHUTDOWN_CODES + # Mark server Unknown, clear the pool, and request check. + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + if is_shutting_down or (err_ctx.max_wire_version <= 7): + # Clear the pool. + server.reset(service_id) + server.request_check() + elif not err_ctx.completed_handshake: + # Unknown command error during the connection handshake. + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + server.reset(service_id) + elif isinstance(error, ConnectionFailure): + if isinstance(error, WaitQueueTimeoutError): + return + # "Client MUST replace the server's description with type Unknown + # ... MUST NOT request an immediate check of the server." + if not self._settings.load_balanced: + self._process_change(ServerDescription(address, error=error)) + # Clear the pool. + server.reset(service_id) + # "When a client marks a server Unknown from `Network error when + # reading or writing`_, clients MUST cancel the hello check on + # that server and close the current monitoring connection." + server._monitor.cancel_check() + + def handle_error(self, address: _Address, err_ctx: _ErrorContext) -> None: + """Handle an application error. + + May reset the server to Unknown, clear the pool, and request an + immediate check depending on the error and the context. 
+ """ + with self._lock: + self._handle_error(address, err_ctx) + + def _request_check_all(self) -> None: + """Wake all monitors. Hold the lock when calling this.""" + for server in self._servers.values(): + server.request_check() + + def _update_servers(self) -> None: + """Sync our Servers from TopologyDescription.server_descriptions. + + Hold the lock while calling this. + """ + for address, sd in self._description.server_descriptions().items(): + if address not in self._servers: + monitor = self._settings.monitor_class( + server_description=sd, + topology=self, + pool=self._create_pool_for_monitor(address), + topology_settings=self._settings, + ) + + weak = None + if self._publish_server and self._events is not None: + weak = weakref.ref(self._events) + server = Server( + server_description=sd, + pool=self._create_pool_for_server(address), + monitor=monitor, + topology_id=self._topology_id, + listeners=self._listeners, + events=weak, + ) + + self._servers[address] = server + server.open() + else: + # Cache old is_writable value. + was_writable = self._servers[address].description.is_writable + # Update server description. + self._servers[address].description = sd + # Update is_writable value of the pool, if it changed. + if was_writable != sd.is_writable: + self._servers[address].pool.update_is_writable(sd.is_writable) + + for address, server in list(self._servers.items()): + if not self._description.has_server(address): + server.close() + if not _IS_SYNC: + self._monitor_tasks.append(server._monitor) + self._servers.pop(address) + + def _create_pool_for_server(self, address: _Address) -> Pool: + return self._settings.pool_class( + address, self._settings.pool_options, client_id=self._topology_id + ) + + def _create_pool_for_monitor(self, address: _Address) -> Pool: + options = self._settings.pool_options + + # According to the Server Discovery And Monitoring Spec, monitors use + # connect_timeout for both connect_timeout and socket_timeout. The + # pool only has one socket so maxPoolSize and so on aren't needed. + monitor_pool_options = PoolOptions( + connect_timeout=options.connect_timeout, + socket_timeout=options.connect_timeout, + ssl_context=options._ssl_context, + tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, + event_listeners=options._event_listeners, + appname=options.appname, + driver=options.driver, + pause_enabled=False, + server_api=options.server_api, + ) + + return self._settings.pool_class( + address, monitor_pool_options, is_sdam=True, client_id=self._topology_id + ) + + def _error_message(self, selector: Callable[[Selection], Selection]) -> str: + """Format an error message if server selection fails. + + Hold the lock when calling this. + """ + is_replica_set = self._description.topology_type in ( + TOPOLOGY_TYPE.ReplicaSetWithPrimary, + TOPOLOGY_TYPE.ReplicaSetNoPrimary, + ) + + if is_replica_set: + server_plural = "replica set members" + elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: + server_plural = "mongoses" + else: + server_plural = "servers" + + if self._description.known_servers: + # We've connected, but no servers match the selector. 
+ if selector is writable_server_selector: + if is_replica_set: + return "No primary available for writes" + else: + return "No %s available for writes" % server_plural + else: + return f'No {server_plural} match selector "{selector}"' + else: + addresses = list(self._description.server_descriptions()) + servers = list(self._description.server_descriptions().values()) + if not servers: + if is_replica_set: + # We removed all servers because of the wrong setName? + return 'No {} available for replica set name "{}"'.format( + server_plural, + self._settings.replica_set_name, + ) + else: + return "No %s available" % server_plural + + # 1 or more servers, all Unknown. Are they unknown for one reason? + error = servers[0].error + same = all(server.error == error for server in servers[1:]) + if same: + if error is None: + # We're still discovering. + return "No %s found yet" % server_plural + + if is_replica_set and not set(addresses).intersection(self._seed_addresses): + # We replaced our seeds with new hosts but can't reach any. + return ( + "Could not reach any servers in %s. Replica set is" + " configured with internal hostnames or IPs?" % addresses + ) + + return str(error) + else: + return ",".join(str(server.error) for server in servers if server.error) + + def cleanup_monitors(self) -> None: + tasks = [] + try: + while self._monitor_tasks: + tasks.append(self._monitor_tasks.pop()) + except IndexError: + pass + asyncio.gather(*[t.join() for t in tasks], return_exceptions=True) # type: ignore[func-returns-value] + + def __repr__(self) -> str: + msg = "" + if not self._opened: + msg = "CLOSED " + return f"<{self.__class__.__name__} {msg}{self._description!r}>" + + def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: + """The properties to use for MongoClient/Topology equality checks.""" + ts = self._settings + return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) + + def __eq__(self, other: object) -> bool: + if isinstance(other, self.__class__): + return self.eq_props() == other.eq_props() + return NotImplemented + + def __hash__(self) -> int: + return hash(self.eq_props()) + + +class _ErrorContext: + """An error with context for SDAM error handling.""" + + def __init__( + self, + error: BaseException, + max_wire_version: int, + sock_generation: int, + completed_handshake: bool, + service_id: Optional[ObjectId], + ): + self.error = error + self.max_wire_version = max_wire_version + self.sock_generation = sock_generation + self.completed_handshake = completed_handshake + self.service_id = service_id + + +def _is_stale_error_topology_version( + current_tv: Optional[Mapping[str, Any]], error_tv: Optional[Mapping[str, Any]] +) -> bool: + """Return True if the error's topologyVersion is <= current.""" + if current_tv is None or error_tv is None: + return False + if current_tv["processId"] != error_tv["processId"]: + return False + return current_tv["counter"] >= error_tv["counter"] + + +def _is_stale_server_description(current_sd: ServerDescription, new_sd: ServerDescription) -> bool: + """Return True if the new topologyVersion is < current.""" + current_tv, new_tv = current_sd.topology_version, new_sd.topology_version + if current_tv is None or new_tv is None: + return False + if current_tv["processId"] != new_tv["processId"]: + return False + return current_tv["counter"] > new_tv["counter"] + + +def _filter_servers( + candidates: list[Server], deprioritized_servers: Optional[list[Server]] = None +) -> list[Server]: + """Filter out 
deprioritized servers from a list of server candidates."""
+    if not deprioritized_servers:
+        return candidates
+
+    filtered = [server for server in candidates if server not in deprioritized_servers]
+
+    # If not possible to pick a prioritized server, return the original list
+    return filtered or candidates
diff --git a/pymongo/synchronous/uri_parser.py b/pymongo/synchronous/uri_parser.py
new file mode 100644
index 0000000000..45c1752953
--- /dev/null
+++ b/pymongo/synchronous/uri_parser.py
@@ -0,0 +1,193 @@
+# Copyright 2011-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you
+# may not use this file except in compliance with the License. You
+# may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+
+"""Tools to parse and validate a MongoDB URI."""
+from __future__ import annotations
+
+from typing import Any, Optional
+from urllib.parse import unquote_plus
+
+from pymongo.common import SRV_SERVICE_NAME, _CaseInsensitiveDictionary
+from pymongo.errors import ConfigurationError, InvalidURI
+from pymongo.synchronous.srv_resolver import _SrvResolver
+from pymongo.uri_parser_shared import (
+    _ALLOWED_TXT_OPTS,
+    DEFAULT_PORT,
+    SCHEME,
+    SCHEME_LEN,
+    SRV_SCHEME_LEN,
+    _check_options,
+    _make_options_case_sensitive,
+    _validate_uri,
+    split_hosts,
+    split_options,
+)
+
+_IS_SYNC = True
+
+
+def parse_uri(
+    uri: str,
+    default_port: Optional[int] = DEFAULT_PORT,
+    validate: bool = True,
+    warn: bool = False,
+    normalize: bool = True,
+    connect_timeout: Optional[float] = None,
+    srv_service_name: Optional[str] = None,
+    srv_max_hosts: Optional[int] = None,
+) -> dict[str, Any]:
+    """Parse and validate a MongoDB URI.
+
+    Returns a dict of the form::
+
+        {
+            'nodelist': <list of (host, port) tuples>,
+            'username': <username> or None,
+            'password': <password> or None,
+            'database': <database name> or None,
+            'collection': <collection name> or None,
+            'options': <dict of MongoDB URI options>,
+            'fqdn': <fqdn of the MongoDB+SRV URI> or None
+        }
+
+    If the URI scheme is "mongodb+srv://", DNS SRV and TXT lookups will be
+    done to build nodelist and options.
+
+    :param uri: The MongoDB URI to parse.
+    :param default_port: The port number to use when one wasn't specified
+        for a host in the URI.
+    :param validate: If ``True`` (the default), validate and
+        normalize all options. Default: ``True``.
+    :param warn: When validating, if ``True``, warn the user and ignore
+        any invalid options or values. If ``False``,
+        validation will error when options are unsupported or values are
+        invalid. Default: ``False``.
+    :param normalize: If ``True``, convert names of URI options
+        to their internally-used names. Default: ``True``.
+    :param connect_timeout: The maximum time in milliseconds to
+        wait for a response from the DNS server.
+    :param srv_service_name: A custom SRV service name.
+
+    .. versionchanged:: 4.14
+        ``options`` is now type ``dict`` as opposed to a ``_CaseInsensitiveDictionary``.
+
+    .. versionchanged:: 4.6
+        The delimiting slash (``/``) between hosts and connection options is now optional.
+        For example, "mongodb://example.com?tls=true" is now a valid URI.
+
+    .. versionchanged:: 4.0
+        To better follow RFC 3986, unquoted percent signs ("%") are no longer
+        supported.
+
+    .. versionchanged:: 3.9
+        Added the ``normalize`` parameter.
+ + .. versionchanged:: 3.6 + Added support for mongodb+srv:// URIs. + + .. versionchanged:: 3.5 + Return the original value of the ``readPreference`` MongoDB URI option + instead of the validated read preference mode. + + .. versionchanged:: 3.1 + ``warn`` added so invalid options can be ignored. + """ + result = _validate_uri(uri, default_port, validate, warn, normalize, srv_max_hosts) + result.update( + _parse_srv( + uri, + default_port, + validate, + warn, + normalize, + connect_timeout, + srv_service_name, + srv_max_hosts, + ) + ) + result["options"] = _make_options_case_sensitive(result["options"]) + return result + + +def _parse_srv( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + connect_timeout: Optional[float] = None, + srv_service_name: Optional[str] = None, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + else: + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, _ = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if srv_service_name is None: + srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) + if "@" in host_part: + _, _, hosts = host_part.rpartition("@") + else: + hosts = host_part + + hosts = unquote_plus(hosts) + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + nodes = split_hosts(hosts, default_port=None) + fqdn, port = nodes[0] + + # Use the connection timeout. connectTimeoutMS passed as a keyword + # argument overrides the same option passed in the connection string. + connect_timeout = connect_timeout or options.get("connectTimeoutMS") + dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) + nodes = dns_resolver.get_hosts() + dns_options = dns_resolver.get_options() + if dns_options: + parsed_dns_options = split_options(dns_options, validate, warn, normalize) + if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: + raise ConfigurationError( + "Only authSource, replicaSet, and loadBalanced are supported from DNS" + ) + for opt, val in parsed_dns_options.items(): + if opt not in options: + options[opt] = val + if options.get("loadBalanced") and srv_max_hosts: + raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") + if options.get("replicaSet") and srv_max_hosts: + raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") + if "tls" not in options and "ssl" not in options: + options["tls"] = True if validate else "true" + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "options": options, + } diff --git a/pymongo/topology.py b/pymongo/topology.py deleted file mode 100644 index 6781a9e549..0000000000 --- a/pymongo/topology.py +++ /dev/null @@ -1,890 +0,0 @@ -# Copyright 2014-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you -# may not use this file except in compliance with the License. 
You -# may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Internal class to monitor a topology of one or more servers.""" - -import os -import queue -import random -import threading -import time -import warnings -import weakref -from typing import Any - -from pymongo import _csot, common, helpers, periodic_executor -from pymongo.client_session import _ServerSessionPool -from pymongo.errors import ( - ConfigurationError, - ConnectionFailure, - InvalidOperation, - NetworkTimeout, - NotPrimaryError, - OperationFailure, - PyMongoError, - ServerSelectionTimeoutError, - WriteError, -) -from pymongo.hello import Hello -from pymongo.monitor import SrvMonitor -from pymongo.pool import PoolOptions -from pymongo.server import Server -from pymongo.server_description import ServerDescription -from pymongo.server_selectors import ( - Selection, - any_server_selector, - arbiter_server_selector, - readable_server_selector, - secondary_server_selector, - writable_server_selector, -) -from pymongo.topology_description import ( - SRV_POLLING_TOPOLOGIES, - TOPOLOGY_TYPE, - TopologyDescription, - _updated_topology_description_srv_polling, - updated_topology_description, -) - - -def process_events_queue(queue_ref): - q = queue_ref() - if not q: - return False # Cancel PeriodicExecutor. - - while True: - try: - event = q.get_nowait() - except queue.Empty: - break - else: - fn, args = event - fn(*args) - - return True # Continue PeriodicExecutor. - - -class Topology(object): - """Monitor a topology of one or more servers.""" - - def __init__(self, topology_settings): - self._topology_id = topology_settings._topology_id - self._listeners = topology_settings._pool_options._event_listeners - pub = self._listeners is not None - self._publish_server = pub and self._listeners.enabled_for_server - self._publish_tp = pub and self._listeners.enabled_for_topology - - # Create events queue if there are publishers. - self._events = None - self.__events_executor: Any = None - - if self._publish_server or self._publish_tp: - self._events = queue.Queue(maxsize=100) - - if self._publish_tp: - assert self._events is not None - self._events.put((self._listeners.publish_topology_opened, (self._topology_id,))) - self._settings = topology_settings - topology_description = TopologyDescription( - topology_settings.get_topology_type(), - topology_settings.get_server_descriptions(), - topology_settings.replica_set_name, - None, - None, - topology_settings, - ) - - self._description = topology_description - if self._publish_tp: - assert self._events is not None - initial_td = TopologyDescription( - TOPOLOGY_TYPE.Unknown, {}, None, None, None, self._settings - ) - self._events.put( - ( - self._listeners.publish_topology_description_changed, - (initial_td, self._description, self._topology_id), - ) - ) - - for seed in topology_settings.seeds: - if self._publish_server: - assert self._events is not None - self._events.put((self._listeners.publish_server_opened, (seed, self._topology_id))) - - # Store the seed list to help diagnose errors in _error_message(). 
- self._seed_addresses = list(topology_description.server_descriptions()) - self._opened = False - self._closed = False - self._lock = threading.Lock() - self._condition = self._settings.condition_class(self._lock) - self._servers = {} - self._pid = None - self._max_cluster_time = None - self._session_pool = _ServerSessionPool() - - if self._publish_server or self._publish_tp: - - def target(): - return process_events_queue(weak) - - executor = periodic_executor.PeriodicExecutor( - interval=common.EVENTS_QUEUE_FREQUENCY, - min_interval=common.MIN_HEARTBEAT_INTERVAL, - target=target, - name="pymongo_events_thread", - ) - - # We strongly reference the executor and it weakly references - # the queue via this closure. When the topology is freed, stop - # the executor soon. - weak = weakref.ref(self._events, executor.close) - self.__events_executor = executor - executor.open() - - self._srv_monitor = None - if self._settings.fqdn is not None and not self._settings.load_balanced: - self._srv_monitor = SrvMonitor(self, self._settings) - - def open(self): - """Start monitoring, or restart after a fork. - - No effect if called multiple times. - - .. warning:: Topology is shared among multiple threads and is protected - by mutual exclusion. Using Topology from a process other than the one - that initialized it will emit a warning and may result in deadlock. To - prevent this from happening, MongoClient must be created after any - forking. - - """ - pid = os.getpid() - if self._pid is None: - self._pid = pid - elif pid != self._pid: - self._pid = pid - warnings.warn( - "MongoClient opened before fork. Create MongoClient only " - "after forking. See PyMongo's documentation for details: " - "https://pymongo.readthedocs.io/en/stable/faq.html#" - "is-pymongo-fork-safe" - ) - with self._lock: - # Close servers and clear the pools. - for server in self._servers.values(): - server.close() - # Reset the session pool to avoid duplicate sessions in - # the child process. - self._session_pool.reset() - - with self._lock: - self._ensure_opened() - - def get_server_selection_timeout(self): - # CSOT: use remaining timeout when set. - timeout = _csot.remaining() - if timeout is None: - return self._settings.server_selection_timeout - return timeout - - def select_servers(self, selector, server_selection_timeout=None, address=None): - """Return a list of Servers matching selector, or time out. - - :Parameters: - - `selector`: function that takes a list of Servers and returns - a subset of them. - - `server_selection_timeout` (optional): maximum seconds to wait. - If not provided, the default value common.SERVER_SELECTION_TIMEOUT - is used. - - `address`: optional server address to select. - - Calls self.open() if needed. - - Raises exc:`ServerSelectionTimeoutError` after - `server_selection_timeout` if no matching servers are found. - """ - if server_selection_timeout is None: - server_timeout = self.get_server_selection_timeout() - else: - server_timeout = server_selection_timeout - - with self._lock: - server_descriptions = self._select_servers_loop(selector, server_timeout, address) - - return [self.get_server_by_address(sd.address) for sd in server_descriptions] - - def _select_servers_loop(self, selector, timeout, address): - """select_servers() guts. 
-        Hold the lock when calling this."""
-        now = time.monotonic()
-        end_time = now + timeout
-        server_descriptions = self._description.apply_selector(
-            selector, address, custom_selector=self._settings.server_selector
-        )
-
-        while not server_descriptions:
-            # No suitable servers.
-            if timeout == 0 or now > end_time:
-                raise ServerSelectionTimeoutError(
-                    "%s, Timeout: %ss, Topology Description: %r"
-                    % (self._error_message(selector), timeout, self.description)
-                )
-
-            self._ensure_opened()
-            self._request_check_all()
-
-            # Release the lock and wait for the topology description to
-            # change, or for a timeout. We won't miss any changes that
-            # came after our most recent apply_selector call, since we've
-            # held the lock until now.
-            self._condition.wait(common.MIN_HEARTBEAT_INTERVAL)
-            self._description.check_compatible()
-            now = time.monotonic()
-            server_descriptions = self._description.apply_selector(
-                selector, address, custom_selector=self._settings.server_selector
-            )
-
-        self._description.check_compatible()
-        return server_descriptions
-
-    def _select_server(self, selector, server_selection_timeout=None, address=None):
-        servers = self.select_servers(selector, server_selection_timeout, address)
-        if len(servers) == 1:
-            return servers[0]
-        server1, server2 = random.sample(servers, 2)
-        if server1.pool.operation_count <= server2.pool.operation_count:
-            return server1
-        else:
-            return server2
-
-    def select_server(self, selector, server_selection_timeout=None, address=None):
-        """Like select_servers, but choose a random server if several match."""
-        server = self._select_server(selector, server_selection_timeout, address)
-        if _csot.get_timeout():
-            _csot.set_rtt(server.description.round_trip_time)
-        return server
-
-    def select_server_by_address(self, address, server_selection_timeout=None):
-        """Return a Server for "address", reconnecting if necessary.
-
-        If the server's type is not known, request an immediate check of all
-        servers. Time out after "server_selection_timeout" if the server
-        cannot be reached.
-
-        :Parameters:
-          - `address`: A (host, port) pair.
-          - `server_selection_timeout` (optional): maximum seconds to wait.
-            If not provided, the default value
-            common.SERVER_SELECTION_TIMEOUT is used.
-
-        Calls self.open() if needed.
-
-        Raises :exc:`ServerSelectionTimeoutError` after
-        `server_selection_timeout` if no matching servers are found.
-        """
-        return self.select_server(any_server_selector, server_selection_timeout, address)
-
-    def _process_change(self, server_description, reset_pool=False):
-        """Process a new ServerDescription on an opened topology.
-
-        Hold the lock when calling this.
-        """
-        td_old = self._description
-        sd_old = td_old._server_descriptions[server_description.address]
-        if _is_stale_server_description(sd_old, server_description):
-            # This is a stale hello response. Ignore it.
-            return
-
-        new_td = updated_topology_description(self._description, server_description)
-        # CMAP: Ensure the pool is "ready" when the server is selectable.
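
The _select_server helper above implements the "power of two random choices" strategy: rather than scanning every candidate's load, it samples two matching servers at random and keeps the one with fewer in-progress operations, which evens out load over many selections. A standalone sketch of the idea with a toy server type (not a PyMongo API):

import random
from dataclasses import dataclass

@dataclass
class ToyServer:
    name: str
    operation_count: int  # In-progress operations, as a pool would track.

def choose(servers):
    if len(servers) == 1:
        return servers[0]
    a, b = random.sample(servers, 2)
    # Prefer the less loaded of two random candidates.
    return a if a.operation_count <= b.operation_count else b

servers = [ToyServer("s1", 5), ToyServer("s2", 0), ToyServer("s3", 9)]
print(choose(servers).name)  # Never "s3" when "s3" is paired with a lighter peer.
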
- if server_description.is_readable or ( - server_description.is_server_type_known and new_td.topology_type == TOPOLOGY_TYPE.Single - ): - server = self._servers.get(server_description.address) - if server: - server.pool.ready() - - suppress_event = (self._publish_server or self._publish_tp) and sd_old == server_description - if self._publish_server and not suppress_event: - assert self._events is not None - self._events.put( - ( - self._listeners.publish_server_description_changed, - (sd_old, server_description, server_description.address, self._topology_id), - ) - ) - - self._description = new_td - self._update_servers() - self._receive_cluster_time_no_lock(server_description.cluster_time) - - if self._publish_tp and not suppress_event: - assert self._events is not None - self._events.put( - ( - self._listeners.publish_topology_description_changed, - (td_old, self._description, self._topology_id), - ) - ) - - # Shutdown SRV polling for unsupported cluster types. - # This is only applicable if the old topology was Unknown, and the - # new one is something other than Unknown or Sharded. - if self._srv_monitor and ( - td_old.topology_type == TOPOLOGY_TYPE.Unknown - and self._description.topology_type not in SRV_POLLING_TOPOLOGIES - ): - self._srv_monitor.close() - - # Clear the pool from a failed heartbeat. - if reset_pool: - server = self._servers.get(server_description.address) - if server: - server.pool.reset() - - # Wake waiters in select_servers(). - self._condition.notify_all() - - def on_change(self, server_description, reset_pool=False): - """Process a new ServerDescription after an hello call completes.""" - # We do no I/O holding the lock. - with self._lock: - # Monitors may continue working on hello calls for some time - # after a call to Topology.close, so this method may be called at - # any time. Ensure the topology is open before processing the - # change. - # Any monitored server was definitely in the topology description - # once. Check if it's still in the description or if some state- - # change removed it. E.g., we got a host list from the primary - # that didn't include this server. - if self._opened and self._description.has_server(server_description.address): - self._process_change(server_description, reset_pool) - - def _process_srv_update(self, seedlist): - """Process a new seedlist on an opened topology. - Hold the lock when calling this. - """ - td_old = self._description - if td_old.topology_type not in SRV_POLLING_TOPOLOGIES: - return - self._description = _updated_topology_description_srv_polling(self._description, seedlist) - - self._update_servers() - - if self._publish_tp: - assert self._events is not None - self._events.put( - ( - self._listeners.publish_topology_description_changed, - (td_old, self._description, self._topology_id), - ) - ) - - def on_srv_update(self, seedlist): - """Process a new list of nodes obtained from scanning SRV records.""" - # We do no I/O holding the lock. - with self._lock: - if self._opened: - self._process_srv_update(seedlist) - - def get_server_by_address(self, address): - """Get a Server or None. - - Returns the current version of the server immediately, even if it's - Unknown or absent from the topology. Only use this in unittests. - In driver code, use select_server_by_address, since then you're - assured a recent view of the server's type and wire protocol version. 
- """ - return self._servers.get(address) - - def has_server(self, address): - return address in self._servers - - def get_primary(self): - """Return primary's address or None.""" - # Implemented here in Topology instead of MongoClient, so it can lock. - with self._lock: - topology_type = self._description.topology_type - if topology_type != TOPOLOGY_TYPE.ReplicaSetWithPrimary: - return None - - return writable_server_selector(self._new_selection())[0].address - - def _get_replica_set_members(self, selector): - """Return set of replica set member addresses.""" - # Implemented here in Topology instead of MongoClient, so it can lock. - with self._lock: - topology_type = self._description.topology_type - if topology_type not in ( - TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.ReplicaSetNoPrimary, - ): - return set() - - return set([sd.address for sd in selector(self._new_selection())]) - - def get_secondaries(self): - """Return set of secondary addresses.""" - return self._get_replica_set_members(secondary_server_selector) - - def get_arbiters(self): - """Return set of arbiter addresses.""" - return self._get_replica_set_members(arbiter_server_selector) - - def max_cluster_time(self): - """Return a document, the highest seen $clusterTime.""" - return self._max_cluster_time - - def _receive_cluster_time_no_lock(self, cluster_time): - # Driver Sessions Spec: "Whenever a driver receives a cluster time from - # a server it MUST compare it to the current highest seen cluster time - # for the deployment. If the new cluster time is higher than the - # highest seen cluster time it MUST become the new highest seen cluster - # time. Two cluster times are compared using only the BsonTimestamp - # value of the clusterTime embedded field." - if cluster_time: - # ">" uses bson.timestamp.Timestamp's comparison operator. - if ( - not self._max_cluster_time - or cluster_time["clusterTime"] > self._max_cluster_time["clusterTime"] - ): - self._max_cluster_time = cluster_time - - def receive_cluster_time(self, cluster_time): - with self._lock: - self._receive_cluster_time_no_lock(cluster_time) - - def request_check_all(self, wait_time=5): - """Wake all monitors, wait for at least one to check its server.""" - with self._lock: - self._request_check_all() - self._condition.wait(wait_time) - - def data_bearing_servers(self): - """Return a list of all data-bearing servers. - - This includes any server that might be selected for an operation. - """ - if self._description.topology_type == TOPOLOGY_TYPE.Single: - return self._description.known_servers - return self._description.readable_servers - - def update_pool(self): - # Remove any stale sockets and add new sockets if pool is too small. - servers = [] - with self._lock: - # Only update pools for data-bearing servers. - for sd in self.data_bearing_servers(): - server = self._servers[sd.address] - servers.append((server, server.pool.gen.get_overall())) - - for server, generation in servers: - try: - server.pool.remove_stale_sockets(generation) - except PyMongoError as exc: - ctx = _ErrorContext(exc, 0, generation, False, None) - self.handle_error(server.description.address, ctx) - raise - - def close(self): - """Clear pools and terminate monitors. Topology does not reopen on - demand. Any further operations will raise - :exc:`~.errors.InvalidOperation`.""" - with self._lock: - for server in self._servers.values(): - server.close() - - # Mark all servers Unknown. 
- self._description = self._description.reset() - for address, sd in self._description.server_descriptions().items(): - if address in self._servers: - self._servers[address].description = sd - - # Stop SRV polling thread. - if self._srv_monitor: - self._srv_monitor.close() - - self._opened = False - self._closed = True - - # Publish only after releasing the lock. - if self._publish_tp: - assert self._events is not None - self._events.put((self._listeners.publish_topology_closed, (self._topology_id,))) - if self._publish_server or self._publish_tp: - self.__events_executor.close() - - @property - def description(self): - return self._description - - def pop_all_sessions(self): - """Pop all session ids from the pool.""" - with self._lock: - return self._session_pool.pop_all() - - def _check_implicit_session_support(self): - with self._lock: - self._check_session_support() - - def _check_session_support(self): - """Internal check for session support on clusters.""" - if self._settings.load_balanced: - # Sessions never time out in load balanced mode. - return float("inf") - session_timeout = self._description.logical_session_timeout_minutes - if session_timeout is None: - # Maybe we need an initial scan? Can raise ServerSelectionError. - if self._description.topology_type == TOPOLOGY_TYPE.Single: - if not self._description.has_known_servers: - self._select_servers_loop( - any_server_selector, self.get_server_selection_timeout(), None - ) - elif not self._description.readable_servers: - self._select_servers_loop( - readable_server_selector, self.get_server_selection_timeout(), None - ) - - session_timeout = self._description.logical_session_timeout_minutes - if session_timeout is None: - raise ConfigurationError("Sessions are not supported by this MongoDB deployment") - return session_timeout - - def get_server_session(self): - """Start or resume a server session, or raise ConfigurationError.""" - with self._lock: - session_timeout = self._check_session_support() - return self._session_pool.get_server_session(session_timeout) - - def return_server_session(self, server_session, lock): - if lock: - with self._lock: - self._session_pool.return_server_session( - server_session, self._description.logical_session_timeout_minutes - ) - else: - # Called from a __del__ method, can't use a lock. - self._session_pool.return_server_session_no_lock(server_session) - - def _new_selection(self): - """A Selection object, initially including all known servers. - - Hold the lock when calling this. - """ - return Selection.from_topology_description(self._description) - - def _ensure_opened(self): - """Start monitors, or restart after a fork. - - Hold the lock when calling this. - """ - if self._closed: - raise InvalidOperation("Cannot use MongoClient after close") - - if not self._opened: - self._opened = True - self._update_servers() - - # Start or restart the events publishing thread. - if self._publish_tp or self._publish_server: - self.__events_executor.open() - - # Start the SRV polling thread. - if self._srv_monitor and (self.description.topology_type in SRV_POLLING_TOPOLOGIES): - self._srv_monitor.open() - - if self._settings.load_balanced: - # Emit initial SDAM events for load balancer mode. - self._process_change( - ServerDescription( - self._seed_addresses[0], - Hello({"ok": 1, "serviceId": self._topology_id, "maxWireVersion": 13}), - ) - ) - - # Ensure that the monitors are open. 
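
The session-support check above reduces to a small decision: load-balanced topologies always support sessions (with no timeout), otherwise support hinges on whether the deployment reports logicalSessionTimeoutMinutes. A toy restatement of that decision (not a PyMongo API; the real method also retries server selection before giving up):

from typing import Optional

def session_timeout(load_balanced: bool,
                    logical_session_timeout_minutes: Optional[int]) -> float:
    if load_balanced:
        return float("inf")  # Sessions never time out behind a load balancer.
    if logical_session_timeout_minutes is None:
        # The real method re-scans the topology before raising
        # ConfigurationError; this sketch just fails fast.
        raise ValueError("Sessions are not supported by this deployment")
    return logical_session_timeout_minutes

assert session_timeout(True, None) == float("inf")
assert session_timeout(False, 30) == 30
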
- for server in self._servers.values(): - server.open() - - def _is_stale_error(self, address, err_ctx): - server = self._servers.get(address) - if server is None: - # Another thread removed this server from the topology. - return True - - if server._pool.stale_generation(err_ctx.sock_generation, err_ctx.service_id): - # This is an outdated error from a previous pool version. - return True - - # topologyVersion check, ignore error when cur_tv >= error_tv: - cur_tv = server.description.topology_version - error = err_ctx.error - error_tv = None - if error and hasattr(error, "details"): - if isinstance(error.details, dict): - error_tv = error.details.get("topologyVersion") - - return _is_stale_error_topology_version(cur_tv, error_tv) - - def _handle_error(self, address, err_ctx): - if self._is_stale_error(address, err_ctx): - return - - server = self._servers[address] - error = err_ctx.error - exc_type = type(error) - service_id = err_ctx.service_id - - # Ignore a handshake error if the server is behind a load balancer but - # the service ID is unknown. This indicates that the error happened - # when dialing the connection or during the MongoDB handshake, so we - # don't know the service ID to use for clearing the pool. - if self._settings.load_balanced and not service_id and not err_ctx.completed_handshake: - return - - if issubclass(exc_type, NetworkTimeout) and err_ctx.completed_handshake: - # The socket has been closed. Don't reset the server. - # Server Discovery And Monitoring Spec: "When an application - # operation fails because of any network error besides a socket - # timeout...." - return - elif issubclass(exc_type, WriteError): - # Ignore writeErrors. - return - elif issubclass(exc_type, (NotPrimaryError, OperationFailure)): - # As per the SDAM spec if: - # - the server sees a "not primary" error, and - # - the server is not shutting down, and - # - the server version is >= 4.2, then - # we keep the existing connection pool, but mark the server type - # as Unknown and request an immediate check of the server. - # Otherwise, we clear the connection pool, mark the server as - # Unknown and request an immediate check of the server. - if hasattr(error, "code"): - err_code = error.code - else: - # Default error code if one does not exist. - default = 10107 if isinstance(error, NotPrimaryError) else None - err_code = error.details.get("code", default) - if err_code in helpers._NOT_PRIMARY_CODES: - is_shutting_down = err_code in helpers._SHUTDOWN_CODES - # Mark server Unknown, clear the pool, and request check. - if not self._settings.load_balanced: - self._process_change(ServerDescription(address, error=error)) - if is_shutting_down or (err_ctx.max_wire_version <= 7): - # Clear the pool. - server.reset(service_id) - server.request_check() - elif not err_ctx.completed_handshake: - # Unknown command error during the connection handshake. - if not self._settings.load_balanced: - self._process_change(ServerDescription(address, error=error)) - # Clear the pool. - server.reset(service_id) - elif issubclass(exc_type, ConnectionFailure): - # "Client MUST replace the server's description with type Unknown - # ... MUST NOT request an immediate check of the server." - if not self._settings.load_balanced: - self._process_change(ServerDescription(address, error=error)) - # Clear the pool. 
- server.reset(service_id) - # "When a client marks a server Unknown from `Network error when - # reading or writing`_, clients MUST cancel the hello check on - # that server and close the current monitoring connection." - server._monitor.cancel_check() - - def handle_error(self, address, err_ctx): - """Handle an application error. - - May reset the server to Unknown, clear the pool, and request an - immediate check depending on the error and the context. - """ - with self._lock: - self._handle_error(address, err_ctx) - - def _request_check_all(self): - """Wake all monitors. Hold the lock when calling this.""" - for server in self._servers.values(): - server.request_check() - - def _update_servers(self): - """Sync our Servers from TopologyDescription.server_descriptions. - - Hold the lock while calling this. - """ - for address, sd in self._description.server_descriptions().items(): - if address not in self._servers: - monitor = self._settings.monitor_class( - server_description=sd, - topology=self, - pool=self._create_pool_for_monitor(address), - topology_settings=self._settings, - ) - - weak = None - if self._publish_server: - weak = weakref.ref(self._events) - server = Server( - server_description=sd, - pool=self._create_pool_for_server(address), - monitor=monitor, - topology_id=self._topology_id, - listeners=self._listeners, - events=weak, - ) - - self._servers[address] = server - server.open() - else: - # Cache old is_writable value. - was_writable = self._servers[address].description.is_writable - # Update server description. - self._servers[address].description = sd - # Update is_writable value of the pool, if it changed. - if was_writable != sd.is_writable: - self._servers[address].pool.update_is_writable(sd.is_writable) - - for address, server in list(self._servers.items()): - if not self._description.has_server(address): - server.close() - self._servers.pop(address) - - def _create_pool_for_server(self, address): - return self._settings.pool_class(address, self._settings.pool_options) - - def _create_pool_for_monitor(self, address): - options = self._settings.pool_options - - # According to the Server Discovery And Monitoring Spec, monitors use - # connect_timeout for both connect_timeout and socket_timeout. The - # pool only has one socket so maxPoolSize and so on aren't needed. - monitor_pool_options = PoolOptions( - connect_timeout=options.connect_timeout, - socket_timeout=options.connect_timeout, - ssl_context=options._ssl_context, - tls_allow_invalid_hostnames=options.tls_allow_invalid_hostnames, - event_listeners=options._event_listeners, - appname=options.appname, - driver=options.driver, - pause_enabled=False, - server_api=options.server_api, - ) - - return self._settings.pool_class(address, monitor_pool_options, handshake=False) - - def _error_message(self, selector): - """Format an error message if server selection fails. - - Hold the lock when calling this. - """ - is_replica_set = self._description.topology_type in ( - TOPOLOGY_TYPE.ReplicaSetWithPrimary, - TOPOLOGY_TYPE.ReplicaSetNoPrimary, - ) - - if is_replica_set: - server_plural = "replica set members" - elif self._description.topology_type == TOPOLOGY_TYPE.Sharded: - server_plural = "mongoses" - else: - server_plural = "servers" - - if self._description.known_servers: - # We've connected, but no servers match the selector. 
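
To summarize _handle_error above: the SDAM reaction depends on the exception class and on whether the handshake had completed. A simplified decision table over PyMongo's real exception hierarchy (this sketch collapses the error-code and wire-version refinements the full method applies):

from pymongo.errors import (
    ConnectionFailure, NetworkTimeout, NotPrimaryError, WriteError,
)

def reaction(exc: Exception, completed_handshake: bool) -> str:
    if isinstance(exc, NetworkTimeout) and completed_handshake:
        return "ignore"          # Socket timed out; the server may be fine.
    if isinstance(exc, WriteError):
        return "ignore"          # writeErrors don't change topology state.
    if isinstance(exc, NotPrimaryError):
        return "mark-unknown"    # Maybe clear the pool and request a check.
    if isinstance(exc, ConnectionFailure):
        return "mark-unknown-clear-pool"  # And cancel the in-flight hello.
    return "ignore"

assert reaction(NetworkTimeout("timed out"), True) == "ignore"
assert reaction(NotPrimaryError("not primary"), True) == "mark-unknown"
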
- if selector is writable_server_selector: - if is_replica_set: - return "No primary available for writes" - else: - return "No %s available for writes" % server_plural - else: - return 'No %s match selector "%s"' % (server_plural, selector) - else: - addresses = list(self._description.server_descriptions()) - servers = list(self._description.server_descriptions().values()) - if not servers: - if is_replica_set: - # We removed all servers because of the wrong setName? - return 'No %s available for replica set name "%s"' % ( - server_plural, - self._settings.replica_set_name, - ) - else: - return "No %s available" % server_plural - - # 1 or more servers, all Unknown. Are they unknown for one reason? - error = servers[0].error - same = all(server.error == error for server in servers[1:]) - if same: - if error is None: - # We're still discovering. - return "No %s found yet" % server_plural - - if is_replica_set and not set(addresses).intersection(self._seed_addresses): - # We replaced our seeds with new hosts but can't reach any. - return ( - "Could not reach any servers in %s. Replica set is" - " configured with internal hostnames or IPs?" % addresses - ) - - return str(error) - else: - return ",".join(str(server.error) for server in servers if server.error) - - def __repr__(self): - msg = "" - if not self._opened: - msg = "CLOSED " - return "<%s %s%r>" % (self.__class__.__name__, msg, self._description) - - def eq_props(self): - """The properties to use for MongoClient/Topology equality checks.""" - ts = self._settings - return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name) - - def __eq__(self, other): - if isinstance(other, self.__class__): - return self.eq_props() == other.eq_props() - return NotImplemented - - def __hash__(self): - return hash(self.eq_props()) - - -class _ErrorContext(object): - """An error with context for SDAM error handling.""" - - def __init__(self, error, max_wire_version, sock_generation, completed_handshake, service_id): - self.error = error - self.max_wire_version = max_wire_version - self.sock_generation = sock_generation - self.completed_handshake = completed_handshake - self.service_id = service_id - - -def _is_stale_error_topology_version(current_tv, error_tv): - """Return True if the error's topologyVersion is <= current.""" - if current_tv is None or error_tv is None: - return False - if current_tv["processId"] != error_tv["processId"]: - return False - return current_tv["counter"] >= error_tv["counter"] - - -def _is_stale_server_description(current_sd, new_sd): - """Return True if the new topologyVersion is < current.""" - current_tv, new_tv = current_sd.topology_version, new_sd.topology_version - if current_tv is None or new_tv is None: - return False - if current_tv["processId"] != new_tv["processId"]: - return False - return current_tv["counter"] > new_tv["counter"] diff --git a/pymongo/topology_description.py b/pymongo/topology_description.py index b32a86e2d7..de67a8f94a 100644 --- a/pymongo/topology_description.py +++ b/pymongo/topology_description.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,15 +12,29 @@ # implied. 
See the License for the specific language governing # permissions and limitations under the License. -"""Represent a deployment of MongoDB servers.""" +"""Represent a deployment of MongoDB servers. -from random import sample -from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations +from random import sample +from typing import ( + Any, + Callable, + List, + Mapping, + MutableMapping, + NamedTuple, + Optional, + cast, +) + +from bson.min_key import MinKey from bson.objectid import ObjectId from pymongo import common -from pymongo.errors import ConfigurationError -from pymongo.read_preferences import ReadPreference, _AggWritePref, _ServerMode +from pymongo.errors import ConfigurationError, PyMongoError +from pymongo.read_preferences import Primary, ReadPreference, _AggWritePref, _ServerMode from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection from pymongo.server_type import SERVER_TYPE @@ -40,17 +54,17 @@ class _TopologyType(NamedTuple): TOPOLOGY_TYPE = _TopologyType(*range(6)) # Topologies compatible with SRV record polling. -SRV_POLLING_TOPOLOGIES: Tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) +SRV_POLLING_TOPOLOGIES: tuple[int, int] = (TOPOLOGY_TYPE.Unknown, TOPOLOGY_TYPE.Sharded) _ServerSelector = Callable[[List[ServerDescription]], List[ServerDescription]] -class TopologyDescription(object): +class TopologyDescription: def __init__( self, topology_type: int, - server_descriptions: Dict[_Address, ServerDescription], + server_descriptions: dict[_Address, ServerDescription], replica_set_name: Optional[str], max_set_version: Optional[int], max_election_id: Optional[ObjectId], @@ -58,14 +72,13 @@ def __init__( ) -> None: """Representation of a deployment of MongoDB servers. 
- :Parameters: - - `topology_type`: initial type - - `server_descriptions`: dict of (address, ServerDescription) for + :param topology_type: initial type + :param server_descriptions: dict of (address, ServerDescription) for all seeds - - `replica_set_name`: replica set name or None - - `max_set_version`: greatest setVersion seen from a primary, or None - - `max_election_id`: greatest electionId seen from a primary, or None - - `topology_settings`: a TopologySettings + :param replica_set_name: replica set name or None + :param max_set_version: greatest setVersion seen from a primary, or None + :param max_election_id: greatest electionId seen from a primary, or None + :param topology_settings: a TopologySettings """ self._topology_type = topology_type self._replica_set_name = replica_set_name @@ -98,7 +111,7 @@ def __init__( s.logical_session_timeout_minutes for s in readable_servers ) - def _init_incompatible_err(self): + def _init_incompatible_err(self) -> None: """Internal compatibility check for non-load balanced topologies.""" for s in self._server_descriptions.values(): if not s.is_server_type_known: @@ -157,12 +170,12 @@ def check_compatible(self) -> None: def has_server(self, address: _Address) -> bool: return address in self._server_descriptions - def reset_server(self, address: _Address) -> "TopologyDescription": + def reset_server(self, address: _Address) -> TopologyDescription: """A copy of this description, with one server marked Unknown.""" unknown_sd = self._server_descriptions[address].to_unknown() return updated_topology_description(self, unknown_sd) - def reset(self) -> "TopologyDescription": + def reset(self) -> TopologyDescription: """A copy of this description, with all servers marked Unknown.""" if self._topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary: topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary @@ -170,7 +183,7 @@ def reset(self) -> "TopologyDescription": topology_type = self._topology_type # The default ServerDescription's type is Unknown. - sds = dict((address, ServerDescription(address)) for address in self._server_descriptions) + sds = {address: ServerDescription(address) for address in self._server_descriptions} return TopologyDescription( topology_type, @@ -181,9 +194,10 @@ def reset(self) -> "TopologyDescription": self._topology_settings, ) - def server_descriptions(self) -> Dict[_Address, ServerDescription]: - """Dict of (address, - :class:`~pymongo.server_description.ServerDescription`).""" + def server_descriptions(self) -> dict[_Address, ServerDescription]: + """dict of (address, + :class:`~pymongo.server_description.ServerDescription`). 
+ """ return self._server_descriptions.copy() @property @@ -220,7 +234,7 @@ def logical_session_timeout_minutes(self) -> Optional[int]: return self._ls_timeout_minutes @property - def known_servers(self) -> List[ServerDescription]: + def known_servers(self) -> list[ServerDescription]: """List of Servers of types besides Unknown.""" return [s for s in self._server_descriptions.values() if s.is_server_type_known] @@ -230,7 +244,7 @@ def has_known_servers(self) -> bool: return any(s for s in self._server_descriptions.values() if s.is_server_type_known) @property - def readable_servers(self) -> List[ServerDescription]: + def readable_servers(self) -> list[ServerDescription]: """List of readable Servers.""" return [s for s in self._server_descriptions.values() if s.is_readable] @@ -251,14 +265,22 @@ def heartbeat_frequency(self) -> int: def srv_max_hosts(self) -> int: return self._topology_settings._srv_max_hosts - def _apply_local_threshold(self, selection): + def _apply_local_threshold(self, selection: Optional[Selection]) -> list[ServerDescription]: if not selection: return [] + round_trip_times: list[float] = [] + for server in selection.server_descriptions: + if server.round_trip_time is None: + config_err_msg = f"round_trip_time for server {server.address} is unexpectedly None: {self}, servers: {selection.server_descriptions}" + raise ConfigurationError(config_err_msg) + round_trip_times.append(server.round_trip_time) # Round trip time in seconds. - fastest = min(s.round_trip_time for s in selection.server_descriptions) + fastest = min(round_trip_times) threshold = self._topology_settings.local_threshold_ms / 1000.0 return [ - s for s in selection.server_descriptions if (s.round_trip_time - fastest) <= threshold + s + for s in selection.server_descriptions + if (cast(float, s.round_trip_time) - fastest) <= threshold ] def apply_selector( @@ -266,15 +288,14 @@ def apply_selector( selector: Any, address: Optional[_Address] = None, custom_selector: Optional[_ServerSelector] = None, - ) -> List[ServerDescription]: + ) -> list[ServerDescription]: """List of servers matching the provided selector(s). - :Parameters: - - `selector`: a callable that takes a Selection as input and returns + :param selector: a callable that takes a Selection as input and returns a Selection as output. For example, an instance of a read preference from :mod:`~pymongo.read_preferences`. - - `address` (optional): A server address to select. - - `custom_selector` (optional): A callable that augments server + :param address: A server address to select. + :param custom_selector: A callable that augments server selection rules. Accepts a list of :class:`~pymongo.server_description.ServerDescription` objects and return a list of server descriptions that should be considered @@ -303,6 +324,17 @@ def apply_selector( description = self.server_descriptions().get(address) return [description] if description else [] + # Primary selection fast path. + if self.topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary and type(selector) is Primary: + for sd in self._server_descriptions.values(): + if sd.server_type == SERVER_TYPE.RSPrimary: + sds = [sd] + if custom_selector: + sds = custom_selector(sds) + return sds + # No primary found, return an empty list. + return [] + selection = Selection.from_topology_description(self) # Ignore read preference for sharded clusters. 
         if self.topology_type != TOPOLOGY_TYPE.Sharded:
@@ -319,8 +351,7 @@ def has_readable_server(self, read_preference: _ServerMode = ReadPreference.PRIM
         """Does this topology have any readable servers available matching the
         given read preference?
 
-        :Parameters:
-          - `read_preference`: an instance of a read preference from
+        :param read_preference: an instance of a read preference from
             :mod:`~pymongo.read_preferences`. Defaults to
             :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.
 
@@ -342,10 +373,10 @@ def has_writable_server(self) -> bool:
         """
         return self.has_readable_server(ReadPreference.PRIMARY)
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         # Sort the servers by address.
         servers = sorted(self._server_descriptions.values(), key=lambda sd: sd.address)
-        return "<%s id: %s, topology_type: %s, servers: %r>" % (
+        return "<{} id: {}, topology_type: {}, servers: {!r}>".format(
             self.__class__.__name__,
             self._topology_settings._topology_id,
             self.topology_type_name,
@@ -367,12 +398,11 @@ def __repr__(self):
 def updated_topology_description(
     topology_description: TopologyDescription, server_description: ServerDescription
-) -> "TopologyDescription":
+) -> TopologyDescription:
     """Return an updated copy of a TopologyDescription.
 
-    :Parameters:
-      - `topology_description`: the current TopologyDescription
-      - `server_description`: a new ServerDescription that resulted from
+    :param topology_description: the current TopologyDescription
+    :param server_description: a new ServerDescription that resulted from
         a hello call
 
     Called after attempting (successfully or not) to call hello on the
@@ -399,8 +429,9 @@ def updated_topology_description(
     if set_name is not None and set_name != server_description.replica_set_name:
         error = ConfigurationError(
             "client is configured to connect to a replica set named "
-            "'%s' but this node belongs to a set named '%s'"
-            % (set_name, server_description.replica_set_name)
+            "'{}' but this node belongs to a set named '{}'".format(
+                set_name, server_description.replica_set_name
+            )
         )
         sds[address] = server_description.to_unknown(error=error)
     # Single type never changes.
@@ -469,12 +500,13 @@ def updated_topology_description(
     )
 
 
-def _updated_topology_description_srv_polling(topology_description, seedlist):
+def _updated_topology_description_srv_polling(
+    topology_description: TopologyDescription, seedlist: list[tuple[str, Any]]
+) -> TopologyDescription:
     """Return an updated copy of a TopologyDescription.
 
-    :Parameters:
-      - `topology_description`: the current TopologyDescription
-      - `seedlist`: a list of new (host, port) seeds that resulted from
+    :param topology_description: the current TopologyDescription
+    :param seedlist: a list of new (host, port) seeds that resulted from
         an SRV record poll
     """
    assert topology_description.topology_type in SRV_POLLING_TOPOLOGIES
@@ -494,7 +526,7 @@ def _updated_topology_description_srv_polling(topology_description, seedlist):
     new_hosts = set(seedlist) - set(sds.keys())
     n_to_add = topology_description.srv_max_hosts - len(sds)
     if n_to_add > 0:
-        seedlist = sample(new_hosts, min(n_to_add, len(new_hosts)))
+        seedlist = sample(sorted(new_hosts), min(n_to_add, len(new_hosts)))
     else:
         seedlist = []
     # Add SDs corresponding to servers recently added to the SRV record.
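
A note on the sample(sorted(new_hosts), ...) change in the hunk above: random.sample requires a sequence, and passing a set was deprecated in Python 3.9 and removed in 3.11, so the set of newly discovered hosts must be materialized first (sorting also makes the candidate order deterministic under a fixed random seed):

import random

new_hosts = {("h2", 27017), ("h1", 27017), ("h3", 27017)}
picked = random.sample(sorted(new_hosts), 2)  # Works on every supported Python.
# random.sample(new_hosts, 2) would raise TypeError on Python 3.11+.
print(picked)
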
@@ -512,8 +544,12 @@ def _updated_topology_description_srv_polling(topology_description, seedlist): def _update_rs_from_primary( - sds, replica_set_name, server_description, max_set_version, max_election_id -): + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, + max_set_version: Optional[int], + max_election_id: Optional[ObjectId], +) -> tuple[int, Optional[str], Optional[int], Optional[ObjectId]]: """Update topology description from a primary's hello response. Pass in a dict of ServerDescriptions, current replica set name, the @@ -530,26 +566,42 @@ def _update_rs_from_primary( # We found a primary but it doesn't have the replica_set_name # provided by the user. sds.pop(server_description.address) - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + + if server_description.max_wire_version is None or server_description.max_wire_version < 17: + new_election_tuple: tuple = (server_description.set_version, server_description.election_id) # type: ignore[type-arg] + max_election_tuple: tuple = (max_set_version, max_election_id) # type: ignore[type-arg] + if None not in new_election_tuple: + if None not in max_election_tuple and new_election_tuple < max_election_tuple: + # Stale primary, set to type Unknown. + sds[server_description.address] = server_description.to_unknown( + PyMongoError( + f"primary marked stale due to electionId/setVersion mismatch, {new_election_tuple} is stale compared to {max_election_tuple}" + ) + ) + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + max_election_id = server_description.election_id - max_election_tuple = max_set_version, max_election_id - if None not in server_description.election_tuple: - if ( - None not in max_election_tuple - and max_election_tuple > server_description.election_tuple + if server_description.set_version is not None and ( + max_set_version is None or server_description.set_version > max_set_version ): - + max_set_version = server_description.set_version + else: + new_election_tuple = server_description.election_id, server_description.set_version + max_election_tuple = max_election_id, max_set_version + new_election_safe = tuple(MinKey() if i is None else i for i in new_election_tuple) + max_election_safe = tuple(MinKey() if i is None else i for i in max_election_tuple) + if new_election_safe < max_election_safe: # Stale primary, set to type Unknown. - sds[server_description.address] = server_description.to_unknown() - return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) - - max_election_id = server_description.election_id - - if server_description.set_version is not None and ( - max_set_version is None or server_description.set_version > max_set_version - ): - - max_set_version = server_description.set_version + sds[server_description.address] = server_description.to_unknown( + PyMongoError( + f"primary marked stale due to electionId/setVersion mismatch, {new_election_tuple} is stale compared to {max_election_tuple}" + ) + ) + return _check_has_primary(sds), replica_set_name, max_set_version, max_election_id + else: + max_election_id = server_description.election_id + max_set_version = server_description.set_version # We've heard from the primary. Is it the same primary as before? 
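
The MinKey padding in the hunk above exists so that (electionId, setVersion) pairs with missing members stay comparable: bson's MinKey sorts below every other value, so a None electionId always loses to a real one. For example:

from bson.min_key import MinKey
from bson.objectid import ObjectId

new = (None, 2)                        # (electionId, setVersion)
cur = (ObjectId("000000000000000000000000"), 1)
new_safe = tuple(MinKey() if v is None else v for v in new)
cur_safe = tuple(MinKey() if v is None else v for v in cur)
# Stale despite the higher setVersion: the missing electionId dominates.
assert new_safe < cur_safe
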
for server in sds.values(): @@ -557,9 +609,10 @@ def _update_rs_from_primary( server.server_type is SERVER_TYPE.RSPrimary and server.address != server_description.address ): - # Reset old primary's type to Unknown. - sds[server.address] = server.to_unknown() + sds[server.address] = server.to_unknown( + PyMongoError("primary marked stale due to discovery of newer primary") + ) # There can be only one prior primary. break @@ -578,7 +631,11 @@ def _update_rs_from_primary( return (_check_has_primary(sds), replica_set_name, max_set_version, max_election_id) -def _update_rs_with_primary_from_member(sds, replica_set_name, server_description): +def _update_rs_with_primary_from_member( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> int: """RS with known primary. Process a response from a non-primary. Pass in a dict of ServerDescriptions, current replica set name, and the @@ -597,7 +654,11 @@ def _update_rs_with_primary_from_member(sds, replica_set_name, server_descriptio return _check_has_primary(sds) -def _update_rs_no_primary_from_member(sds, replica_set_name, server_description): +def _update_rs_no_primary_from_member( + sds: MutableMapping[_Address, ServerDescription], + replica_set_name: Optional[str], + server_description: ServerDescription, +) -> tuple[int, Optional[str]]: """RS without known primary. Update from a non-primary's response. Pass in a dict of ServerDescriptions, current replica set name, and the @@ -625,7 +686,7 @@ def _update_rs_no_primary_from_member(sds, replica_set_name, server_description) return topology_type, replica_set_name -def _check_has_primary(sds): +def _check_has_primary(sds: Mapping[_Address, ServerDescription]) -> int: """Current topology type is ReplicaSetWithPrimary. Is primary still known? Pass in a dict of ServerDescriptions. @@ -635,5 +696,5 @@ def _check_has_primary(sds): for s in sds.values(): if s.server_type == SERVER_TYPE.RSPrimary: return TOPOLOGY_TYPE.ReplicaSetWithPrimary - else: + else: # noqa: PLW0120 return TOPOLOGY_TYPE.ReplicaSetNoPrimary diff --git a/pymongo/typings.py b/pymongo/typings.py index 14e059a8f0..e678720db9 100644 --- a/pymongo/typings.py +++ b/pymongo/typings.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,11 +13,12 @@ # limitations under the License. 
"""Type aliases used by PyMongo""" +from __future__ import annotations + from typing import ( TYPE_CHECKING, Any, Mapping, - MutableMapping, Optional, Sequence, Tuple, @@ -25,15 +26,53 @@ Union, ) +from bson.typings import _DocumentOut, _DocumentType, _DocumentTypeArg + if TYPE_CHECKING: - from bson.raw_bson import RawBSONDocument + from pymongo.asynchronous.bulk import _AsyncBulk + from pymongo.asynchronous.client_bulk import _AsyncClientBulk + from pymongo.asynchronous.client_session import AsyncClientSession + from pymongo.asynchronous.mongo_client import AsyncMongoClient + from pymongo.asynchronous.pool import AsyncConnection from pymongo.collation import Collation + from pymongo.synchronous.bulk import _Bulk + from pymongo.synchronous.client_bulk import _ClientBulk + from pymongo.synchronous.client_session import ClientSession + from pymongo.synchronous.mongo_client import MongoClient + from pymongo.synchronous.pool import Connection # Common Shared Types. _Address = Tuple[str, Optional[int]] _CollationIn = Union[Mapping[str, Any], "Collation"] -_DocumentIn = Union[MutableMapping[str, Any], "RawBSONDocument"] _Pipeline = Sequence[Mapping[str, Any]] -_DocumentOut = _DocumentIn -_DocumentType = TypeVar("_DocumentType", bound=Mapping[str, Any]) +ClusterTime = Mapping[str, Any] + +_T = TypeVar("_T") + +# Type hinting types for compatibility between async and sync classes +_AgnosticMongoClient = Union["AsyncMongoClient", "MongoClient"] # type: ignore[type-arg] +_AgnosticConnection = Union["AsyncConnection", "Connection"] +_AgnosticClientSession = Union["AsyncClientSession", "ClientSession"] +_AgnosticBulk = Union["_AsyncBulk", "_Bulk"] +_AgnosticClientBulk = Union["_AsyncClientBulk", "_ClientBulk"] + + +def strip_optional(elem: Optional[_T]) -> _T: + """This function is to allow us to cast all the elements of an iterator from Optional[_T] to _T + while inside a list comprehension. + """ + assert elem is not None + return elem + + +__all__ = [ + "_DocumentOut", + "_DocumentType", + "_DocumentTypeArg", + "_Address", + "_CollationIn", + "_Pipeline", + "strip_optional", + "_AgnosticMongoClient", +] diff --git a/pymongo/uri_parser.py b/pymongo/uri_parser.py index cd18c067e7..fe253b9bbf 100644 --- a/pymongo/uri_parser.py +++ b/pymongo/uri_parser.py @@ -4,7 +4,7 @@ # may not use this file except in compliance with the License. You # may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,612 +13,32 @@ # permissions and limitations under the License. -"""Tools to parse and validate a MongoDB URI.""" -import re -import sys -import warnings -from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union -from urllib.parse import unquote_plus - -from pymongo.client_options import _parse_ssl_options -from pymongo.common import ( - INTERNAL_URI_OPTION_NAME_MAP, - SRV_SERVICE_NAME, - URI_OPTIONS_DEPRECATION_MAP, - _CaseInsensitiveDictionary, - get_validated_options, -) -from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.srv_resolver import _HAVE_DNSPYTHON, _SrvResolver -from pymongo.typings import _Address - -SCHEME = "mongodb://" -SCHEME_LEN = len(SCHEME) -SRV_SCHEME = "mongodb+srv://" -SRV_SCHEME_LEN = len(SRV_SCHEME) -DEFAULT_PORT = 27017 - - -def _unquoted_percent(s): - """Check for unescaped percent signs. 
-
-    :Parameters:
-      - `s`: A string. `s` can have things like '%25', '%2525',
-        and '%E2%85%A8' but cannot have unquoted percent like '%foo'.
-    """
-    for i in range(len(s)):
-        if s[i] == "%":
-            sub = s[i : i + 3]
-            # If unquoting yields the same string this means there was an
-            # unquoted %.
-            if unquote_plus(sub) == sub:
-                return True
-    return False
-
-
-def parse_userinfo(userinfo: str) -> Tuple[str, str]:
-    """Validates the format of user information in a MongoDB URI.
-    Reserved characters that are gen-delimiters (":", "/", "?", "#", "[",
-    "]", "@") as per RFC 3986 must be escaped.
-
-    Returns a 2-tuple containing the unescaped username followed
-    by the unescaped password.
-
-    :Parameters:
-      - `userinfo`: A string of the form <username>:<password>
-    """
-    if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo):
-        raise InvalidURI(
-            "Username and password must be escaped according to "
-            "RFC 3986, use urllib.parse.quote_plus"
-        )
-
-    user, _, passwd = userinfo.partition(":")
-    # No password is expected with GSSAPI authentication.
-    if not user:
-        raise InvalidURI("The empty string is not a valid username.")
-
-    return unquote_plus(user), unquote_plus(passwd)
-
-
-def parse_ipv6_literal_host(
-    entity: str, default_port: Optional[int]
-) -> Tuple[str, Optional[Union[str, int]]]:
-    """Validates an IPv6 literal host:port string.
-
-    Returns a 2-tuple of IPv6 literal followed by port where
-    port is default_port if it wasn't specified in entity.
-
-    :Parameters:
-      - `entity`: A string that represents an IPv6 literal enclosed
-        in braces (e.g. '[::1]' or '[::1]:27017').
-      - `default_port`: The port number to use when one wasn't
-        specified in entity.
-    """
-    if entity.find("]") == -1:
-        raise ValueError(
-            "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732."
-        )
-    i = entity.find("]:")
-    if i == -1:
-        return entity[1:-1], default_port
-    return entity[1:i], entity[i + 2 :]
-
-
-def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address:
-    """Validates a host string
-
-    Returns a 2-tuple of host followed by port where port is default_port
-    if it wasn't specified in the string.
-
-    :Parameters:
-      - `entity`: A host or host:port string where host could be a
-        hostname or IP address.
-      - `default_port`: The port number to use when one wasn't
-        specified in entity.
-    """
-    host = entity
-    port: Optional[Union[str, int]] = default_port
-    if entity[0] == "[":
-        host, port = parse_ipv6_literal_host(entity, default_port)
-    elif entity.endswith(".sock"):
-        return entity, default_port
-    elif entity.find(":") != -1:
-        if entity.count(":") > 1:
-            raise ValueError(
-                "Reserved characters such as ':' must be "
-                "escaped according to RFC 2396. An IPv6 "
-                "address literal must be enclosed in '[' "
-                "and ']' according to RFC 2732."
-            )
-        host, port = host.split(":", 1)
-    if isinstance(port, str):
-        if not port.isdigit() or int(port) > 65535 or int(port) <= 0:
-            raise ValueError("Port must be an integer between 0 and 65535: %r" % (port,))
-        port = int(port)
-
-    # Normalize hostname to lowercase, since DNS is case-insensitive:
-    # http://tools.ietf.org/html/rfc4343
-    # This prevents useless rediscovery if "foo.com" is in the seed list but
-    # "FOO.com" is in the hello response.
-    return host.lower(), port
-
-
-# Options whose values are implicitly determined by tlsInsecure.
-_IMPLICIT_TLSINSECURE_OPTS = {
-    "tlsallowinvalidcertificates",
-    "tlsallowinvalidhostnames",
-    "tlsdisableocspendpointcheck",
-}
-
-
-def _parse_options(opts, delim):
-    """Helper method for split_options which creates the options dict.
-    Also handles the creation of a list for the URI tag_sets/
-    readpreferencetags portion, and the use of a unicode options string."""
-    options = _CaseInsensitiveDictionary()
-    for uriopt in opts.split(delim):
-        key, value = uriopt.split("=")
-        if key.lower() == "readpreferencetags":
-            options.setdefault(key, []).append(value)
-        else:
-            if key in options:
-                warnings.warn("Duplicate URI option '%s'." % (key,))
-            if key.lower() == "authmechanismproperties":
-                val = value
-            else:
-                val = unquote_plus(value)
-            options[key] = val
-
-    return options
-
-
-def _handle_security_options(options):
-    """Raise appropriate errors when conflicting TLS options are present in
-    the options dictionary.
-
-    :Parameters:
-      - `options`: Instance of _CaseInsensitiveDictionary containing
-        MongoDB URI options.
-    """
-    # Implicitly defined options must not be explicitly specified.
-    tlsinsecure = options.get("tlsinsecure")
-    if tlsinsecure is not None:
-        for opt in _IMPLICIT_TLSINSECURE_OPTS:
-            if opt in options:
-                err_msg = "URI options %s and %s cannot be specified simultaneously."
-                raise InvalidURI(
-                    err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt))
-                )
-
-    # Handle co-occurrence of OCSP & tlsAllowInvalidCertificates options.
-    tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates")
-    if tlsallowinvalidcerts is not None:
-        if "tlsdisableocspendpointcheck" in options:
-            err_msg = "URI options %s and %s cannot be specified simultaneously."
-            raise InvalidURI(
-                err_msg
-                % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck"))
-            )
-        if tlsallowinvalidcerts is True:
-            options["tlsdisableocspendpointcheck"] = True
-
-    # Handle co-occurrence of CRL and OCSP-related options.
-    tlscrlfile = options.get("tlscrlfile")
-    if tlscrlfile is not None:
-        for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"):
-            if options.get(opt) is True:
-                err_msg = "URI option %s=True cannot be specified when CRL checking is enabled."
-                raise InvalidURI(err_msg % (opt,))
-
-    if "ssl" in options and "tls" in options:
-
-        def truth_value(val):
-            if val in ("true", "false"):
-                return val == "true"
-            if isinstance(val, bool):
-                return val
-            return val
-
-        if truth_value(options.get("ssl")) != truth_value(options.get("tls")):
-            err_msg = "Can not specify conflicting values for URI options %s and %s."
-            raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls")))
-
-    return options
-
-
-def _handle_option_deprecations(options):
-    """Issue appropriate warnings when deprecated options are present in the
-    options dictionary. Removes deprecated option key, value pairs if the
-    options dictionary is found to also have the renamed option.
-
-    :Parameters:
-      - `options`: Instance of _CaseInsensitiveDictionary containing
-        MongoDB URI options.
-    """
-    for optname in list(options):
-        if optname in URI_OPTIONS_DEPRECATION_MAP:
-            mode, message = URI_OPTIONS_DEPRECATION_MAP[optname]
-            if mode == "renamed":
-                newoptname = message
-                if newoptname in options:
-                    warn_msg = "Deprecated option '%s' ignored in favor of '%s'."
-                    warnings.warn(
-                        warn_msg % (options.cased_key(optname), options.cased_key(newoptname)),
-                        DeprecationWarning,
-                        stacklevel=2,
-                    )
-                    options.pop(optname)
-                    continue
-                warn_msg = "Option '%s' is deprecated, use '%s' instead."
-                warnings.warn(
-                    warn_msg % (options.cased_key(optname), newoptname),
-                    DeprecationWarning,
-                    stacklevel=2,
-                )
-            elif mode == "removed":
-                warn_msg = "Option '%s' is deprecated. %s."
-                warnings.warn(
-                    warn_msg % (options.cased_key(optname), message),
-                    DeprecationWarning,
-                    stacklevel=2,
-                )
-
-    return options
-
-
-def _normalize_options(options):
-    """Normalizes option names in the options dictionary by converting them to
-    their internally-used names.
-
-    :Parameters:
-      - `options`: Instance of _CaseInsensitiveDictionary containing
-        MongoDB URI options.
-    """
-    # Expand the tlsInsecure option.
-    tlsinsecure = options.get("tlsinsecure")
-    if tlsinsecure is not None:
-        for opt in _IMPLICIT_TLSINSECURE_OPTS:
-            # Implicit options are logically the same as tlsInsecure.
-            options[opt] = tlsinsecure
-
-    for optname in list(options):
-        intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None)
-        if intname is not None:
-            options[intname] = options.pop(optname)
-
-    return options
-
-
-def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]:
-    """Validates and normalizes options passed in a MongoDB URI.
+"""Re-import of synchronous URI Parser API for compatibility."""
+from __future__ import annotations
-
-    Returns a new dictionary of validated and normalized options. If warn is
-    False then errors will be thrown for invalid options, otherwise they will
-    be ignored and a warning will be issued.
-
-    :Parameters:
-      - `opts`: A dict of MongoDB URI options.
-      - `warn` (optional): If ``True`` then warnings will be logged and
-        invalid options will be ignored. Otherwise invalid options will
-        cause errors.
-    """
-    return get_validated_options(opts, warn)
-
-
-def split_options(
-    opts: str, validate: bool = True, warn: bool = False, normalize: bool = True
-) -> MutableMapping[str, Any]:
-    """Takes the options portion of a MongoDB URI, validates each option
-    and returns the options in a dictionary.
-
-    :Parameters:
-      - `opts`: A string representing MongoDB URI options.
-      - `validate`: If ``True`` (the default), validate and normalize all
-        options.
-      - `warn`: If ``True``, issue warnings for invalid options instead of
-        raising errors. Default: ``False``.
-      - `normalize`: If ``True`` (the default), renames all options to their
-        internally-used names.
-    """
-    and_idx = opts.find("&")
-    semi_idx = opts.find(";")
-    try:
-        if and_idx >= 0 and semi_idx >= 0:
-            raise InvalidURI("Can not mix '&' and ';' for option separators.")
-        elif and_idx >= 0:
-            options = _parse_options(opts, "&")
-        elif semi_idx >= 0:
-            options = _parse_options(opts, ";")
-        elif opts.find("=") != -1:
-            options = _parse_options(opts, None)
-        else:
-            raise ValueError
-    except ValueError:
-        raise InvalidURI("MongoDB URI options are key=value pairs.")
-
-    options = _handle_security_options(options)
-
-    options = _handle_option_deprecations(options)
-
-    if normalize:
-        options = _normalize_options(options)
-
-    if validate:
-        options = validate_options(options, warn)
-        if options.get("authsource") == "":
-            raise InvalidURI("the authSource database cannot be an empty string")
-
-    return options
-
-
-def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> List[_Address]:
-    """Takes a string of the form host1[:port],host2[:port]... and
-    splits it into (host, port) tuples. If [:port] isn't present the
-    default_port is used.
-
-    Returns a list of 2-tuples containing the host name (or IP) followed by
-    port number.
-
-    :Parameters:
-      - `hosts`: A string of the form host1[:port],host2[:port],...
-      - `default_port`: The port number to use when one wasn't specified
-        for a host.
-    """
-    nodes = []
-    for entity in hosts.split(","):
-        if not entity:
-            raise ConfigurationError("Empty host (or extra comma in host list).")
-        port = default_port
-        # Unix socket entities don't have ports
-        if entity.endswith(".sock"):
-            port = None
-        nodes.append(parse_host(entity, port))
-    return nodes
-
-
-# Prohibited characters in database name. DB names also can't have ".", but for
-# backward-compat we allow "db.collection" in URI.
-_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]")
-
-_ALLOWED_TXT_OPTS = frozenset(
-    ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"]
-)
-
-
-def _check_options(nodes, options):
-    # Ensure directConnection was not True if there are multiple seeds.
-    if len(nodes) > 1 and options.get("directconnection"):
-        raise ConfigurationError("Cannot specify multiple hosts with directConnection=true")
-
-    if options.get("loadbalanced"):
-        if len(nodes) > 1:
-            raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true")
-        if options.get("directconnection"):
-            raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true")
-        if options.get("replicaset"):
-            raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true")
-
-
-def parse_uri(
-    uri: str,
-    default_port: Optional[int] = DEFAULT_PORT,
-    validate: bool = True,
-    warn: bool = False,
-    normalize: bool = True,
-    connect_timeout: Optional[float] = None,
-    srv_service_name: Optional[str] = None,
-    srv_max_hosts: Optional[int] = None,
-) -> Dict[str, Any]:
-    """Parse and validate a MongoDB URI.
-
-    Returns a dict of the form::
-
-        {
-            'nodelist': <list of (host, port) tuples>,
-            'username': <username> or None,
-            'password': <password> or None,
-            'database': <database name> or None,
-            'collection': <collection name> or None,
-            'options': <dict of MongoDB URI options>,
-            'fqdn': <fqdn of the MongoDB+SRV URI> or None
-        }
-
-    If the URI scheme is "mongodb+srv://" DNS SRV and TXT lookups will be done
-    to build nodelist and options.
-
-    :Parameters:
-      - `uri`: The MongoDB URI to parse.
-      - `default_port`: The port number to use when one wasn't specified
-        for a host in the URI.
-      - `validate` (optional): If ``True`` (the default), validate and
-        normalize all options. Default: ``True``.
-      - `warn` (optional): When validating, if ``True`` then warn the user
-        and ignore any invalid options or values. If ``False``,
-        validation will error when options are unsupported or values are
-        invalid. Default: ``False``.
-      - `normalize` (optional): If ``True``, convert names of URI options
-        to their internally-used names. Default: ``True``.
-      - `connect_timeout` (optional): The maximum time in milliseconds to
-        wait for a response from the DNS server.
-      - `srv_service_name` (optional): A custom SRV service name
-
-    .. versionchanged:: 4.0
-        To better follow RFC 3986, unquoted percent signs ("%") are no longer
-        supported.
-
-    .. versionchanged:: 3.9
-        Added the ``normalize`` parameter.
-
-    .. versionchanged:: 3.6
-        Added support for mongodb+srv:// URIs.
-
-    .. versionchanged:: 3.5
-        Return the original value of the ``readPreference`` MongoDB URI option
-        instead of the validated read preference mode.
-
-    .. versionchanged:: 3.1
-        ``warn`` added so invalid options can be ignored.
- """ - if uri.startswith(SCHEME): - is_srv = False - scheme_free = uri[SCHEME_LEN:] - elif uri.startswith(SRV_SCHEME): - if not _HAVE_DNSPYTHON: - python_path = sys.executable or "python" - raise ConfigurationError( - 'The "dnspython" module must be ' - "installed to use mongodb+srv:// URIs. " - "To fix this error install pymongo with the srv extra:\n " - '%s -m pip install "pymongo[srv]"' % (python_path) - ) - is_srv = True - scheme_free = uri[SRV_SCHEME_LEN:] - else: - raise InvalidURI( - "Invalid URI scheme: URI must begin with '%s' or '%s'" % (SCHEME, SRV_SCHEME) - ) - - if not scheme_free: - raise InvalidURI("Must provide at least one hostname or IP.") - - user = None - passwd = None - dbase = None - collection = None - options = _CaseInsensitiveDictionary() - - host_part, _, path_part = scheme_free.partition("/") - if not host_part: - host_part = path_part - path_part = "" - - if not path_part and "?" in host_part: - raise InvalidURI("A '/' is required between the host list and any options.") - - if path_part: - dbase, _, opts = path_part.partition("?") - if dbase: - dbase = unquote_plus(dbase) - if "." in dbase: - dbase, collection = dbase.split(".", 1) - if _BAD_DB_CHARS.search(dbase): - raise InvalidURI('Bad database name "%s"' % dbase) - else: - dbase = None - - if opts: - options.update(split_options(opts, validate, warn, normalize)) - if srv_service_name is None: - srv_service_name = options.get("srvServiceName", SRV_SERVICE_NAME) - if "@" in host_part: - userinfo, _, hosts = host_part.rpartition("@") - user, passwd = parse_userinfo(userinfo) - else: - hosts = host_part - - if "/" in hosts: - raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) - - hosts = unquote_plus(hosts) - fqdn = None - srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") - if is_srv: - if options.get("directConnection"): - raise ConfigurationError( - "Cannot specify directConnection=true with %s URIs" % (SRV_SCHEME,) - ) - nodes = split_hosts(hosts, default_port=None) - if len(nodes) != 1: - raise InvalidURI("%s URIs must include one, and only one, hostname" % (SRV_SCHEME,)) - fqdn, port = nodes[0] - if port is not None: - raise InvalidURI("%s URIs must not include a port number" % (SRV_SCHEME,)) - - # Use the connection timeout. connectTimeoutMS passed as a keyword - # argument overrides the same option passed in the connection string. 
- connect_timeout = connect_timeout or options.get("connectTimeoutMS") - dns_resolver = _SrvResolver(fqdn, connect_timeout, srv_service_name, srv_max_hosts) - nodes = dns_resolver.get_hosts() - dns_options = dns_resolver.get_options() - if dns_options: - parsed_dns_options = split_options(dns_options, validate, warn, normalize) - if set(parsed_dns_options) - _ALLOWED_TXT_OPTS: - raise ConfigurationError( - "Only authSource, replicaSet, and loadBalanced are supported from DNS" - ) - for opt, val in parsed_dns_options.items(): - if opt not in options: - options[opt] = val - if options.get("loadBalanced") and srv_max_hosts: - raise InvalidURI("You cannot specify loadBalanced with srvMaxHosts") - if options.get("replicaSet") and srv_max_hosts: - raise InvalidURI("You cannot specify replicaSet with srvMaxHosts") - if "tls" not in options and "ssl" not in options: - options["tls"] = True if validate else "true" - elif not is_srv and options.get("srvServiceName") is not None: - raise ConfigurationError( - "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" - ) - elif not is_srv and srv_max_hosts: - raise ConfigurationError( - "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" - ) - else: - nodes = split_hosts(hosts, default_port=default_port) - - _check_options(nodes, options) - - return { - "nodelist": nodes, - "username": user, - "password": passwd, - "database": dbase, - "collection": collection, - "options": options, - "fqdn": fqdn, - } - - -def _parse_kms_tls_options(kms_tls_options): - """Parse KMS TLS connection options.""" - if not kms_tls_options: - return {} - if not isinstance(kms_tls_options, dict): - raise TypeError("kms_tls_options must be a dict") - contexts = {} - for provider, opts in kms_tls_options.items(): - if not isinstance(opts, dict): - raise TypeError(f'kms_tls_options["{provider}"] must be a dict') - opts.setdefault("tls", True) - opts = _CaseInsensitiveDictionary(opts) - opts = _handle_security_options(opts) - opts = _normalize_options(opts) - opts = validate_options(opts) - ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts) - if ssl_context is None: - raise ConfigurationError("TLS is required for KMS providers") - if allow_invalid_hostnames: - raise ConfigurationError("Insecure TLS options prohibited") - - for n in [ - "tlsInsecure", - "tlsAllowInvalidCertificates", - "tlsAllowInvalidHostnames", - "tlsDisableOCSPEndpointCheck", - "tlsDisableCertificateRevocationCheck", - ]: - if n in opts: - raise ConfigurationError(f"Insecure TLS options prohibited: {n}") - contexts[provider] = ssl_context - return contexts +import sys +from pymongo.errors import InvalidURI +from pymongo.synchronous.uri_parser import * # noqa: F403 +from pymongo.synchronous.uri_parser import __doc__ as original_doc +from pymongo.uri_parser_shared import * # noqa: F403 + +__doc__ = original_doc +__all__ = [ # noqa: F405 + "parse_userinfo", + "parse_ipv6_literal_host", + "parse_host", + "validate_options", + "split_options", + "split_hosts", + "parse_uri", +] if __name__ == "__main__": import pprint try: - pprint.pprint(parse_uri(sys.argv[1])) + pprint.pprint(parse_uri(sys.argv[1])) # noqa: F405, T203 except InvalidURI as exc: - print(exc) + print(exc) # noqa: T201 sys.exit(0) diff --git a/pymongo/uri_parser_shared.py b/pymongo/uri_parser_shared.py new file mode 100644 index 0000000000..59168d1e9f --- /dev/null +++ b/pymongo/uri_parser_shared.py @@ -0,0 +1,614 @@ +# Copyright 2011-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you +# may not use this file except in compliance with the License. You +# may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. See the License for the specific language governing +# permissions and limitations under the License. + + +"""Tools to parse and validate a MongoDB URI. + +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. +""" +from __future__ import annotations + +import re +import sys +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Mapping, + MutableMapping, + Optional, + Sized, + Union, + cast, +) +from urllib.parse import unquote_plus + +from pymongo.asynchronous.srv_resolver import _have_dnspython +from pymongo.client_options import _parse_ssl_options +from pymongo.common import ( + INTERNAL_URI_OPTION_NAME_MAP, + URI_OPTIONS_DEPRECATION_MAP, + _CaseInsensitiveDictionary, + get_validated_options, +) +from pymongo.errors import ConfigurationError, InvalidURI +from pymongo.typings import _Address + +if TYPE_CHECKING: + from pymongo.pyopenssl_context import SSLContext + +SCHEME = "mongodb://" +SCHEME_LEN = len(SCHEME) +SRV_SCHEME = "mongodb+srv://" +SRV_SCHEME_LEN = len(SRV_SCHEME) +DEFAULT_PORT = 27017 + +URI_OPTIONS = frozenset( + [ + "appname", + "authMechanism", + "authMechanismProperties", + "authSource", + "compressors", + "connectTimeoutMS", + "directConnection", + "heartbeatFrequencyMS", + "journal", + "loadBalanced", + "localThresholdMS", + "maxIdleTimeMS", + "maxPoolSize", + "maxConnecting", + "maxStalenessSeconds", + "minPoolSize", + "proxyHost", + "proxyPort", + "proxyUsername", + "proxyPassword", + "readConcernLevel", + "readPreference", + "readPreferenceTags", + "replicaSet", + "retryReads", + "retryWrites", + "serverMonitoringMode", + "serverSelectionTimeoutMS", + "serverSelectionTryOnce", + "socketTimeoutMS", + "srvMaxHosts", + "srvServiceName", + "ssl", + "tls", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsCAFile", + "tlsCertificateKeyFile", + "tlsCertificateKeyFilePassword", + "tlsDisableCertificateRevocationCheck", + "tlsDisableOCSPEndpointCheck", + "tlsInsecure", + "w", + "waitQueueTimeoutMS", + "wTimeoutMS", + "zlibCompressionLevel", + ] +) + + +def _unquoted_percent(s: str) -> bool: + """Check for unescaped percent signs. + + :param s: A string. `s` can have things like '%25', '%2525', + and '%E2%85%A8' but cannot have unquoted percent like '%foo'. + """ + for i in range(len(s)): + if s[i] == "%": + sub = s[i : i + 3] + # If unquoting yields the same string this means there was an + # unquoted %. + if unquote_plus(sub) == sub: + return True + return False + + +def parse_userinfo(userinfo: str) -> tuple[str, str]: + """Validates the format of user information in a MongoDB URI. + Reserved characters that are gen-delimiters (":", "/", "?", "#", "[", + "]", "@") as per RFC 3986 must be escaped. + + Returns a 2-tuple containing the unescaped username followed + by the unescaped password. 
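+
+    For example, ``parse_userinfo("user:pass%2Fword")`` returns
+    ``("user", "pass/word")``.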
+
+    :param userinfo: A string of the form <username>:<password>
+    """
+    if "@" in userinfo or userinfo.count(":") > 1 or _unquoted_percent(userinfo):
+        raise InvalidURI(
+            "Username and password must be escaped according to "
+            "RFC 3986, use urllib.parse.quote_plus"
+        )
+
+    user, _, passwd = userinfo.partition(":")
+    # No password is expected with GSSAPI authentication.
+    if not user:
+        raise InvalidURI("The empty string is not a valid username")
+
+    return unquote_plus(user), unquote_plus(passwd)
+
+
+def parse_ipv6_literal_host(
+    entity: str, default_port: Optional[int]
+) -> tuple[str, Optional[Union[str, int]]]:
+    """Validates an IPv6 literal host:port string.
+
+    Returns a 2-tuple of IPv6 literal followed by port where
+    port is default_port if it wasn't specified in entity.
+
+    :param entity: A string that represents an IPv6 literal enclosed
+        in braces (e.g. '[::1]' or '[::1]:27017').
+    :param default_port: The port number to use when one wasn't
+        specified in entity.
+    """
+    if entity.find("]") == -1:
+        raise ValueError(
+            "an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732."
+        )
+    i = entity.find("]:")
+    if i == -1:
+        return entity[1:-1], default_port
+    return entity[1:i], entity[i + 2 :]
+
+
+def parse_host(entity: str, default_port: Optional[int] = DEFAULT_PORT) -> _Address:
+    """Validates a host string.
+
+    Returns a 2-tuple of host followed by port where port is default_port
+    if it wasn't specified in the string.
+
+    :param entity: A host or host:port string where host could be a
+        hostname or IP address.
+    :param default_port: The port number to use when one wasn't
+        specified in entity.
+    """
+    host = entity
+    port: Optional[Union[str, int]] = default_port
+    if entity[0] == "[":
+        host, port = parse_ipv6_literal_host(entity, default_port)
+    elif entity.endswith(".sock"):
+        return entity, default_port
+    elif entity.find(":") != -1:
+        if entity.count(":") > 1:
+            raise ValueError(
+                "Reserved characters such as ':' must be "
+                "escaped according to RFC 2396. An IPv6 "
+                "address literal must be enclosed in '[' "
+                "and ']' according to RFC 2732."
+            )
+        host, port = host.split(":", 1)
+    if isinstance(port, str):
+        if not port.isdigit():
+            # Special case check for mistakes like "mongodb://localhost:27017 ".
+            if all(c.isspace() or c.isdigit() for c in port):
+                for c in port:
+                    if c.isspace():
+                        raise ValueError(f"Port contains whitespace character: {c!r}")
+
+            # A non-digit port indicates that the URI is invalid, likely because the password
+            # or username was not escaped.
+            raise ValueError(
+                "Port contains non-digit characters. Hint: username and password must be escaped according to "
+                "RFC 3986, use urllib.parse.quote_plus"
+            )
+        if int(port) > 65535 or int(port) <= 0:
+            raise ValueError("Port must be an integer between 1 and 65535")
+        port = int(port)
+
+    # Normalize hostname to lowercase, since DNS is case-insensitive:
+    # https://tools.ietf.org/html/rfc4343
+    # This prevents useless rediscovery if "foo.com" is in the seed list but
+    # "FOO.com" is in the hello response.
+    return host.lower(), port
+
+
+# Options whose values are implicitly determined by tlsInsecure.
+_IMPLICIT_TLSINSECURE_OPTS = {
+    "tlsallowinvalidcertificates",
+    "tlsallowinvalidhostnames",
+    "tlsdisableocspendpointcheck",
+}
+
+
+def _parse_options(opts: str, delim: Optional[str]) -> _CaseInsensitiveDictionary:
+    """Helper method for split_options which creates the options dict.
+    Also handles the creation of a list for the URI tag_sets/
+    readpreferencetags portion, and the use of a unicode options string.
+    """
+    options = _CaseInsensitiveDictionary()
+    for uriopt in opts.split(delim):
+        key, value = uriopt.split("=")
+        if key.lower() == "readpreferencetags":
+            options.setdefault(key, []).append(value)
+        else:
+            if key in options:
+                warnings.warn(f"Duplicate URI option '{key}'.", stacklevel=2)
+            if key.lower() == "authmechanismproperties":
+                val = value
+            else:
+                val = unquote_plus(value)
+            options[key] = val
+
+    return options
+
+
+def _handle_security_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary:
+    """Raise appropriate errors when conflicting TLS options are present in
+    the options dictionary.
+
+    :param options: Instance of _CaseInsensitiveDictionary containing
+        MongoDB URI options.
+    """
+    # Implicitly defined options must not be explicitly specified.
+    tlsinsecure = options.get("tlsinsecure")
+    if tlsinsecure is not None:
+        for opt in _IMPLICIT_TLSINSECURE_OPTS:
+            if opt in options:
+                err_msg = "URI options %s and %s cannot be specified simultaneously."
+                raise InvalidURI(
+                    err_msg % (options.cased_key("tlsinsecure"), options.cased_key(opt))
+                )
+
+    # Handle co-occurrence of OCSP & tlsAllowInvalidCertificates options.
+    tlsallowinvalidcerts = options.get("tlsallowinvalidcertificates")
+    if tlsallowinvalidcerts is not None:
+        if "tlsdisableocspendpointcheck" in options:
+            err_msg = "URI options %s and %s cannot be specified simultaneously."
+            raise InvalidURI(
+                err_msg
+                % ("tlsallowinvalidcertificates", options.cased_key("tlsdisableocspendpointcheck"))
+            )
+        if tlsallowinvalidcerts is True:
+            options["tlsdisableocspendpointcheck"] = True
+
+    # Handle co-occurrence of CRL and OCSP-related options.
+    tlscrlfile = options.get("tlscrlfile")
+    if tlscrlfile is not None:
+        for opt in ("tlsinsecure", "tlsallowinvalidcertificates", "tlsdisableocspendpointcheck"):
+            if options.get(opt) is True:
+                err_msg = "URI option %s=True cannot be specified when CRL checking is enabled."
+                raise InvalidURI(err_msg % (opt,))
+
+    if "ssl" in options and "tls" in options:
+
+        def truth_value(val: Any) -> Any:
+            if val in ("true", "false"):
+                return val == "true"
+            if isinstance(val, bool):
+                return val
+            return val
+
+        if truth_value(options.get("ssl")) != truth_value(options.get("tls")):
+            err_msg = "Cannot specify conflicting values for URI options %s and %s."
+            raise InvalidURI(err_msg % (options.cased_key("ssl"), options.cased_key("tls")))
+
+    return options
+
+
+def _handle_option_deprecations(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary:
+    """Issue appropriate warnings when deprecated options are present in the
+    options dictionary. Removes deprecated option key-value pairs if the
+    options dictionary is found to also have the renamed option.
+
+    :param options: Instance of _CaseInsensitiveDictionary containing
+        MongoDB URI options.
+    """
+    for optname in list(options):
+        if optname in URI_OPTIONS_DEPRECATION_MAP:
+            mode, message = URI_OPTIONS_DEPRECATION_MAP[optname]
+            if mode == "renamed":
+                newoptname = message
+                if newoptname in options:
+                    warn_msg = "Deprecated option '%s' ignored in favor of '%s'."
+                    warnings.warn(
+                        warn_msg % (options.cased_key(optname), options.cased_key(newoptname)),
+                        DeprecationWarning,
+                        stacklevel=2,
+                    )
+                    options.pop(optname)
+                    continue
+                warn_msg = "Option '%s' is deprecated, use '%s' instead."
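+                # Only the deprecated spelling was supplied: warn, but leave
+                # the option in place so its value is still applied.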
+                warnings.warn(
+                    warn_msg % (options.cased_key(optname), newoptname),
+                    DeprecationWarning,
+                    stacklevel=2,
+                )
+            elif mode == "removed":
+                warn_msg = "Option '%s' is deprecated. %s."
+                warnings.warn(
+                    warn_msg % (options.cased_key(optname), message),
+                    DeprecationWarning,
+                    stacklevel=2,
+                )
+
+    return options
+
+
+def _normalize_options(options: _CaseInsensitiveDictionary) -> _CaseInsensitiveDictionary:
+    """Normalizes option names in the options dictionary by converting them to
+    their internally-used names.
+
+    :param options: Instance of _CaseInsensitiveDictionary containing
+        MongoDB URI options.
+    """
+    # Expand the tlsInsecure option.
+    tlsinsecure = options.get("tlsinsecure")
+    if tlsinsecure is not None:
+        for opt in _IMPLICIT_TLSINSECURE_OPTS:
+            # Implicit options are logically the same as tlsInsecure.
+            options[opt] = tlsinsecure
+
+    for optname in list(options):
+        intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, None)
+        if intname is not None:
+            options[intname] = options.pop(optname)
+
+    return options
+
+
+def validate_options(opts: Mapping[str, Any], warn: bool = False) -> MutableMapping[str, Any]:
+    """Validates and normalizes options passed in a MongoDB URI.
+
+    Returns a new dictionary of validated and normalized options. If warn is
+    False then errors will be thrown for invalid options, otherwise they will
+    be ignored and a warning will be issued.
+
+    :param opts: A dict of MongoDB URI options.
+    :param warn: If ``True`` then warnings will be logged and
+        invalid options will be ignored. Otherwise invalid options will
+        cause errors.
+    """
+    return get_validated_options(opts, warn)
+
+
+def split_options(
+    opts: str, validate: bool = True, warn: bool = False, normalize: bool = True
+) -> MutableMapping[str, Any]:
+    """Takes the options portion of a MongoDB URI, validates each option
+    and returns the options in a dictionary.
+
+    :param opts: A string representing MongoDB URI options.
+    :param validate: If ``True`` (the default), validate and normalize all
+        options.
+    :param warn: If ``False`` (the default), suppress all warnings raised
+        during validation of options.
+    :param normalize: If ``True`` (the default), renames all options to their
+        internally-used names.
+    """
+    and_idx = opts.find("&")
+    semi_idx = opts.find(";")
+    try:
+        if and_idx >= 0 and semi_idx >= 0:
+            raise InvalidURI("Cannot mix '&' and ';' for option separators")
+        elif and_idx >= 0:
+            options = _parse_options(opts, "&")
+        elif semi_idx >= 0:
+            options = _parse_options(opts, ";")
+        elif opts.find("=") != -1:
+            options = _parse_options(opts, None)
+        else:
+            raise ValueError
+    except ValueError:
+        raise InvalidURI("MongoDB URI options are key=value pairs") from None
+
+    options = _handle_security_options(options)
+
+    options = _handle_option_deprecations(options)
+
+    if normalize:
+        options = _normalize_options(options)
+
+    if validate:
+        options = cast(_CaseInsensitiveDictionary, validate_options(options, warn))
+        if options.get("authsource") == "":
+            raise InvalidURI("the authSource database cannot be an empty string")
+
+    return options
+
+
+def split_hosts(hosts: str, default_port: Optional[int] = DEFAULT_PORT) -> list[_Address]:
+    """Takes a string of the form host1[:port],host2[:port]... and
+    splits it into (host, port) tuples. If [:port] isn't present the
+    default_port is used.
+
+    Returns a list of 2-tuples containing the host name (or IP) followed by
+    port number.
+
+    :param hosts: A string of the form host1[:port],host2[:port],...
+ :param default_port: The port number to use when one wasn't specified + for a host. + """ + nodes = [] + for entity in hosts.split(","): + if not entity: + raise ConfigurationError("Empty host (or extra comma in host list)") + port = default_port + # Unix socket entities don't have ports + if entity.endswith(".sock"): + port = None + nodes.append(parse_host(entity, port)) + return nodes + + +# Prohibited characters in database name. DB names also can't have ".", but for +# backward-compat we allow "db.collection" in URI. +_BAD_DB_CHARS = re.compile("[" + re.escape(r'/ "$') + "]") + +_ALLOWED_TXT_OPTS = frozenset( + ["authsource", "authSource", "replicaset", "replicaSet", "loadbalanced", "loadBalanced"] +) + + +def _check_options(nodes: Sized, options: Mapping[str, Any]) -> None: + # Ensure directConnection was not True if there are multiple seeds. + if len(nodes) > 1 and options.get("directconnection"): + raise ConfigurationError("Cannot specify multiple hosts with directConnection=true") + + if options.get("loadbalanced"): + if len(nodes) > 1: + raise ConfigurationError("Cannot specify multiple hosts with loadBalanced=true") + if options.get("directconnection"): + raise ConfigurationError("Cannot specify directConnection=true with loadBalanced=true") + if options.get("replicaset"): + raise ConfigurationError("Cannot specify replicaSet with loadBalanced=true") + + +def _parse_kms_tls_options( + kms_tls_options: Optional[Mapping[str, Any]], + is_sync: bool, +) -> dict[str, SSLContext]: + """Parse KMS TLS connection options.""" + if not kms_tls_options: + return {} + if not isinstance(kms_tls_options, dict): + raise TypeError("kms_tls_options must be a dict") + contexts = {} + for provider, options in kms_tls_options.items(): + if not isinstance(options, dict): + raise TypeError(f'kms_tls_options["{provider}"] must be a dict') + options.setdefault("tls", True) + opts = _CaseInsensitiveDictionary(options) + opts = _handle_security_options(opts) + opts = _normalize_options(opts) + opts = cast(_CaseInsensitiveDictionary, validate_options(opts)) + ssl_context, allow_invalid_hostnames = _parse_ssl_options(opts, is_sync) + if ssl_context is None: + raise ConfigurationError("TLS is required for KMS providers") + if allow_invalid_hostnames: + raise ConfigurationError("Insecure TLS options prohibited") + + for n in [ + "tlsInsecure", + "tlsAllowInvalidCertificates", + "tlsAllowInvalidHostnames", + "tlsDisableCertificateRevocationCheck", + ]: + if n in opts: + raise ConfigurationError(f"Insecure TLS options prohibited: {n}") + contexts[provider] = ssl_context + return contexts + + +def _validate_uri( + uri: str, + default_port: Optional[int] = DEFAULT_PORT, + validate: bool = True, + warn: bool = False, + normalize: bool = True, + srv_max_hosts: Optional[int] = None, +) -> dict[str, Any]: + if uri.startswith(SCHEME): + is_srv = False + scheme_free = uri[SCHEME_LEN:] + elif uri.startswith(SRV_SCHEME): + if not _have_dnspython(): + python_path = sys.executable or "python" + raise ConfigurationError( + 'The "dnspython" module must be ' + "installed to use mongodb+srv:// URIs. 
" + "To fix this error install pymongo again:\n " + "%s -m pip install pymongo>=4.3" % (python_path) + ) + is_srv = True + scheme_free = uri[SRV_SCHEME_LEN:] + else: + raise InvalidURI(f"Invalid URI scheme: URI must begin with '{SCHEME}' or '{SRV_SCHEME}'") + + if not scheme_free: + raise InvalidURI("Must provide at least one hostname or IP") + + user = None + passwd = None + dbase = None + collection = None + options = _CaseInsensitiveDictionary() + + host_plus_db_part, _, opts = scheme_free.partition("?") + if "/" in host_plus_db_part: + host_part, _, dbase = host_plus_db_part.partition("/") + else: + host_part = host_plus_db_part + + if dbase: + dbase = unquote_plus(dbase) + if "." in dbase: + dbase, collection = dbase.split(".", 1) + if _BAD_DB_CHARS.search(dbase): + raise InvalidURI('Bad database name "%s"' % dbase) + else: + dbase = None + + if opts: + options.update(split_options(opts, validate, warn, normalize)) + if "@" in host_part: + userinfo, _, hosts = host_part.rpartition("@") + user, passwd = parse_userinfo(userinfo) + else: + hosts = host_part + + if "/" in hosts: + raise InvalidURI("Any '/' in a unix domain socket must be percent-encoded: %s" % host_part) + + hosts = unquote_plus(hosts) + fqdn = None + srv_max_hosts = srv_max_hosts or options.get("srvMaxHosts") + if is_srv: + if options.get("directConnection"): + raise ConfigurationError(f"Cannot specify directConnection=true with {SRV_SCHEME} URIs") + nodes = split_hosts(hosts, default_port=None) + if len(nodes) != 1: + raise InvalidURI(f"{SRV_SCHEME} URIs must include one, and only one, hostname") + fqdn, port = nodes[0] + if port is not None: + raise InvalidURI(f"{SRV_SCHEME} URIs must not include a port number") + elif not is_srv and options.get("srvServiceName") is not None: + raise ConfigurationError( + "The srvServiceName option is only allowed with 'mongodb+srv://' URIs" + ) + elif not is_srv and srv_max_hosts: + raise ConfigurationError( + "The srvMaxHosts option is only allowed with 'mongodb+srv://' URIs" + ) + else: + nodes = split_hosts(hosts, default_port=default_port) + + _check_options(nodes, options) + + return { + "nodelist": nodes, + "username": user, + "password": passwd, + "database": dbase, + "collection": collection, + "options": options, + "fqdn": fqdn, + } + + +def _make_options_case_sensitive(options: _CaseInsensitiveDictionary) -> dict[str, Any]: + case_sensitive = {} + for option in URI_OPTIONS: + if option.lower() in options: + case_sensitive[option] = options[option] + options.pop(option) + for k, v in options.items(): + case_sensitive[k] = v + return case_sensitive diff --git a/pymongo/write_concern.py b/pymongo/write_concern.py index ced71d0488..1f9da7af2e 100644 --- a/pymongo/write_concern.py +++ b/pymongo/write_concern.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,37 +12,52 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tools for working with write concerns.""" +"""Tools for working with write concerns. -from typing import Any, Dict, Optional, Union +.. seealso:: This module is compatible with both the synchronous and asynchronous PyMongo APIs. 
+"""
+from __future__ import annotations
+
+from typing import Any, Optional, Union
 
 from pymongo.errors import ConfigurationError
 
 
-class WriteConcern(object):
+# Duplicated here to avoid a circular import.
+def validate_boolean(option: str, value: Any) -> bool:
+    """Validates that 'value' is True or False."""
+    if isinstance(value, bool):
+        return value
+    raise TypeError(f"{option} must be True or False, was: {option}={value}")
+
+
+class WriteConcern:
     """WriteConcern
 
-    :Parameters:
-    - `w`: (integer or string) Used with replication, write operations
+    :param w: (integer or string) Used with replication, write operations
         will block until they have been replicated to the specified number
         or tagged set of servers. `w=<integer>` always includes the replica
         set primary (e.g. w=3 means write to the primary and wait until
         replicated to **two** secondaries). **w=0 disables acknowledgement
         of write operations and cannot be used with other write concern
         options.**
-    - `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
-        in milliseconds to control how long to wait for write propagation
-        to complete. If replication does not complete in the given
+    :param wtimeout: (integer) **DEPRECATED** Used in conjunction with `w`.
+        Specify a value in milliseconds to control how long to wait for write
+        propagation to complete. If replication does not complete in the given
         timeframe, a timeout exception is raised.
-    - `j`: If ``True`` block until write operations have been committed
+    :param j: If ``True`` block until write operations have been committed
         to the journal. Cannot be used in combination with `fsync`. Write
         operations will fail with an exception if this option is used when
         the server is running without journaling.
-    - `fsync`: If ``True`` and the server is running without journaling,
+    :param fsync: If ``True`` and the server is running without journaling,
         blocks until the server has synced all data files to disk. If the
         server is running with journaling, this acts the same as the `j`
         option, blocking until write operations have been committed to the
         journal. Cannot be used in combination with `j`.
+
+
+    .. versionchanged:: 4.7
+       Deprecated parameter ``wtimeout``, use :meth:`~pymongo.timeout`.
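+
+    For example, ``WriteConcern(w="majority", j=True)`` corresponds to the
+    write concern document ``{"j": True, "w": "majority"}``.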
""" __slots__ = ("__document", "__acknowledged", "__server_default") @@ -54,24 +69,22 @@ def __init__( j: Optional[bool] = None, fsync: Optional[bool] = None, ) -> None: - self.__document: Dict[str, Any] = {} + self.__document: dict[str, Any] = {} self.__acknowledged = True if wtimeout is not None: if not isinstance(wtimeout, int): - raise TypeError("wtimeout must be an integer") + raise TypeError(f"wtimeout must be an integer, not {type(wtimeout)}") if wtimeout < 0: raise ValueError("wtimeout cannot be less than 0") self.__document["wtimeout"] = wtimeout if j is not None: - if not isinstance(j, bool): - raise TypeError("j must be True or False") + validate_boolean("j", j) self.__document["j"] = j if fsync is not None: - if not isinstance(fsync, bool): - raise TypeError("fsync must be True or False") + validate_boolean("fsync", fsync) if j and fsync: raise ConfigurationError("Can't set both j and fsync at the same time") self.__document["fsync"] = fsync @@ -85,7 +98,7 @@ def __init__( raise ValueError("w cannot be less than 0") self.__acknowledged = w > 0 elif not isinstance(w, str): - raise TypeError("w must be an integer or string") + raise TypeError(f"w must be an integer or string, not {type(w)}") self.__document["w"] = w self.__server_default = not self.__document @@ -96,7 +109,7 @@ def is_server_default(self) -> bool: return self.__server_default @property - def document(self) -> Dict[str, Any]: + def document(self) -> dict[str, Any]: """The document representation of this write concern. .. note:: @@ -112,8 +125,10 @@ def acknowledged(self) -> bool: """ return self.__acknowledged - def __repr__(self): - return "WriteConcern(%s)" % (", ".join("%s=%s" % kvt for kvt in self.__document.items()),) + def __repr__(self) -> str: + return "WriteConcern({})".format( + ", ".join(f"{k}={v!r}" for k, v in self.__document.items()) + ) def __eq__(self, other: Any) -> bool: if isinstance(other, WriteConcern): diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..623eb6c164 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,271 @@ +[build-system] +requires = ["hatchling>1.24","setuptools>=65.0","hatch-requirements-txt>=0.4.1"] +build-backend = "hatchling.build" + +[project] +name = "pymongo" +dynamic = ["version", "dependencies", "optional-dependencies"] +description = "PyMongo - the Official MongoDB Python driver" +readme = "README.md" +license = {file="LICENSE"} +requires-python = ">=3.9" +authors = [ + { name = "The MongoDB Python Team" }, +] +keywords = [ + "bson", + "gridfs", + "mongo", + "mongodb", + "pymongo", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Topic :: Database", + "Typing :: Typed", +] + +[project.urls] +Homepage = "https://www.mongodb.org" +Documentation = "https://www.mongodb.com/docs/languages/python/pymongo-driver/current/" +Source = 
"https://github.com/mongodb/mongo-python-driver" +Tracker = "https://jira.mongodb.org/projects/PYTHON/issues" + +[dependency-groups] +dev = [] +pip = ["pip"] +gevent = ["gevent>=20.6.0"] +coverage = [ + "pytest-cov", + "coverage>=5,<=7.10.6" +] +mockupdb = [ + "mockupdb@git+https://github.com/mongodb-labs/mongo-mockup-db@master" +] +perf = ["simplejson>=3.17.0"] +typing = [ + "mypy==1.18.2", + "pyright==1.1.406", + "typing_extensions", + "pip" +] + +# Used to call hatch_build.py +[tool.hatch.build.hooks.custom] + +[tool.hatch.version] +path = "pymongo/_version.py" +validate-bump = false + +[tool.hatch.build.targets.wheel] +packages = ["bson","gridfs", "pymongo"] + +[tool.hatch.metadata.hooks.requirements_txt] +files = ["requirements.txt"] + +[tool.hatch.metadata.hooks.requirements_txt.optional-dependencies] +aws = ["requirements/aws.txt"] +docs = ["requirements/docs.txt"] +encryption = ["requirements/encryption.txt"] +gssapi = ["requirements/gssapi.txt"] +ocsp = ["requirements/ocsp.txt"] +snappy = ["requirements/snappy.txt"] +test = ["requirements/test.txt"] +zstd = ["requirements/zstd.txt"] + +[tool.pytest.ini_options] +minversion = "7" +addopts = ["-ra", "--strict-config", "--strict-markers", "--junitxml=xunit-results/TEST-results.xml", "-m default or default_async"] +testpaths = ["test"] +log_cli_level = "INFO" +faulthandler_timeout = 1500 +asyncio_default_fixture_loop_scope = "session" +xfail_strict = true +filterwarnings = [ + "error", + # Internal warnings raised during tests. + "module:use an explicit session with no_cursor_timeout=True:UserWarning", + "module:serverselectiontimeoutms must be:UserWarning", + "module:Unsupported compressor:UserWarning", + "module:zlibcompressionlevel must be:UserWarning", + "module:Wire protocol compression with:UserWarning", + "module:GridIn property:DeprecationWarning", + "module:GridOut property:DeprecationWarning", + # pytest-asyncio known issue: https://github.com/pytest-dev/pytest-asyncio/issues/1032 + "module:.*WindowsSelectorEventLoopPolicy:DeprecationWarning", + "module:.*et_event_loop_policy:DeprecationWarning", + # TODO: Remove as part of PYTHON-3923. + "module:unclosed =2.6.1,<3.0.0 diff --git a/requirements/aws.txt b/requirements/aws.txt new file mode 100644 index 0000000000..06e30c11c3 --- /dev/null +++ b/requirements/aws.txt @@ -0,0 +1 @@ +pymongo-auth-aws>=1.1.0,<2.0.0 diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000000..54ebf3625d --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,6 @@ +sphinx>=5.3,<9 +sphinx_rtd_theme>=2,<4 +readthedocs-sphinx-search~=0.3 +sphinxcontrib-shellcheck>=1,<2 +sphinx-autobuild>=2020.9.1 +furo==2025.9.25 diff --git a/requirements/encryption.txt b/requirements/encryption.txt new file mode 100644 index 0000000000..eec1c990f7 --- /dev/null +++ b/requirements/encryption.txt @@ -0,0 +1,3 @@ +pymongo-auth-aws>=1.1.0,<2.0.0 +pymongocrypt>=1.13.0,<2.0.0 +certifi>=2023.7.22;os.name=='nt' or sys_platform=='darwin' diff --git a/requirements/gssapi.txt b/requirements/gssapi.txt new file mode 100644 index 0000000000..7f156b9cea --- /dev/null +++ b/requirements/gssapi.txt @@ -0,0 +1,2 @@ +pykerberos;os.name!='nt' +winkerberos>=0.5.0;os.name=='nt' diff --git a/requirements/ocsp.txt b/requirements/ocsp.txt new file mode 100644 index 0000000000..39dbddef14 --- /dev/null +++ b/requirements/ocsp.txt @@ -0,0 +1,12 @@ +# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced +# a related feature we need. 
17.2.0 fixes a bug +# in set_default_verify_paths we should really avoid. +# service_identity 18.1.0 introduced support for IP addr matching. +# Fallback to certifi on Windows if we can't load CA certs from the system +# store and just use certifi on macOS. +# https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths +certifi>=2023.7.22;os.name=='nt' or sys_platform=='darwin' +pyopenssl>=17.2.0 +requests<3.0.0 +cryptography>=2.5 +service_identity>=18.1.0 diff --git a/requirements/snappy.txt b/requirements/snappy.txt new file mode 100644 index 0000000000..9bb71204b8 --- /dev/null +++ b/requirements/snappy.txt @@ -0,0 +1 @@ +python-snappy diff --git a/requirements/test.txt b/requirements/test.txt new file mode 100644 index 0000000000..566cade7ec --- /dev/null +++ b/requirements/test.txt @@ -0,0 +1,3 @@ +pytest>=8.2 +pytest-asyncio>=0.24.0 +importlib_metadata>=7.0;python_version < "3.13" diff --git a/requirements/zstd.txt b/requirements/zstd.txt new file mode 100644 index 0000000000..864700d2b3 --- /dev/null +++ b/requirements/zstd.txt @@ -0,0 +1 @@ +zstandard diff --git a/sbom.json b/sbom.json new file mode 100644 index 0000000000..56e27f5361 --- /dev/null +++ b/sbom.json @@ -0,0 +1,11 @@ +{ + "metadata": { + "timestamp": "2024-05-02T17:36:12.698229+00:00" + }, + "components": [], + "serialNumber": "urn:uuid:9876a8a6-060e-486f-b128-910aecf0fe7b", + "version": 1, + "$schema": "http://cyclonedx.org/schema/bom-1.5.schema.json", + "bomFormat": "CycloneDX", + "specVersion": "1.5" + } \ No newline at end of file diff --git a/setup.py b/setup.py old mode 100755 new mode 100644 index ce6cce712e..f371b3d75b --- a/setup.py +++ b/setup.py @@ -1,347 +1,8 @@ -import os -import platform -import re -import sys -import warnings +from __future__ import annotations -if sys.version_info[:3] < (3, 7): - raise RuntimeError("Python version >= 3.7 required.") - - -# Hack to silence atexit traceback in some Python versions -try: - import multiprocessing # noqa: F401 -except ImportError: - pass - -from setuptools import setup - -if sys.version_info[:2] < (3, 10): - from distutils.cmd import Command - from distutils.command.build_ext import build_ext - from distutils.core import Extension -else: - from setuptools import Command - from setuptools.command.build_ext import build_ext - from setuptools.extension import Extension - -_HAVE_SPHINX = True -try: - from sphinx.cmd import build as sphinx -except ImportError: - try: - import sphinx - except ImportError: - _HAVE_SPHINX = False - -version = "4.2.0.dev2" - -f = open("README.rst") -try: - try: - readme_content = f.read() - except BaseException: - readme_content = "" -finally: - f.close() - -# PYTHON-654 - Clang doesn't support -mno-fused-madd but the pythons Apple -# ships are built with it. This is a problem starting with Xcode 5.1 -# since clang 3.4 errors out when it encounters unrecognized compiler -# flags. This hack removes -mno-fused-madd from the CFLAGS automatically -# generated by distutils for Apple provided pythons, allowing C extension -# builds to complete without error. The inspiration comes from older -# versions of distutils.sysconfig.get_config_vars. 
-if sys.platform == "darwin" and "clang" in platform.python_compiler().lower(): - from distutils.sysconfig import get_config_vars - - res = get_config_vars() - for key in ("CFLAGS", "PY_CFLAGS"): - if key in res: - flags = res[key] - flags = re.sub("-mno-fused-madd", "", flags) - res[key] = flags - - -class test(Command): - description = "run the tests" - - user_options = [ - ("test-module=", "m", "Discover tests in specified module"), - ("test-suite=", "s", "Test suite to run (e.g. 'some_module.test_suite')"), - ("failfast", "f", "Stop running tests on first failure or error"), - ("xunit-output=", "x", "Generate a results directory with XUnit XML format"), - ] - - def initialize_options(self): - self.test_module = None - self.test_suite = None - self.failfast = False - self.xunit_output = None - - def finalize_options(self): - if self.test_suite is None and self.test_module is None: - self.test_module = "test" - elif self.test_module is not None and self.test_suite is not None: - raise Exception("You may specify a module or suite, but not both") - - def run(self): - # Installing required packages, running egg_info and build_ext are - # part of normal operation for setuptools.command.test.test - if self.distribution.install_requires: - self.distribution.fetch_build_eggs(self.distribution.install_requires) - if self.distribution.tests_require: - self.distribution.fetch_build_eggs(self.distribution.tests_require) - if self.xunit_output: - self.distribution.fetch_build_eggs(["unittest-xml-reporting"]) - self.run_command("egg_info") - build_ext_cmd = self.reinitialize_command("build_ext") - build_ext_cmd.inplace = 1 - self.run_command("build_ext") - - # Construct a TextTestRunner directly from the unittest imported from - # test, which creates a TestResult that supports the 'addSkip' method. - # setuptools will by default create a TextTestRunner that uses the old - # TestResult class. - from test import PymongoTestRunner, test_cases, unittest - - if self.test_suite is None: - all_tests = unittest.defaultTestLoader.discover(self.test_module) - suite = unittest.TestSuite() - suite.addTests(sorted(test_cases(all_tests), key=lambda x: x.__module__)) - else: - suite = unittest.defaultTestLoader.loadTestsFromName(self.test_suite) - if self.xunit_output: - from test import PymongoXMLTestRunner - - runner = PymongoXMLTestRunner( - verbosity=2, failfast=self.failfast, output=self.xunit_output - ) - else: - runner = PymongoTestRunner(verbosity=2, failfast=self.failfast) - result = runner.run(suite) - sys.exit(not result.wasSuccessful()) - - -class doc(Command): - - description = "generate or test documentation" - - user_options = [("test", "t", "run doctests instead of generating documentation")] - - boolean_options = ["test"] - - def initialize_options(self): - self.test = False - - def finalize_options(self): - pass - - def run(self): - - if not _HAVE_SPHINX: - raise RuntimeError("You must install Sphinx to build or test the documentation.") - - if self.test: - path = os.path.join(os.path.abspath("."), "doc", "_build", "doctest") - mode = "doctest" - else: - path = os.path.join(os.path.abspath("."), "doc", "_build", version) - mode = "html" - - try: - os.makedirs(path) - except BaseException: - pass - - sphinx_args = ["-E", "-b", mode, "doc", path] - - # sphinx.main calls sys.exit when sphinx.build_main exists. - # Call build_main directly so we can check status and print - # the full path to the built docs. 
- if hasattr(sphinx, "build_main"): - status = sphinx.build_main(sphinx_args) - else: - status = sphinx.main(sphinx_args) - - if status: - raise RuntimeError("documentation step '%s' failed" % (mode,)) - - sys.stdout.write( - "\nDocumentation step '%s' performed, results here:\n %s/\n" % (mode, path) - ) - - -class custom_build_ext(build_ext): - """Allow C extension building to fail. - - The C extension speeds up BSON encoding, but is not essential. - """ - - warning_message = """ -******************************************************************** -WARNING: %s could not -be compiled. No C extensions are essential for PyMongo to run, -although they do result in significant speed improvements. -%s - -Please see the installation docs for solutions to build issues: - -https://pymongo.readthedocs.io/en/stable/installation.html - -Here are some hints for popular operating systems: - -If you are seeing this message on Linux you probably need to -install GCC and/or the Python development package for your -version of Python. - -Debian and Ubuntu users should issue the following command: - - $ sudo apt-get install build-essential python-dev - -Users of Red Hat based distributions (RHEL, CentOS, Amazon Linux, -Oracle Linux, Fedora, etc.) should issue the following command: - - $ sudo yum install gcc python-devel - -If you are seeing this message on Microsoft Windows please install -PyMongo using pip. Modern versions of pip will install PyMongo -from binary wheels available on pypi. If you must install from -source read the documentation here: - -https://pymongo.readthedocs.io/en/stable/installation.html#installing-from-source-on-windows - -If you are seeing this message on macOS / OSX please install PyMongo -using pip. Modern versions of pip will install PyMongo from binary -wheels available on pypi. If wheels are not available for your version -of macOS / OSX, or you must install from source read the documentation -here: - -https://pymongo.readthedocs.io/en/stable/installation.html#osx -******************************************************************** -""" - - def run(self): - try: - build_ext.run(self) - except Exception: - e = sys.exc_info()[1] - sys.stdout.write("%s\n" % str(e)) - warnings.warn( - self.warning_message - % ( - "Extension modules", - "There was an issue with your platform configuration - see above.", - ) - ) - - def build_extension(self, ext): - name = ext.name - try: - build_ext.build_extension(self, ext) - except Exception: - e = sys.exc_info()[1] - sys.stdout.write("%s\n" % str(e)) - warnings.warn( - self.warning_message - % ( - "The %s extension module" % (name,), - "The output above this warning shows how the compilation failed.", - ) - ) - - -ext_modules = [ - Extension( - "bson._cbson", - include_dirs=["bson"], - sources=["bson/_cbsonmodule.c", "bson/time64.c", "bson/buffer.c"], - ), - Extension( - "pymongo._cmessage", - include_dirs=["bson"], - sources=["pymongo/_cmessagemodule.c", "bson/buffer.c"], - ), -] - -# PyOpenSSL 17.0.0 introduced support for OCSP. 17.1.0 introduced -# a related feature we need. 17.2.0 fixes a bug -# in set_default_verify_paths we should really avoid. -# service_identity 18.1.0 introduced support for IP addr matching. -pyopenssl_reqs = ["pyopenssl>=17.2.0", "requests<3.0.0", "service_identity>=18.1.0"] -if sys.platform in ("win32", "darwin"): - # Fallback to certifi on Windows if we can't load CA certs from the system - # store and just use certifi on macOS. 
- # https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_default_verify_paths - pyopenssl_reqs.append("certifi") - -extras_require = { - "encryption": ["pymongocrypt>=1.3.0,<2.0.0"], - "ocsp": pyopenssl_reqs, - "snappy": ["python-snappy"], - "zstd": ["zstandard"], - "aws": ["pymongo-auth-aws<2.0.0"], - "srv": ["dnspython>=1.16.0,<3.0.0"], -} - -# GSSAPI extras -if sys.platform == "win32": - extras_require["gssapi"] = ["winkerberos>=0.5.0"] -else: - extras_require["gssapi"] = ["pykerberos"] - -extra_opts = {} - -if "--no_ext" in sys.argv: - sys.argv.remove("--no_ext") -elif sys.platform.startswith("java") or sys.platform == "cli" or "PyPy" in sys.version: - sys.stdout.write( - """ -*****************************************************\n -The optional C extensions are currently not supported\n -by this python implementation.\n -*****************************************************\n -""" - ) -else: - extra_opts["ext_modules"] = ext_modules - -setup( - name="pymongo", - version=version, - description="Python driver for MongoDB ", - long_description=readme_content, - author="The MongoDB Python Team", - url="http://github.com/mongodb/mongo-python-driver", - keywords=["mongo", "mongodb", "pymongo", "gridfs", "bson"], - install_requires=[], - license="Apache License, Version 2.0", - python_requires=">=3.7", - classifiers=[ - "Development Status :: 5 - Production/Stable", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Operating System :: MacOS :: MacOS X", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", - "Topic :: Database", - "Typing :: Typed", - ], - cmdclass={"build_ext": custom_build_ext, "doc": doc, "test": test}, - extras_require=extras_require, - packages=["bson", "pymongo", "gridfs"], - package_data={ - "bson": ["py.typed", "*.pyi"], - "pymongo": ["py.typed", "*.pyi"], - "gridfs": ["py.typed", "*.pyi"], - }, - **extra_opts +msg = ( + "PyMongo>=4.8 no longer supports building via setup.py, use python -m pip install instead. If " + "this is an editable install (-e) please upgrade to pip>=21.3 first: python -m pip install --upgrade pip" ) + +raise RuntimeError(msg) diff --git a/strict_pyrightconfig.json b/strict_pyrightconfig.json new file mode 100644 index 0000000000..9684598cd9 --- /dev/null +++ b/strict_pyrightconfig.json @@ -0,0 +1 @@ +{"strict": ["tests/test_typing_strict.py"]} \ No newline at end of file diff --git a/test/__init__.py b/test/__init__.py index 4ecc3c9e9e..1ee2c283d6 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -12,245 +12,86 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test suite for pymongo, bson, and gridfs. 
-""" +"""Synchronous test suite for pymongo, bson, and gridfs.""" +from __future__ import annotations -import base64 +import asyncio import gc +import inspect +import logging +import multiprocessing import os +import signal import socket +import subprocess import sys import threading +import time import traceback import unittest import warnings +from inspect import iscoroutinefunction -try: - from xmlrunner import XMLTestRunner - - HAVE_XML = True -# ValueError is raised when version 3+ is installed on Jython 2.7. -except (ImportError, ValueError): - HAVE_XML = False +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.errors import AutoReconnect +from pymongo.synchronous.uri_parser import parse_uri try: - import ipaddress # noqa + import ipaddress HAVE_IPADDRESS = True except ImportError: HAVE_IPADDRESS = False - from contextlib import contextmanager -from functools import wraps -from test.version import Version -from typing import Dict, no_type_check +from functools import partial, wraps +from typing import Any, Callable, Dict, Generator, overload from unittest import SkipTest from urllib.parse import quote_plus import pymongo import pymongo.errors from bson.son import SON -from pymongo import common, message from pymongo.common import partition_node -from pymongo.database import Database from pymongo.hello import HelloCompat -from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi -from pymongo.ssl_support import HAVE_SSL, _ssl -from pymongo.uri_parser import parse_uri - -if HAVE_SSL: - import ssl - -try: - # Enable the fault handler to dump the traceback of each running thread - # after a segfault. - import faulthandler - - faulthandler.enable() -except ImportError: - pass - -# Enable debug output for uncollectable objects. PyPy does not have set_debug. -if hasattr(gc, "set_debug"): - gc.set_debug( - gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) - ) - -# The host and port of a single mongod or mongos, or the seed host -# for a replica set. -host = os.environ.get("DB_IP", "localhost") -port = int(os.environ.get("DB_PORT", 27017)) - -db_user = os.environ.get("DB_USER", "user") -db_pwd = os.environ.get("DB_PASSWORD", "password") - -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") -CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) -CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) - -TLS_OPTIONS: Dict = dict(tls=True) -if CLIENT_PEM: - TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM -if CA_PEM: - TLS_OPTIONS["tlsCAFile"] = CA_PEM - -COMPRESSORS = os.environ.get("COMPRESSORS") -MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") -TEST_LOADBALANCER = bool(os.environ.get("TEST_LOADBALANCER")) -TEST_SERVERLESS = bool(os.environ.get("TEST_SERVERLESS")) -SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") -MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") -if TEST_LOADBALANCER: - res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res["nodelist"][0] - db_user = res["username"] or db_user - db_pwd = res["password"] or db_pwd -elif TEST_SERVERLESS: - TEST_LOADBALANCER = True - res = parse_uri(SINGLE_MONGOS_LB_URI or "") - host, port = res["nodelist"][0] - db_user = res["username"] or db_user - db_pwd = res["password"] or db_pwd - TLS_OPTIONS = {"tls": True} - # Spec says serverless tests must be run with compression. - COMPRESSORS = COMPRESSORS or "zlib" - - -# Shared KMS data. 
-LOCAL_MASTER_KEY = base64.b64decode( - b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" - b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient + +sys.path[0:0] = [""] + +from test.helpers import client_knobs, global_knobs +from test.helpers_shared import ( + COMPRESSORS, + IS_SRV, + MONGODB_API_VERSION, + MULTI_MONGOS_LB_URI, + TEST_LOADBALANCER, + TLS_OPTIONS, + SystemCertsPatcher, + db_pwd, + db_user, + host, + is_server_resolvable, + port, + print_running_topology, + print_thread_stacks, + print_thread_tracebacks, + sanitize_cmd, + sanitize_reply, ) -AWS_CREDS = { - "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), - "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), -} -AZURE_CREDS = { - "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), - "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), - "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), -} -GCP_CREDS = { - "email": os.environ.get("FLE_GCP_EMAIL", ""), - "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), -} -KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} - - -def is_server_resolvable(): - """Returns True if 'server' is resolvable.""" - socket_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(1) - try: - try: - socket.gethostbyname("server") - return True - except socket.error: - return False - finally: - socket.setdefaulttimeout(socket_timeout) - - -def _create_user(authdb, user, pwd=None, roles=None, **kwargs): - cmd = SON([("createUser", user)]) - # X509 doesn't use a password - if pwd: - cmd["pwd"] = pwd - cmd["roles"] = roles or ["root"] - cmd.update(**kwargs) - return authdb.command(cmd) - - -class client_knobs(object): - def __init__( - self, - heartbeat_frequency=None, - min_heartbeat_interval=None, - kill_cursor_frequency=None, - events_queue_frequency=None, - ): - self.heartbeat_frequency = heartbeat_frequency - self.min_heartbeat_interval = min_heartbeat_interval - self.kill_cursor_frequency = kill_cursor_frequency - self.events_queue_frequency = events_queue_frequency - - self.old_heartbeat_frequency = None - self.old_min_heartbeat_interval = None - self.old_kill_cursor_frequency = None - self.old_events_queue_frequency = None - self._enabled = False - self._stack = None - - def enable(self): - self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY - self.old_min_heartbeat_interval = common.MIN_HEARTBEAT_INTERVAL - self.old_kill_cursor_frequency = common.KILL_CURSOR_FREQUENCY - self.old_events_queue_frequency = common.EVENTS_QUEUE_FREQUENCY - - if self.heartbeat_frequency is not None: - common.HEARTBEAT_FREQUENCY = self.heartbeat_frequency - - if self.min_heartbeat_interval is not None: - common.MIN_HEARTBEAT_INTERVAL = self.min_heartbeat_interval - - if self.kill_cursor_frequency is not None: - common.KILL_CURSOR_FREQUENCY = self.kill_cursor_frequency - - if self.events_queue_frequency is not None: - common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency - self._enabled = True - # Store the allocation traceback to catch non-disabled client_knobs. 
- self._stack = "".join(traceback.format_stack()) - - def __enter__(self): - self.enable() - - @no_type_check - def disable(self): - common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency - common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval - common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency - common.EVENTS_QUEUE_FREQUENCY = self.old_events_queue_frequency - self._enabled = False - - def __exit__(self, exc_type, exc_val, exc_tb): - self.disable() - - def __call__(self, func): - def make_wrapper(f): - @wraps(f) - def wrap(*args, **kwargs): - with self: - return f(*args, **kwargs) - - return wrap +from test.version import Version - return make_wrapper(func) - - def __del__(self): - if self._enabled: - msg = ( - "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY=%s, " - "MIN_HEARTBEAT_INTERVAL=%s, KILL_CURSOR_FREQUENCY=%s, " - "EVENTS_QUEUE_FREQUENCY=%s, stack:\n%s" - % ( - common.HEARTBEAT_FREQUENCY, - common.MIN_HEARTBEAT_INTERVAL, - common.KILL_CURSOR_FREQUENCY, - common.EVENTS_QUEUE_FREQUENCY, - self._stack, - ) - ) - self.disable() - raise Exception(msg) +_IS_SYNC = True -def _all_users(db): - return set(u["user"] for u in db.command("usersInfo").get("users", [])) +def _connection_string(h): + if h.startswith(("mongodb://", "mongodb+srv://")): + return h + return f"mongodb://{h!s}" -class ClientContext(object): +class ClientContext: client: MongoClient MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI @@ -280,10 +121,9 @@ def __init__(self): self.sessions_enabled = False self.client = None # type: ignore self.conn_lock = threading.Lock() - self.is_data_lake = False self.load_balancer = TEST_LOADBALANCER - self.serverless = TEST_SERVERLESS - if self.load_balancer or self.serverless: + self._fips_enabled = None + if self.load_balancer: self.default_client_options["loadBalanced"] = True if COMPRESSORS: self.default_client_options["compressors"] = COMPRESSORS @@ -295,6 +135,8 @@ def __init__(self): def client_options(self): """Return the MongoClient options for creating a duplicate client.""" opts = client_context.default_client_options.copy() + opts["host"] = host + opts["port"] = port if client_context.auth_enabled: opts["username"] = db_user opts["password"] = db_pwd @@ -317,49 +159,46 @@ def uri(self): auth_part = "" if client_context.auth_enabled: auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" - return f"mongodb://{auth_part}{self.pair}/?{opts_part}" + pair = self.pair + return f"mongodb://{auth_part}{pair}/?{opts_part}" @property def hello(self): if not self._hello: - self._hello = self.client.admin.command(HelloCompat.LEGACY_CMD) + if self.load_balancer: + self._hello = self.client.admin.command(HelloCompat.CMD) + else: + self._hello = self.client.admin.command(HelloCompat.LEGACY_CMD) return self._hello def _connect(self, host, port, **kwargs): kwargs.update(self.default_client_options) - client = pymongo.MongoClient(host, port, serverSelectionTimeoutMS=5000, **kwargs) + client: MongoClient = pymongo.MongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) try: try: - client.admin.command(HelloCompat.LEGACY_CMD) # Can we connect? + client.admin.command("ping") # Can we connect? 
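+            # An OperationFailure still means the server is reachable; only
+            # the ConnectionFailure handler below records a failed connection.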
except pymongo.errors.OperationFailure as exc: # SERVER-32063 self.connection_attempts.append( - "connected client %r, but legacy hello failed: %s" % (client, exc) + f"connected client {client!r}, but legacy hello failed: {exc}" ) else: - self.connection_attempts.append("successfully connected client %r" % (client,)) + self.connection_attempts.append(f"successfully connected client {client!r}") # If connected, then return client with default timeout return pymongo.MongoClient(host, port, **kwargs) except pymongo.errors.ConnectionFailure as exc: - self.connection_attempts.append("failed to connect client %r: %s" % (client, exc)) + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") return None finally: client.close() def _init_client(self): + self.mongoses = [] + self.connection_attempts = [] self.client = self._connect(host, port) - if self.client is not None: - # Return early when connected to dataLake as mongohoused does not - # support the getCmdLineOpts command and is tested without TLS. - build_info = self.client.admin.command("buildInfo") - if "dataLake" in build_info: - self.is_data_lake = True - self.auth_enabled = True - self.client = self._connect(host, port, username=db_user, password=db_pwd) - self.connected = True - return - if HAVE_SSL and not self.client: # Is MongoDB configured for SSL? self.client = self._connect(host, port, **TLS_OPTIONS) @@ -371,28 +210,28 @@ def _init_client(self): if self.client: self.connected = True - if self.serverless: - self.auth_enabled = True - else: - try: - self.cmd_line = self.client.admin.command("getCmdLineOpts") - except pymongo.errors.OperationFailure as e: - assert e.details is not None - msg = e.details.get("errmsg", "") - if e.code == 13 or "unauthorized" in msg or "login" in msg: - # Unauthorized. - self.auth_enabled = True - else: - raise + try: + self.cmd_line = self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True else: - self.auth_enabled = self._server_started_with_auth() + raise + else: + self.auth_enabled = self._server_started_with_auth() if self.auth_enabled: - if not self.serverless: + if not IS_SRV: # See if db_user already exists. if not self._check_user_provided(): _create_user(self.client.admin, db_user, db_pwd) + if self.client: + self.client.close() + self.client = self._connect( host, port, @@ -405,13 +244,10 @@ def _init_client(self): # May not have this if OperationFailure was raised earlier. self.cmd_line = self.client.admin.command("getCmdLineOpts") - if self.serverless: - self.server_status = {} - else: - self.server_status = self.client.admin.command("serverStatus") - if self.storage_engine == "mmapv1": - # MMAPv1 does not support retryWrites=True. - self.default_client_options["retryWrites"] = False + self.server_status = self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False hello = self.hello self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello @@ -419,6 +255,8 @@ def _init_client(self): if "setName" in hello: self.replica_set_name = str(hello["setName"]) self.is_rs = True + if self.client: + self.client.close() if self.auth_enabled: # It doesn't matter which member we use as the seed here. 
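The auth detection above relies on getCmdLineOpts being a privileged command: error code 13 ("Unauthorized") from an unauthenticated client implies auth is enabled. The same check in isolation (client is any connected MongoClient):

    import pymongo.errors

    def server_requires_auth(client):
        try:
            client.admin.command("getCmdLineOpts")
            return False  # Command ran without credentials.
        except pymongo.errors.OperationFailure as exc:
            msg = (exc.details or {}).get("errmsg", "")
            if exc.code == 13 or "unauthorized" in msg or "login" in msg:
                return True
            raise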
self.client = pymongo.MongoClient( @@ -442,45 +280,37 @@ def _init_client(self): nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) self.nodes = set(nodes) else: - self.nodes = set([(host, port)]) + self.nodes = {(host, port)} self.w = len(hello.get("hosts", [])) or 1 self.version = Version.from_client(self.client) - if self.serverless: - self.server_parameters = { - "requireApiVersion": False, - "enableTestCommands": True, - } + self.server_parameters = self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if self.server_parameters["enableTestCommands"]: self.test_commands_enabled = True - self.has_ipv6 = False - else: - self.server_parameters = self.client.admin.command("getParameter", "*") - assert self.cmd_line is not None - if "enableTestCommands=1" in self.cmd_line["argv"]: + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: self.test_commands_enabled = True - elif "parsed" in self.cmd_line: - params = self.cmd_line["parsed"].get("setParameter", []) - if "enableTestCommands=1" in params: + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": self.test_commands_enabled = True - else: - params = self.cmd_line["parsed"].get("setParameter", {}) - if params.get("enableTestCommands") == "1": - self.test_commands_enabled = True - self.has_ipv6 = self._server_started_with_ipv6() + self.has_ipv6 = self._server_started_with_ipv6() - self.is_mongos = self.hello.get("msg") == "isdbgrid" + self.is_mongos = (self.hello).get("msg") == "isdbgrid" if self.is_mongos: address = self.client.address self.mongoses.append(address) - if not self.serverless: - # Check for another mongos on the next port. - assert address is not None - next_address = address[0], address[1] + 1 - mongos_client = self._connect(*next_address, **self.default_client_options) - if mongos_client: - hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) - if hello.get("msg") == "isdbgrid": - self.mongoses.append(next_address) + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = self._connect(*next_address, **self.default_client_options) + if mongos_client: + hello = mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) + mongos_client.close() def init(self): with self.conn_lock: @@ -492,14 +322,14 @@ def connection_attempt_info(self): @property def host(self): - if self.is_rs: + if self.is_rs and not IS_SRV: primary = self.client.primary return str(primary[0]) if primary is not None else host return host @property def port(self): - if self.is_rs: + if self.is_rs and not IS_SRV: primary = self.client.primary return primary[1] if primary is not None else port return port @@ -517,19 +347,37 @@ def has_secondaries(self): @property def storage_engine(self): try: - return self.server_status.get("storageEngine", {}).get("name") + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) except AttributeError: # Raised if self.server_status is None. 
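Note the simplification in the hunk above: the new code first asks the server for its effective settings via getParameter and only then falls back to parsing the command line. A quick sketch (client is a placeholder for a connected MongoClient):

    # "getParameter: *" returns every server parameter;
    # "enableTestCommands" comes back as a plain boolean.
    params = client.admin.command("getParameter", "*")
    test_commands_enabled = bool(params.get("enableTestCommands"))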
return None + @property + def fips_enabled(self): + if self._fips_enabled is not None: + return self._fips_enabled + try: + subprocess.run(["fips-mode-setup", "--is-enabled"], check=True) + self._fips_enabled = True + except (subprocess.SubprocessError, FileNotFoundError): + self._fips_enabled = False + if os.environ.get("REQUIRE_FIPS") and not self._fips_enabled: + raise RuntimeError("Expected FIPS to be enabled") + return self._fips_enabled + + def check_auth_type(self, auth_type): + auth_mechs = self.server_parameters.get("authenticationMechanisms", []) + return auth_type in auth_mechs + def _check_user_provided(self): """Return True if db_user/db_password is already an admin user.""" - client = pymongo.MongoClient( + client: MongoClient = pymongo.MongoClient( host, port, username=db_user, password=db_pwd, - serverSelectionTimeoutMS=100, **self.default_client_options, ) @@ -543,6 +391,8 @@ def _check_user_provided(self): return False else: raise + finally: + client.close() def _server_started_with_auth(self): # MongoDB >= 2.0 @@ -579,22 +429,41 @@ def _server_started_with_ipv6(self): for info in socket.getaddrinfo(self.host, self.port): if info[0] == socket.AF_INET6: return True - except socket.error: + except OSError: pass return False def _require(self, condition, msg, func=None): def make_wrapper(f): + if iscoroutinefunction(f): + wraps_async = True + else: + wraps_async = False + @wraps(f) def wrap(*args, **kwargs): self.init() # Always raise SkipTest if we can't connect to MongoDB if not self.connected: - raise SkipTest("Cannot connect to MongoDB on %s" % (self.pair,)) - if condition(): - return f(*args, **kwargs) - raise SkipTest(msg) + pair = self.pair + raise SkipTest(f"Cannot connect to MongoDB on {pair}") + if iscoroutinefunction(condition): + if condition(): + if wraps_async: + return f(*args, **kwargs) + else: + return f(*args, **kwargs) + elif condition(): + if wraps_async: + return f(*args, **kwargs) + else: + return f(*args, **kwargs) + if "self.pair" in msg: + new_msg = msg.replace("self.pair", self.pair) + else: + new_msg = msg + raise SkipTest(new_msg) return wrap @@ -617,30 +486,10 @@ def require_connection(self, func): """Run a test only if we can connect to MongoDB.""" return self._require( lambda: True, # _require checks if we're connected - "Cannot connect to MongoDB on %s" % (self.pair,), - func=func, - ) - - def require_data_lake(self, func): - """Run a test only if we are connected to Atlas Data Lake.""" - return self._require( - lambda: self.is_data_lake, - "Not connected to Atlas Data Lake on %s" % (self.pair,), + "Cannot connect to MongoDB on self.pair", func=func, ) - def require_no_mmap(self, func): - """Run a test only if the server is not using the MMAPv1 storage - engine. 
Only works for standalone and replica sets; tests are - run regardless of storage engine on sharded clusters.""" - - def is_not_mmap(): - if self.is_mongos: - return True - return self.storage_engine != "mmapv1" - - return self._require(is_not_mmap, "Storage engine must not be MMAPv1", func=func) - def require_version_min(self, *ver): """Run a test only if the server version is at least ``version``.""" other_version = Version(*ver) @@ -657,6 +506,32 @@ def require_version_max(self, *ver): "Server version must be at most %s" % str(other_version), ) + def require_libmongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import libmongocrypt_version + + version = Version.from_string(libmongocrypt_version()) + return self._require( + lambda: version >= other_version, + "Libmongocrypt version must be at least %s" % str(other_version), + ) + + def require_pymongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import __version__ as pymongocrypt_version + + version = Version.from_string(pymongocrypt_version) + return self._require( + lambda: version >= other_version, + "PyMongoCrypt version must be at least %s" % str(other_version), + ) + def require_auth(self, func): """Run a test only if the server is running with auth enabled.""" return self._require( @@ -671,6 +546,12 @@ def require_no_auth(self, func): func=func, ) + def require_no_fips(self, func): + """Run a test only if the host does not have FIPS enabled.""" + return self._require( + lambda: not self.fips_enabled, "Test cannot run on a FIPS-enabled host", func=func + ) + def require_replica_set(self, func): """Run a test only if the client is connected to a replica set.""" return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) @@ -683,14 +564,17 @@ def require_secondaries_count(self, count): def sec_count(): return 0 if not self.client else len(self.client.secondaries) - return self._require(lambda: sec_count() >= count, "Not enough secondaries available") + def check(): + return sec_count() >= count + + return self._require(check, "Not enough secondaries available") @property def supports_secondary_read_pref(self): if self.has_secondaries: return True if self.is_mongos: - shard = self.client.config.shards.find_one()["host"] + shard = (self.client.config.shards.find_one())["host"] # type:ignore[index] num_members = shard.count(",") + 1 return num_members > 1 return False @@ -726,7 +610,8 @@ def require_mongos(self, func): def require_multiple_mongoses(self, func): """Run a test only if the client is connected to a sharded cluster - that has 2 mongos nodes.""" + that has 2 mongos nodes. 
+ """ return self._require( lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func ) @@ -759,16 +644,19 @@ def require_no_load_balancer(self, func): lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func ) + def require_change_streams(self, func): + """Run a test only if the server supports change streams.""" + return self.require_no_standalone(func) + def is_topology_type(self, topologies): unknown = set(topologies) - { "single", "replicaset", "sharded", - "sharded-replicaset", "load-balanced", } if unknown: - raise AssertionError("Unknown topologies: %r" % (unknown,)) + raise AssertionError(f"Unknown topologies: {unknown!r}") if self.load_balancer: if "load-balanced" in topologies: return True @@ -779,22 +667,14 @@ def is_topology_type(self, topologies): return True if "sharded" in topologies and self.is_mongos: return True - if "sharded-replicaset" in topologies and self.is_mongos: - shards = list(client_context.client.config.shards.find()) - for shard in shards: - # For a 3-member RS-backed sharded cluster, shard['host'] - # will be 'replicaName/ip1:port1,ip2:port2,ip3:port3' - # Otherwise it will be 'ip1:port1' - host_spec = shard["host"] - if not len(host_spec.split("/")) > 1: - return False - return True return False - def require_cluster_type(self, topologies=[]): # noqa + def require_cluster_type(self, topologies=None): """Run a test only if the client is connected to a cluster that conforms to one of the specified topologies. Acceptable topologies - are 'single', 'replicaset', and 'sharded'.""" + are 'single', 'replicaset', and 'sharded'. + """ + topologies = topologies or [] def _is_valid_topology(): return self.is_topology_type(topologies) @@ -809,7 +689,8 @@ def require_test_commands(self, func): def require_failCommand_fail_point(self, func): """Run a test only if the server supports the failCommand fail - point.""" + point. + """ return self._require( lambda: self.supports_failCommand_fail_point, "failCommand fail point must be supported", @@ -818,9 +699,9 @@ def require_failCommand_fail_point(self, func): def require_failCommand_appName(self, func): """Run a test only if the server supports the failCommand appName.""" - # SERVER-47195 + # SERVER-47195 and SERVER-49336. 
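These require_* gates compose as ordinary decorators, raising SkipTest when a precondition fails. A hypothetical test showing the intended usage (class and method names are made up):

    class TestResume(IntegrationTest):
        @client_context.require_replica_set
        @client_context.require_version_min(4, 4)
        def test_resume(self):
            # Runs only against a replica set on server >= 4.4;
            # otherwise unittest records a skip.
            self.assertTrue(client_context.is_rs)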
return self._require( - lambda: (self.test_commands_enabled and self.version >= (4, 4, -1)), + lambda: (self.test_commands_enabled and self.version >= (4, 4, 7)), "failCommand appName must be supported", func=func, ) @@ -868,8 +749,6 @@ def require_sessions(self, func): return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func) def supports_retryable_writes(self): - if self.storage_engine == "mmapv1": - return False if not self.sessions_enabled: return False return self.is_mongos or self.is_rs @@ -883,9 +762,6 @@ def require_retryable_writes(self, func): ) def supports_transactions(self): - if self.storage_engine == "mmapv1": - return False - if self.version.at_least(4, 1, 8): return self.is_mongos or self.is_rs @@ -911,8 +787,22 @@ def require_no_api_version(self, func): func=func, ) + def require_sync(self, func): + """Run a test only if using the synchronous API.""" + return self._require( + lambda: _IS_SYNC, "This test only works with the synchronous API", func=func + ) + + def require_async(self, func): + """Run a test only if using the asynchronous API.""" # unasync: off + return self._require( + lambda: not _IS_SYNC, + "This test only works with the asynchronous API", # unasync: off + func=func, + ) + def mongos_seeds(self): - return ",".join("%s:%s" % address for address in self.mongoses) + return ",".join("{}:{}".format(*address) for address in self.mongoses) @property def supports_failCommand_fail_point(self): @@ -930,85 +820,354 @@ def requires_hint_with_min_max_queries(self): @property def max_bson_size(self): - return self.hello["maxBsonObjectSize"] + return (self.hello)["maxBsonObjectSize"] @property def max_write_batch_size(self): - return self.hello["maxWriteBatchSize"] + return (self.hello)["maxWriteBatchSize"] + + @property + def max_message_size_bytes(self): + return (self.hello)["maxMessageSizeBytes"] # Reusable client context client_context = ClientContext() - -def sanitize_cmd(cmd): - cp = cmd.copy() - cp.pop("$clusterTime", None) - cp.pop("$db", None) - cp.pop("$readPreference", None) - cp.pop("lsid", None) - if MONGODB_API_VERSION: - # Stable API parameters - cp.pop("apiVersion", None) - # OP_MSG encoding may move the payload type one field to the - # end of the command. Do the same here. - name = next(iter(cp)) - try: - identifier = message._FIELD_MAP[name] - docs = cp.pop(identifier) - cp[identifier] = docs - except KeyError: - pass - return cp +# Global event loop for async tests. +LOOP = None -def sanitize_reply(reply): - cp = reply.copy() - cp.pop("$clusterTime", None) - cp.pop("operationTime", None) - return cp +def get_loop() -> asyncio.AbstractEventLoop: + """Get the test suite's global event loop.""" + global LOOP + if LOOP is None: + try: + LOOP = asyncio.get_running_loop() + except RuntimeError: + # no running event loop, fallback to get_event_loop. + try: + # Ignore DeprecationWarning: There is no current event loop + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + LOOP = asyncio.get_event_loop() + except RuntimeError: + LOOP = asyncio.new_event_loop() + asyncio.set_event_loop(LOOP) + return LOOP class PyMongoTestCase(unittest.TestCase): + if not _IS_SYNC: + # An async TestCase that uses a single event loop for all tests. + # Inspired by TestCase. 
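get_loop() above exists so synchronous unittest machinery can drive coroutines on one shared event loop for the whole run. The core mechanism in miniature (work is a stand-in coroutine):

    import asyncio

    async def work():
        return 42

    loop = asyncio.new_event_loop()
    try:
        assert loop.run_until_complete(work()) == 42
    finally:
        loop.close()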
+ def setUp(self): + pass + + def tearDown(self): + pass + + def addCleanup(self, func, /, *args, **kwargs): + self.addCleanup(*(func, *args), **kwargs) + + def _callSetUp(self): + self.setUp() + self._callAsync(self.setUp) + + def _callTestMethod(self, method): + self._callMaybeAsync(method) + + def _callTearDown(self): + self._callAsync(self.tearDown) + self.tearDown() + + def _callCleanup(self, function, *args, **kwargs): + self._callMaybeAsync(function, *args, **kwargs) + + def _callAsync(self, func, /, *args, **kwargs): + assert inspect.iscoroutinefunction(func), f"{func!r} is not an async function" + return get_loop().run_until_complete(func(*args, **kwargs)) + + def _callMaybeAsync(self, func, /, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return get_loop().run_until_complete(func(*args, **kwargs)) + else: + return func(*args, **kwargs) + def assertEqualCommand(self, expected, actual, msg=None): self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) def assertEqualReply(self, expected, actual, msg=None): self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + @staticmethod + def configure_fail_point(client, command_args, off=False): + cmd = {"configureFailPoint": "failCommand"} + cmd.update(command_args) + if off: + cmd["mode"] = "off" + cmd.pop("data", None) + client.admin.command(cmd) + @contextmanager def fail_point(self, command_args): - cmd_on = SON([("configureFailPoint", "failCommand")]) - cmd_on.update(command_args) - client_context.client.admin.command(cmd_on) + self.configure_fail_point(client_context.client, command_args) try: yield finally: - client_context.client.admin.command( - "configureFailPoint", cmd_on["configureFailPoint"], mode="off" - ) + self.configure_fail_point(client_context.client, command_args, off=True) + + @contextmanager + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started + """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: # type:ignore[attr-defined] + return + _print_threads.called = True # type:ignore[attr-defined] + print_thread_tracebacks() + + _print_threads.called = False # type:ignore[attr-defined] + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=_target) + proc.start() + try: + yield proc # type: ignore + finally: + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. 
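For context, the fail_point helper defined above is used as a context manager: it enables the failCommand fail point on entry and switches it off on exit. A hypothetical example (collection name and error code are illustrative; requires a server started with enableTestCommands=1):

    with self.fail_point({"mode": {"times": 1},
                          "data": {"failCommands": ["insert"],
                                   "errorCode": 91}}):
        # Code 91 (ShutdownInProgress) is retryable, so with
        # retryWrites=True this insert is retried and still succeeds.
        self.client.pymongo_test.test.insert_one({"x": 1})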
+ proc.kill() + proc.join(1) + self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") + self.assertEqual(proc.exitcode, 0) + + @classmethod + def _unmanaged_async_mongo_client( + cls, host, port, authenticate=True, directConnection=None, **kwargs + ): + """Create a new client over SSL/TLS if necessary.""" + host = host or client_context.host + port = port or client_context.port + client_options: dict = client_context.default_client_options.copy() + if client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = MongoClient(uri, port, **client_options) + if client._options.connect: + client._connect() + return client + + def _async_mongo_client(self, host, port, authenticate=True, directConnection=None, **kwargs): + """Create a new client over SSL/TLS if necessary.""" + host = host or client_context.host + port = port or client_context.port + client_options: dict = client_context.default_client_options.copy() + if client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = MongoClient(uri, port, **client_options) + if client._options.connect: + client._connect() + self.addCleanup(client.close) + return client + + @classmethod + def unmanaged_single_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client( + h, p, authenticate=False, directConnection=True, **kwargs + ) + + @classmethod + def unmanaged_single_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client(h, p, directConnection=True, **kwargs) + + @classmethod + def unmanaged_rs_client(cls, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: + """Connect to the replica set and authenticate if necessary.""" + return cls._unmanaged_async_mongo_client(h, p, **kwargs) + + @classmethod + def unmanaged_rs_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. 
Don't authenticate.""" + return cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs) + + @classmethod + def unmanaged_rs_or_single_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs) + + @classmethod + def unmanaged_rs_or_single_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return cls._unmanaged_async_mongo_client(h, p, **kwargs) + + def single_client_noauth( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return self._async_mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) + + def single_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: + """Make a direct connection, and authenticate if necessary.""" + return self._async_mongo_client(h, p, directConnection=True, **kwargs) + + def rs_client_noauth(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: + """Connect to the replica set. Don't authenticate.""" + return self._async_mongo_client(h, p, authenticate=False, **kwargs) + + def rs_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[dict]: + """Connect to the replica set and authenticate if necessary.""" + return self._async_mongo_client(h, p, **kwargs) + + def rs_or_single_client_noauth( + self, h: Any = None, p: Any = None, **kwargs: Any + ) -> MongoClient[dict]: + """Connect to the replica set if there is one, otherwise the standalone. + + Like rs_or_single_client, but does not authenticate. + """ + return self._async_mongo_client(h, p, authenticate=False, **kwargs) + + def rs_or_single_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient[Any]: + """Connect to the replica set if there is one, otherwise the standalone. + + Authenticates if necessary. 
+ """ + return self._async_mongo_client(h, p, **kwargs) + + def simple_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient: + if not h and not p: + client = MongoClient(**kwargs) + else: + client = MongoClient(h, p, **kwargs) + self.addCleanup(client.close) + return client + + @classmethod + def unmanaged_simple_client(cls, h: Any = None, p: Any = None, **kwargs: Any) -> MongoClient: + if not h and not p: + client = MongoClient(**kwargs) + else: + client = MongoClient(h, p, **kwargs) + return client + + def disable_replication(self, client): + """Disable replication on all secondaries.""" + for h, p in client.secondaries: + secondary = self.single_client(h, p) + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") + + def enable_replication(self, client): + """Enable replication on all secondaries.""" + for h, p in client.secondaries: + secondary = self.single_client(h, p) + secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") + + +class UnitTest(PyMongoTestCase): + """Async base class for TestCases that don't require a connection to MongoDB.""" + + def setUp(self) -> None: + pass + + def tearDown(self) -> None: + pass class IntegrationTest(PyMongoTestCase): - """Base class for TestCases that need a connection to MongoDB to pass.""" + """Async base class for TestCases that need a connection to MongoDB to pass.""" - client: MongoClient + client: MongoClient[dict] db: Database credentials: Dict[str, str] - @classmethod @client_context.require_connection - def setUpClass(cls): - if client_context.load_balancer and not getattr(cls, "RUN_ON_LOAD_BALANCER", False): + def setUp(self) -> None: + if client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): raise SkipTest("this test does not support load balancers") - if client_context.serverless and not getattr(cls, "RUN_ON_SERVERLESS", False): - raise SkipTest("this test does not support serverless") - cls.client = client_context.client - cls.db = cls.client.pymongo_test + self.client = client_context.client + self.db = self.client.pymongo_test if client_context.auth_enabled: - cls.credentials = {"username": db_user, "password": db_pwd} + self.credentials = {"username": db_user, "password": db_pwd} else: - cls.credentials = {} + self.credentials = {} def cleanup_colls(self, *collections): """Cleanup collections faster than drop_collection.""" @@ -1022,7 +1181,7 @@ def patch_system_certs(self, ca_certs): self.addCleanup(patcher.disable) -class MockClientTest(unittest.TestCase): +class MockClientTest(UnitTest): """Base class for TestCases that use MockClient. This class is *not* an IntegrationTest: if properly written, MockClient @@ -1034,62 +1193,60 @@ class MockClientTest(unittest.TestCase): # MockClients tests that use replicaSet, directConnection=True, pass # multiple seed addresses, or wait for heartbeat events are incompatible # with loadBalanced=True. - @classmethod @client_context.require_no_load_balancer - def setUpClass(cls): - pass - - def setUp(self): - super(MockClientTest, self).setUp() + def setUp(self) -> None: + super().setUp() self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) - self.client_knobs.enable() - def tearDown(self): + def tearDown(self) -> None: self.client_knobs.disable() - super(MockClientTest, self).tearDown() - - -# Global knobs to speed up the test suite. 
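One design note on the factories above: the instance methods (single_client, rs_client, rs_or_single_client, simple_client) register client.close() with addCleanup, so tests cannot leak connections, while the unmanaged_* classmethods leave closing to the caller. A hypothetical test using a managed client:

    class TestWrites(IntegrationTest):
        def test_w1_insert(self):
            client = self.rs_or_single_client(w=1)  # closed automatically
            client.pymongo_test.stuff.insert_one({"x": 1})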
-global_knobs = client_knobs(events_queue_frequency=0.05) + super().tearDown() def setup(): + if not _IS_SYNC: + # Set up the event loop. + get_loop() client_context.init() warnings.resetwarnings() warnings.simplefilter("always") global_knobs.enable() -def _get_executors(topology): - executors = [] - for server in topology._servers.values(): - # Some MockMonitor do not have an _executor. - if hasattr(server._monitor, "_executor"): - executors.append(server._monitor._executor) - if hasattr(server._monitor, "_rtt_monitor"): - executors.append(server._monitor._rtt_monitor._executor) - executors.append(topology._Topology__events_executor) - if topology._srv_monitor: - executors.append(topology._srv_monitor._executor) - - return [e for e in executors if e is not None] - - -def print_running_topology(topology): - running = [e for e in _get_executors(topology) if not e._stopped] - if running: - print( - "WARNING: found Topology with running threads:\n" - " Threads: %s\n" - " Topology: %s\n" - " Creation traceback:\n%s" % (running, topology, topology._settings._stack) - ) +def teardown(): + global_knobs.disable() + garbage = [] + for g in gc.garbage: + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") + if garbage: + raise AssertionError("\n".join(garbage)) + print_running_clients() + + +@contextmanager +def simple_test_client(): + client_context.init() + yield client_context.client + client_context.client.close() + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) def print_running_clients(): - from pymongo.topology import Topology + from pymongo.synchronous.topology import Topology processed = set() # Avoid false positives on the main test client. @@ -1112,84 +1269,36 @@ def print_running_clients(): pass -def teardown(): - global_knobs.disable() - garbage = [] - for g in gc.garbage: - garbage.append("GARBAGE: %r" % (g,)) - garbage.append(" gc.get_referents: %r" % (gc.get_referents(g),)) - garbage.append(" gc.get_referrers: %r" % (gc.get_referrers(g),)) - if garbage: - assert False, "\n".join(garbage) - c = client_context.client - if c: - if not client_context.is_data_lake: - c.drop_database("pymongo-pooling-tests") - c.drop_database("pymongo_test") - c.drop_database("pymongo_test1") - c.drop_database("pymongo_test2") - c.drop_database("pymongo_test_mike") - c.drop_database("pymongo_test_bernie") - c.close() +def _all_users(db): + return {u["user"] for u in (db.command("usersInfo")).get("users", [])} - print_running_clients() +def _create_user(authdb, user, pwd=None, roles=None, **kwargs): + cmd = SON([("createUser", user)]) + # X509 doesn't use a password + if pwd: + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] + cmd.update(**kwargs) + return authdb.command(cmd) -class PymongoTestRunner(unittest.TextTestRunner): - def run(self, test): - setup() - result = super(PymongoTestRunner, self).run(test) - teardown() - return result +def connected(client): + """Convenience to wait for a newly-constructed client to connect.""" + with warnings.catch_warnings(): + # Ignore warning that ping is always routed to primary even + # if client's read preference isn't PRIMARY. 
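The teardown() above fails the run if anything is left in gc.garbage. A tiny demonstration of the mechanism it relies on (gc.DEBUG_SAVEALL is set here only to make an ordinary cycle visible; the suite itself watches for uncollectable objects):

    import gc

    gc.set_debug(gc.DEBUG_SAVEALL)
    a, b = [], []
    a.append(b)
    b.append(a)  # reference cycle
    del a, b
    gc.collect()
    print(len(gc.garbage) > 0)  # True: the cycle landed in gc.garbage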
+ warnings.simplefilter("ignore", UserWarning) + client.admin.command("ping") # Force connection. -if HAVE_XML: + return client - class PymongoXMLTestRunner(XMLTestRunner): # type: ignore[misc] - def run(self, test): - setup() - result = super(PymongoXMLTestRunner, self).run(test) - teardown() - return result +def drop_collections(db: Database): + # Drop all non-system collections in this database. + for coll in db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): + db.drop_collection(coll) -def test_cases(suite): - """Iterator over all TestCases within a TestSuite.""" - for suite_or_case in suite._tests: - if isinstance(suite_or_case, unittest.TestCase): - # unittest.TestCase - yield suite_or_case - else: - # unittest.TestSuite - for case in test_cases(suite_or_case): - yield case - - -# Helper method to workaround https://bugs.python.org/issue21724 -def clear_warning_registry(): - """Clear the __warningregistry__ for all modules.""" - for _, module in list(sys.modules.items()): - if hasattr(module, "__warningregistry__"): - setattr(module, "__warningregistry__", {}) # noqa - - -class SystemCertsPatcher(object): - def __init__(self, ca_certs): - if ( - ssl.OPENSSL_VERSION.lower().startswith("libressl") - and sys.platform == "darwin" - and not _ssl.IS_PYOPENSSL - ): - raise SkipTest( - "LibreSSL on OSX doesn't support setting CA certificates " - "using SSL_CERT_FILE environment variable." - ) - self.original_certs = os.environ.get("SSL_CERT_FILE") - # Tell OpenSSL where CA certificates live. - os.environ["SSL_CERT_FILE"] = ca_certs - def disable(self): - if self.original_certs is None: - os.environ.pop("SSL_CERT_FILE") - else: - os.environ["SSL_CERT_FILE"] = self.original_certs +def remove_all_users(db: Database): + db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) diff --git a/test/asynchronous/__init__.py b/test/asynchronous/__init__.py new file mode 100644 index 0000000000..78d0576add --- /dev/null +++ b/test/asynchronous/__init__.py @@ -0,0 +1,1320 @@ +# Copyright 2010-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
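A detail worth calling out from the sync helpers above: drop_collections filters with the regex r"^(?!system\.)", a negative lookahead matching every name that does not start with "system.". The same behavior without the regex:

    def drop_collections(db):
        # Drop all non-system collections in this database.
        for name in db.list_collection_names():
            if not name.startswith("system."):
                db.drop_collection(name)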
+ +"""Asynchronous test suite for pymongo, bson, and gridfs.""" +from __future__ import annotations + +import asyncio +import gc +import inspect +import logging +import multiprocessing +import os +import signal +import socket +import subprocess +import sys +import threading +import time +import traceback +import unittest +import warnings +from inspect import iscoroutinefunction + +from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from pymongo.errors import AutoReconnect + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from contextlib import asynccontextmanager, contextmanager +from functools import partial, wraps +from typing import Any, Callable, Dict, Generator, overload +from unittest import SkipTest +from urllib.parse import quote_plus + +import pymongo +import pymongo.errors +from bson.son import SON +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.common import partition_node +from pymongo.hello import HelloCompat +from pymongo.server_api import ServerApi +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] + +sys.path[0:0] = [""] + +from test.asynchronous.helpers import client_knobs, global_knobs +from test.helpers_shared import ( + COMPRESSORS, + IS_SRV, + MONGODB_API_VERSION, + MULTI_MONGOS_LB_URI, + TEST_LOADBALANCER, + TLS_OPTIONS, + SystemCertsPatcher, + db_pwd, + db_user, + host, + is_server_resolvable, + port, + print_running_topology, + print_thread_stacks, + print_thread_tracebacks, + sanitize_cmd, + sanitize_reply, +) +from test.version import Version + +_IS_SYNC = False + + +def _connection_string(h): + if h.startswith(("mongodb://", "mongodb+srv://")): + return h + return f"mongodb://{h!s}" + + +class AsyncClientContext: + client: AsyncMongoClient + + MULTI_MONGOS_LB_URI = MULTI_MONGOS_LB_URI + + def __init__(self): + """Create a client and grab essential information from the server.""" + self.connection_attempts = [] + self.connected = False + self.w = None + self.nodes = set() + self.replica_set_name = None + self.cmd_line = None + self.server_status = None + self.version = Version(-1) # Needs to be comparable with Version + self.auth_enabled = False + self.test_commands_enabled = False + self.server_parameters = {} + self._hello = None + self.is_mongos = False + self.mongoses = [] + self.is_rs = False + self.has_ipv6 = False + self.tls = False + self.tlsCertificateKeyFile = False + self.server_is_resolvable = is_server_resolvable() + self.default_client_options: Dict = {} + self.sessions_enabled = False + self.client = None # type: ignore + self.conn_lock = threading.Lock() + self.load_balancer = TEST_LOADBALANCER + self._fips_enabled = None + if self.load_balancer: + self.default_client_options["loadBalanced"] = True + if COMPRESSORS: + self.default_client_options["compressors"] = COMPRESSORS + if MONGODB_API_VERSION: + server_api = ServerApi(MONGODB_API_VERSION) + self.default_client_options["server_api"] = server_api + + @property + def client_options(self): + """Return the MongoClient options for creating a duplicate client.""" + opts = async_client_context.default_client_options.copy() + opts["host"] = host + opts["port"] = port + if async_client_context.auth_enabled: + opts["username"] = db_user + opts["password"] = db_pwd + if self.replica_set_name: + opts["replicaSet"] = self.replica_set_name + return opts + + @property + async def 
uri(self): + """Return the MongoClient URI for creating a duplicate client.""" + opts = async_client_context.default_client_options.copy() + opts.pop("server_api", None) # Cannot be set from the URI + opts_parts = [] + for opt, val in opts.items(): + strval = str(val) + if isinstance(val, bool): + strval = strval.lower() + opts_parts.append(f"{opt}={quote_plus(strval)}") + opts_part = "&".join(opts_parts) + auth_part = "" + if async_client_context.auth_enabled: + auth_part = f"{quote_plus(db_user)}:{quote_plus(db_pwd)}@" + pair = await self.pair + return f"mongodb://{auth_part}{pair}/?{opts_part}" + + @property + async def hello(self): + if not self._hello: + if self.load_balancer: + self._hello = await self.client.admin.command(HelloCompat.CMD) + else: + self._hello = await self.client.admin.command(HelloCompat.LEGACY_CMD) + return self._hello + + async def _connect(self, host, port, **kwargs): + kwargs.update(self.default_client_options) + client: AsyncMongoClient = pymongo.AsyncMongoClient( + host, port, serverSelectionTimeoutMS=5000, **kwargs + ) + try: + try: + await client.admin.command("ping") # Can we connect? + except pymongo.errors.OperationFailure as exc: + # SERVER-32063 + self.connection_attempts.append( + f"connected client {client!r}, but legacy hello failed: {exc}" + ) + else: + self.connection_attempts.append(f"successfully connected client {client!r}") + # If connected, then return client with default timeout + return pymongo.AsyncMongoClient(host, port, **kwargs) + except pymongo.errors.ConnectionFailure as exc: + self.connection_attempts.append(f"failed to connect client {client!r}: {exc}") + return None + finally: + await client.close() + + async def _init_client(self): + self.mongoses = [] + self.connection_attempts = [] + self.client = await self._connect(host, port) + + if HAVE_SSL and not self.client: + # Is MongoDB configured for SSL? + self.client = await self._connect(host, port, **TLS_OPTIONS) + if self.client: + self.tls = True + self.default_client_options.update(TLS_OPTIONS) + self.tlsCertificateKeyFile = True + + if self.client: + self.connected = True + + try: + self.cmd_line = await self.client.admin.command("getCmdLineOpts") + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 13 or "unauthorized" in msg or "login" in msg: + # Unauthorized. + self.auth_enabled = True + else: + raise + else: + self.auth_enabled = self._server_started_with_auth() + + if self.auth_enabled: + if not IS_SRV: + # See if db_user already exists. + if not await self._check_user_provided(): + await _create_user(self.client.admin, db_user, db_pwd) + + if self.client: + await self.client.close() + + self.client = await self._connect( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + + # May not have this if OperationFailure was raised earlier. + self.cmd_line = await self.client.admin.command("getCmdLineOpts") + + self.server_status = await self.client.admin.command("serverStatus") + if self.storage_engine == "mmapv1": + # MMAPv1 does not support retryWrites=True. + self.default_client_options["retryWrites"] = False + + hello = await self.hello + self.sessions_enabled = "logicalSessionTimeoutMinutes" in hello + + if "setName" in hello: + self.replica_set_name = str(hello["setName"]) + self.is_rs = True + if self.client: + await self.client.close() + if self.auth_enabled: + # It doesn't matter which member we use as the seed here. 
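The async context mirrors the sync probe shown earlier, with every driver call awaited, including close(). A self-contained sketch (host and port are placeholders):

    import asyncio

    import pymongo
    import pymongo.errors

    async def can_connect(host="localhost", port=27017):
        client = pymongo.AsyncMongoClient(host, port, serverSelectionTimeoutMS=5000)
        try:
            await client.admin.command("ping")
            return True
        except pymongo.errors.ConnectionFailure:
            return False
        finally:
            await client.close()

    print(asyncio.run(can_connect()))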
+ self.client = pymongo.AsyncMongoClient( + host, + port, + username=db_user, + password=db_pwd, + replicaSet=self.replica_set_name, + **self.default_client_options, + ) + else: + self.client = pymongo.AsyncMongoClient( + host, port, replicaSet=self.replica_set_name, **self.default_client_options + ) + + # Get the authoritative hello result from the primary. + self._hello = None + hello = await self.hello + nodes = [partition_node(node.lower()) for node in hello.get("hosts", [])] + nodes.extend([partition_node(node.lower()) for node in hello.get("passives", [])]) + nodes.extend([partition_node(node.lower()) for node in hello.get("arbiters", [])]) + self.nodes = set(nodes) + else: + self.nodes = {(host, port)} + self.w = len(hello.get("hosts", [])) or 1 + self.version = await Version.async_from_client(self.client) + + self.server_parameters = await self.client.admin.command("getParameter", "*") + assert self.cmd_line is not None + if self.server_parameters["enableTestCommands"]: + self.test_commands_enabled = True + elif "parsed" in self.cmd_line: + params = self.cmd_line["parsed"].get("setParameter", []) + if "enableTestCommands=1" in params: + self.test_commands_enabled = True + else: + params = self.cmd_line["parsed"].get("setParameter", {}) + if params.get("enableTestCommands") == "1": + self.test_commands_enabled = True + self.has_ipv6 = await self._server_started_with_ipv6() + + self.is_mongos = (await self.hello).get("msg") == "isdbgrid" + if self.is_mongos: + address = await self.client.address + self.mongoses.append(address) + # Check for another mongos on the next port. + assert address is not None + next_address = address[0], address[1] + 1 + mongos_client = await self._connect(*next_address, **self.default_client_options) + if mongos_client: + hello = await mongos_client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") == "isdbgrid": + self.mongoses.append(next_address) + await mongos_client.close() + + async def init(self): + with self.conn_lock: + if not self.client and not self.connection_attempts: + await self._init_client() + + def connection_attempt_info(self): + return "\n".join(self.connection_attempts) + + @property + async def host(self): + if self.is_rs and not IS_SRV: + primary = await self.client.primary + return str(primary[0]) if primary is not None else host + return host + + @property + async def port(self): + if self.is_rs and not IS_SRV: + primary = await self.client.primary + return primary[1] if primary is not None else port + return port + + @property + async def pair(self): + return "%s:%d" % (await self.host, await self.port) + + @property + async def has_secondaries(self): + if not self.client: + return False + return bool(len(await self.client.secondaries)) + + @property + def storage_engine(self): + try: + return self.server_status.get("storageEngine", {}).get( # type:ignore[union-attr] + "name" + ) + except AttributeError: + # Raised if self.server_status is None. 
+ return None + + @property + def fips_enabled(self): + if self._fips_enabled is not None: + return self._fips_enabled + try: + subprocess.run(["fips-mode-setup", "--is-enabled"], check=True) + self._fips_enabled = True + except (subprocess.SubprocessError, FileNotFoundError): + self._fips_enabled = False + if os.environ.get("REQUIRE_FIPS") and not self._fips_enabled: + raise RuntimeError("Expected FIPS to be enabled") + return self._fips_enabled + + def check_auth_type(self, auth_type): + auth_mechs = self.server_parameters.get("authenticationMechanisms", []) + return auth_type in auth_mechs + + async def _check_user_provided(self): + """Return True if db_user/db_password is already an admin user.""" + client: AsyncMongoClient = pymongo.AsyncMongoClient( + host, + port, + username=db_user, + password=db_pwd, + **self.default_client_options, + ) + + try: + return db_user in await _all_users(client.admin) + except pymongo.errors.OperationFailure as e: + assert e.details is not None + msg = e.details.get("errmsg", "") + if e.code == 18 or "auth fails" in msg: + # Auth failed. + return False + else: + raise + finally: + await client.close() + + def _server_started_with_auth(self): + # MongoDB >= 2.0 + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + parsed = self.cmd_line["parsed"] + # MongoDB >= 2.6 + if "security" in parsed: + security = parsed["security"] + # >= rc3 + if "authorization" in security: + return security["authorization"] == "enabled" + # < rc3 + return security.get("auth", False) or bool(security.get("keyFile")) + return parsed.get("auth", False) or bool(parsed.get("keyFile")) + # Legacy + argv = self.cmd_line["argv"] + return "--auth" in argv or "--keyFile" in argv + + async def _server_started_with_ipv6(self): + if not socket.has_ipv6: + return False + + assert self.cmd_line is not None + if "parsed" in self.cmd_line: + if not self.cmd_line["parsed"].get("net", {}).get("ipv6"): + return False + else: + if "--ipv6" not in self.cmd_line["argv"]: + return False + + # The server was started with --ipv6. Is there an IPv6 route to it? 
+ try: + for info in socket.getaddrinfo(await self.host, await self.port): + if info[0] == socket.AF_INET6: + return True + except OSError: + pass + + return False + + def _require(self, condition, msg, func=None): + def make_wrapper(f): + if iscoroutinefunction(f): + wraps_async = True + else: + wraps_async = False + + @wraps(f) + async def wrap(*args, **kwargs): + await self.init() + # Always raise SkipTest if we can't connect to MongoDB + if not self.connected: + pair = await self.pair + raise SkipTest(f"Cannot connect to MongoDB on {pair}") + if iscoroutinefunction(condition): + if await condition(): + if wraps_async: + return await f(*args, **kwargs) + else: + return f(*args, **kwargs) + elif condition(): + if wraps_async: + return await f(*args, **kwargs) + else: + return f(*args, **kwargs) + if "self.pair" in msg: + new_msg = msg.replace("self.pair", await self.pair) + else: + new_msg = msg + raise SkipTest(new_msg) + + return wrap + + if func is None: + + def decorate(f): + return make_wrapper(f) + + return decorate + return make_wrapper(func) + + async def create_user(self, dbname, user, pwd=None, roles=None, **kwargs): + kwargs["writeConcern"] = {"w": self.w} + return await _create_user(self.client[dbname], user, pwd, roles, **kwargs) + + async def drop_user(self, dbname, user): + await self.client[dbname].command("dropUser", user, writeConcern={"w": self.w}) + + def require_connection(self, func): + """Run a test only if we can connect to MongoDB.""" + return self._require( + lambda: True, # _require checks if we're connected + "Cannot connect to MongoDB on self.pair", + func=func, + ) + + def require_version_min(self, *ver): + """Run a test only if the server version is at least ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version >= other_version, + "Server version must be at least %s" % str(other_version), + ) + + def require_version_max(self, *ver): + """Run a test only if the server version is at most ``version``.""" + other_version = Version(*ver) + return self._require( + lambda: self.version <= other_version, + "Server version must be at most %s" % str(other_version), + ) + + def require_libmongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import libmongocrypt_version + + version = Version.from_string(libmongocrypt_version()) + return self._require( + lambda: version >= other_version, + "Libmongocrypt version must be at least %s" % str(other_version), + ) + + def require_pymongocrypt_min(self, *ver): + other_version = Version(*ver) + if not _HAVE_PYMONGOCRYPT: + version = Version.from_string("0.0.0") + else: + from pymongocrypt import __version__ as pymongocrypt_version + + version = Version.from_string(pymongocrypt_version) + return self._require( + lambda: version >= other_version, + "PyMongoCrypt version must be at least %s" % str(other_version), + ) + + def require_auth(self, func): + """Run a test only if the server is running with auth enabled.""" + return self._require( + lambda: self.auth_enabled, "Authentication is not enabled on the server", func=func + ) + + def require_no_auth(self, func): + """Run a test only if the server is running without auth enabled.""" + return self._require( + lambda: not self.auth_enabled, + "Authentication must not be enabled on the server", + func=func, + ) + + def require_no_fips(self, func): + """Run a test only if the host does not have FIPS enabled.""" + return self._require( + 
lambda: not self.fips_enabled, "Test cannot run on a FIPS-enabled host", func=func + ) + + def require_replica_set(self, func): + """Run a test only if the client is connected to a replica set.""" + return self._require(lambda: self.is_rs, "Not connected to a replica set", func=func) + + def require_secondaries_count(self, count): + """Run a test only if the client is connected to a replica set that has + `count` secondaries. + """ + + async def sec_count(): + return 0 if not self.client else len(await self.client.secondaries) + + async def check(): + return await sec_count() >= count + + return self._require(check, "Not enough secondaries available") + + @property + async def supports_secondary_read_pref(self): + if await self.has_secondaries: + return True + if self.is_mongos: + shard = (await self.client.config.shards.find_one())["host"] # type:ignore[index] + num_members = shard.count(",") + 1 + return num_members > 1 + return False + + def require_secondary_read_pref(self): + """Run a test only if the client is connected to a cluster that + supports secondary read preference + """ + return self._require( + lambda: self.supports_secondary_read_pref, + "This cluster does not support secondary read preference", + ) + + def require_no_replica_set(self, func): + """Run a test if the client is *not* connected to a replica set.""" + return self._require( + lambda: not self.is_rs, "Connected to a replica set, not a standalone mongod", func=func + ) + + def require_ipv6(self, func): + """Run a test only if the client can connect to a server via IPv6.""" + return self._require(lambda: self.has_ipv6, "No IPv6", func=func) + + def require_no_mongos(self, func): + """Run a test only if the client is not connected to a mongos.""" + return self._require( + lambda: not self.is_mongos, "Must be connected to a mongod, not a mongos", func=func + ) + + def require_mongos(self, func): + """Run a test only if the client is connected to a mongos.""" + return self._require(lambda: self.is_mongos, "Must be connected to a mongos", func=func) + + def require_multiple_mongoses(self, func): + """Run a test only if the client is connected to a sharded cluster + that has 2 mongos nodes. 
+ """ + return self._require( + lambda: len(self.mongoses) > 1, "Must have multiple mongoses available", func=func + ) + + def require_standalone(self, func): + """Run a test only if the client is connected to a standalone.""" + return self._require( + lambda: not (self.is_mongos or self.is_rs), + "Must be connected to a standalone", + func=func, + ) + + def require_no_standalone(self, func): + """Run a test only if the client is not connected to a standalone.""" + return self._require( + lambda: self.is_mongos or self.is_rs, + "Must be connected to a replica set or mongos", + func=func, + ) + + def require_load_balancer(self, func): + """Run a test only if the client is connected to a load balancer.""" + return self._require( + lambda: self.load_balancer, "Must be connected to a load balancer", func=func + ) + + def require_no_load_balancer(self, func): + """Run a test only if the client is not connected to a load balancer.""" + return self._require( + lambda: not self.load_balancer, "Must not be connected to a load balancer", func=func + ) + + def require_change_streams(self, func): + """Run a test only if the server supports change streams.""" + return self.require_no_standalone(func) + + async def is_topology_type(self, topologies): + unknown = set(topologies) - { + "single", + "replicaset", + "sharded", + "load-balanced", + } + if unknown: + raise AssertionError(f"Unknown topologies: {unknown!r}") + if self.load_balancer: + if "load-balanced" in topologies: + return True + return False + if "single" in topologies and not (self.is_mongos or self.is_rs): + return True + if "replicaset" in topologies and self.is_rs: + return True + if "sharded" in topologies and self.is_mongos: + return True + return False + + def require_cluster_type(self, topologies=None): + """Run a test only if the client is connected to a cluster that + conforms to one of the specified topologies. Acceptable topologies + are 'single', 'replicaset', and 'sharded'. + """ + topologies = topologies or [] + + async def _is_valid_topology(): + return await self.is_topology_type(topologies) + + return self._require(_is_valid_topology, "Cluster type not in %s" % (topologies)) + + def require_test_commands(self, func): + """Run a test only if the server has test commands enabled.""" + return self._require( + lambda: self.test_commands_enabled, "Test commands must be enabled", func=func + ) + + def require_failCommand_fail_point(self, func): + """Run a test only if the server supports the failCommand fail + point. + """ + return self._require( + lambda: self.supports_failCommand_fail_point, + "failCommand fail point must be supported", + func=func, + ) + + def require_failCommand_appName(self, func): + """Run a test only if the server supports the failCommand appName.""" + # SERVER-47195 and SERVER-49336. 
+ return self._require(
+ lambda: (self.test_commands_enabled and self.version >= (4, 4, 7)),
+ "failCommand appName must be supported",
+ func=func,
+ )
+
+ def require_failCommand_blockConnection(self, func):
+ """Run a test only if the server supports failCommand blockConnection."""
+ return self._require(
+ lambda: (
+ self.test_commands_enabled
+ and (
+ (not self.is_mongos and self.version >= (4, 2, 9))
+ or (self.is_mongos and self.version >= (4, 4))
+ )
+ ),
+ "failCommand blockConnection is not supported",
+ func=func,
+ )
+
+ def require_tls(self, func):
+ """Run a test only if the client can connect over TLS."""
+ return self._require(lambda: self.tls, "Must be able to connect via TLS", func=func)
+
+ def require_no_tls(self, func):
+ """Run a test only if the client can connect without TLS."""
+ return self._require(lambda: not self.tls, "Must be able to connect without TLS", func=func)
+
+ def require_tlsCertificateKeyFile(self, func):
+ """Run a test only if the client can connect with tlsCertificateKeyFile."""
+ return self._require(
+ lambda: self.tlsCertificateKeyFile,
+ "Must be able to connect with tlsCertificateKeyFile",
+ func=func,
+ )
+
+ def require_server_resolvable(self, func):
+ """Run a test only if the hostname 'server' is resolvable."""
+ return self._require(
+ lambda: self.server_is_resolvable,
+ "No hosts entry for 'server'. Cannot validate hostname in the certificate",
+ func=func,
+ )
+
+ def require_sessions(self, func):
+ """Run a test only if the deployment supports sessions."""
+ return self._require(lambda: self.sessions_enabled, "Sessions not supported", func=func)
+
+ def supports_retryable_writes(self):
+ if not self.sessions_enabled:
+ return False
+ return self.is_mongos or self.is_rs
+
+ def require_retryable_writes(self, func):
+ """Run a test only if the deployment supports retryable writes."""
+ return self._require(
+ self.supports_retryable_writes,
+ "This server does not support retryable writes",
+ func=func,
+ )
+
+ def supports_transactions(self):
+ if self.version.at_least(4, 1, 8):
+ return self.is_mongos or self.is_rs
+
+ if self.version.at_least(4, 0):
+ return self.is_rs
+
+ return False
+
+ def require_transactions(self, func):
+ """Run a test only if the deployment might support transactions.
+
+ *Might* because this does not test the storage engine or FCV.
+ """ + return self._require( + self.supports_transactions, "Transactions are not supported", func=func + ) + + def require_no_api_version(self, func): + """Skip this test when testing with requireApiVersion.""" + return self._require( + lambda: not MONGODB_API_VERSION, + "This test does not work with requireApiVersion", + func=func, + ) + + def require_sync(self, func): + """Run a test only if using the synchronous API.""" + return self._require( + lambda: _IS_SYNC, "This test only works with the synchronous API", func=func + ) + + def require_async(self, func): + """Run a test only if using the asynchronous API.""" # unasync: off + return self._require( + lambda: not _IS_SYNC, + "This test only works with the asynchronous API", # unasync: off + func=func, + ) + + def mongos_seeds(self): + return ",".join("{}:{}".format(*address) for address in self.mongoses) + + @property + def supports_failCommand_fail_point(self): + """Does the server support the failCommand fail point?""" + if self.is_mongos: + return self.version.at_least(4, 1, 5) and self.test_commands_enabled + else: + return self.version.at_least(4, 0) and self.test_commands_enabled + + @property + def requires_hint_with_min_max_queries(self): + """Does the server require a hint with min/max queries.""" + # Changed in SERVER-39567. + return self.version.at_least(4, 1, 10) + + @property + async def max_bson_size(self): + return (await self.hello)["maxBsonObjectSize"] + + @property + async def max_write_batch_size(self): + return (await self.hello)["maxWriteBatchSize"] + + @property + async def max_message_size_bytes(self): + return (await self.hello)["maxMessageSizeBytes"] + + +# Reusable client context +async_client_context = AsyncClientContext() + +# Global event loop for async tests. +LOOP = None + + +def get_loop() -> asyncio.AbstractEventLoop: + """Get the test suite's global event loop.""" + global LOOP + if LOOP is None: + try: + LOOP = asyncio.get_running_loop() + except RuntimeError: + # no running event loop, fallback to get_event_loop. + try: + # Ignore DeprecationWarning: There is no current event loop + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + LOOP = asyncio.get_event_loop() + except RuntimeError: + LOOP = asyncio.new_event_loop() + asyncio.set_event_loop(LOOP) + return LOOP + + +class AsyncPyMongoTestCase(unittest.TestCase): + if not _IS_SYNC: + # An async TestCase that uses a single event loop for all tests. + # Inspired by IsolatedAsyncioTestCase. 
+ async def asyncSetUp(self): + pass + + async def asyncTearDown(self): + pass + + def addAsyncCleanup(self, func, /, *args, **kwargs): + self.addCleanup(*(func, *args), **kwargs) + + def _callSetUp(self): + self.setUp() + self._callAsync(self.asyncSetUp) + + def _callTestMethod(self, method): + self._callMaybeAsync(method) + + def _callTearDown(self): + self._callAsync(self.asyncTearDown) + self.tearDown() + + def _callCleanup(self, function, *args, **kwargs): + self._callMaybeAsync(function, *args, **kwargs) + + def _callAsync(self, func, /, *args, **kwargs): + assert inspect.iscoroutinefunction(func), f"{func!r} is not an async function" + return get_loop().run_until_complete(func(*args, **kwargs)) + + def _callMaybeAsync(self, func, /, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return get_loop().run_until_complete(func(*args, **kwargs)) + else: + return func(*args, **kwargs) + + def assertEqualCommand(self, expected, actual, msg=None): + self.assertEqual(sanitize_cmd(expected), sanitize_cmd(actual), msg) + + def assertEqualReply(self, expected, actual, msg=None): + self.assertEqual(sanitize_reply(expected), sanitize_reply(actual), msg) + + @staticmethod + async def configure_fail_point(client, command_args, off=False): + cmd = {"configureFailPoint": "failCommand"} + cmd.update(command_args) + if off: + cmd["mode"] = "off" + cmd.pop("data", None) + await client.admin.command(cmd) + + @asynccontextmanager + async def fail_point(self, command_args): + await self.configure_fail_point(async_client_context.client, command_args) + try: + yield + finally: + await self.configure_fail_point(async_client_context.client, command_args, off=True) + + @contextmanager + def fork( + self, target: Callable, timeout: float = 60 + ) -> Generator[multiprocessing.Process, None, None]: + """Helper for tests that use os.fork() + + Use in a with statement: + + with self.fork(target=lambda: print('in child')) as proc: + self.assertTrue(proc.pid) # Child process was started + """ + + def _print_threads(*args: object) -> None: + if _print_threads.called: # type:ignore[attr-defined] + return + _print_threads.called = True # type:ignore[attr-defined] + print_thread_tracebacks() + + _print_threads.called = False # type:ignore[attr-defined] + + def _target() -> None: + signal.signal(signal.SIGUSR1, _print_threads) + try: + target() + except Exception as exc: + sys.stderr.write(f"Child process failed with: {exc}\n") + _print_threads() + # Sleep for a while to let the parent attach via GDB. + time.sleep(2 * timeout) + raise + + ctx = multiprocessing.get_context("fork") + proc = ctx.Process(target=_target) + proc.start() + try: + yield proc # type: ignore + finally: + proc.join(timeout) + pid = proc.pid + assert pid + if proc.exitcode is None: + # gdb to get C-level tracebacks + print_thread_stacks(pid) + # If it failed, SIGUSR1 to get thread tracebacks. + os.kill(pid, signal.SIGUSR1) + proc.join(5) + if proc.exitcode is None: + # SIGINT to get main thread traceback in case SIGUSR1 didn't work. + os.kill(pid, signal.SIGINT) + proc.join(5) + if proc.exitcode is None: + # SIGKILL in case SIGINT didn't work. 
+ proc.kill() + proc.join(1) + self.fail(f"child timed out after {timeout}s (see traceback in logs): deadlock?") + self.assertEqual(proc.exitcode, 0) + + @classmethod + async def _unmanaged_async_mongo_client( + cls, host, port, authenticate=True, directConnection=None, **kwargs + ): + """Create a new client over SSL/TLS if necessary.""" + host = host or await async_client_context.host + port = port or await async_client_context.port + client_options: dict = async_client_context.default_client_options.copy() + if async_client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = async_client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = await parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = AsyncMongoClient(uri, port, **client_options) + if client._options.connect: + await client.aconnect() + return client + + async def _async_mongo_client( + self, host, port, authenticate=True, directConnection=None, **kwargs + ): + """Create a new client over SSL/TLS if necessary.""" + host = host or await async_client_context.host + port = port or await async_client_context.port + client_options: dict = async_client_context.default_client_options.copy() + if async_client_context.replica_set_name and not directConnection: + client_options["replicaSet"] = async_client_context.replica_set_name + if directConnection is not None: + client_options["directConnection"] = directConnection + client_options.update(kwargs) + + uri = _connection_string(host) + auth_mech = kwargs.get("authMechanism", "") + if async_client_context.auth_enabled and authenticate and auth_mech != "MONGODB-OIDC": + # Only add the default username or password if one is not provided. + res = await parse_uri(uri) + if ( + not res["username"] + and not res["password"] + and "username" not in client_options + and "password" not in client_options + ): + client_options["username"] = db_user + client_options["password"] = db_pwd + client = AsyncMongoClient(uri, port, **client_options) + if client._options.connect: + await client.aconnect() + self.addAsyncCleanup(client.close) + return client + + @classmethod + async def unmanaged_async_single_client_noauth( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. Don't authenticate.""" + return await cls._unmanaged_async_mongo_client( + h, p, authenticate=False, directConnection=True, **kwargs + ) + + @classmethod + async def unmanaged_async_single_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient[dict]: + """Make a direct connection. 
Authenticate if necessary."""
+ return await cls._unmanaged_async_mongo_client(h, p, directConnection=True, **kwargs)
+
+ @classmethod
+ async def unmanaged_async_rs_client(
+ cls, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Connect to the replica set and authenticate if necessary."""
+ return await cls._unmanaged_async_mongo_client(h, p, **kwargs)
+
+ @classmethod
+ async def unmanaged_async_rs_client_noauth(
+ cls, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Connect to the replica set. Don't authenticate."""
+ return await cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs)
+
+ @classmethod
+ async def unmanaged_async_rs_or_single_client_noauth(
+ cls, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Connect to the replica set if there is one, otherwise the standalone. Don't authenticate."""
+ return await cls._unmanaged_async_mongo_client(h, p, authenticate=False, **kwargs)
+
+ @classmethod
+ async def unmanaged_async_rs_or_single_client(
+ cls, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Connect to the replica set if there is one, otherwise the standalone. Authenticate if necessary."""
+ return await cls._unmanaged_async_mongo_client(h, p, **kwargs)
+
+ async def async_single_client_noauth(
+ self, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Make a direct connection. Don't authenticate."""
+ return await self._async_mongo_client(
+ h, p, authenticate=False, directConnection=True, **kwargs
+ )
+
+ async def async_single_client(
+ self, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Make a direct connection, and authenticate if necessary."""
+ return await self._async_mongo_client(h, p, directConnection=True, **kwargs)
+
+ async def async_rs_client_noauth(
+ self, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Connect to the replica set. Don't authenticate."""
+ return await self._async_mongo_client(h, p, authenticate=False, **kwargs)
+
+ async def async_rs_client(
+ self, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Connect to the replica set and authenticate if necessary."""
+ return await self._async_mongo_client(h, p, **kwargs)
+
+ async def async_rs_or_single_client_noauth(
+ self, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[dict]:
+ """Connect to the replica set if there is one, otherwise the standalone.
+
+ Like rs_or_single_client, but does not authenticate.
+ """
+ return await self._async_mongo_client(h, p, authenticate=False, **kwargs)
+
+ async def async_rs_or_single_client(
+ self, h: Any = None, p: Any = None, **kwargs: Any
+ ) -> AsyncMongoClient[Any]:
+ """Connect to the replica set if there is one, otherwise the standalone.
+
+ Authenticates if necessary.
+ """ + return await self._async_mongo_client(h, p, **kwargs) + + def simple_client(self, h: Any = None, p: Any = None, **kwargs: Any) -> AsyncMongoClient: + if not h and not p: + client = AsyncMongoClient(**kwargs) + else: + client = AsyncMongoClient(h, p, **kwargs) + self.addAsyncCleanup(client.close) + return client + + @classmethod + def unmanaged_simple_client( + cls, h: Any = None, p: Any = None, **kwargs: Any + ) -> AsyncMongoClient: + if not h and not p: + client = AsyncMongoClient(**kwargs) + else: + client = AsyncMongoClient(h, p, **kwargs) + return client + + async def disable_replication(self, client): + """Disable replication on all secondaries.""" + for h, p in await client.secondaries: + secondary = await self.async_single_client(h, p) + await secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") + + async def enable_replication(self, client): + """Enable replication on all secondaries.""" + for h, p in await client.secondaries: + secondary = await self.async_single_client(h, p) + await secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") + + +class AsyncUnitTest(AsyncPyMongoTestCase): + """Async base class for TestCases that don't require a connection to MongoDB.""" + + async def asyncSetUp(self) -> None: + pass + + async def asyncTearDown(self) -> None: + pass + + +class AsyncIntegrationTest(AsyncPyMongoTestCase): + """Async base class for TestCases that need a connection to MongoDB to pass.""" + + client: AsyncMongoClient[dict] + db: AsyncDatabase + credentials: Dict[str, str] + + @async_client_context.require_connection + async def asyncSetUp(self) -> None: + if async_client_context.load_balancer and not getattr(self, "RUN_ON_LOAD_BALANCER", False): + raise SkipTest("this test does not support load balancers") + self.client = async_client_context.client + self.db = self.client.pymongo_test + if async_client_context.auth_enabled: + self.credentials = {"username": db_user, "password": db_pwd} + else: + self.credentials = {} + + async def cleanup_colls(self, *collections): + """Cleanup collections faster than drop_collection.""" + for c in collections: + c = self.client[c.database.name][c.name] + await c.delete_many({}) + await c.drop_indexes() + + def patch_system_certs(self, ca_certs): + patcher = SystemCertsPatcher(ca_certs) + self.addCleanup(patcher.disable) + + +class AsyncMockClientTest(AsyncUnitTest): + """Base class for TestCases that use MockClient. + + This class is *not* an IntegrationTest: if properly written, MockClient + tests do not require a running server. + + The class temporarily overrides HEARTBEAT_FREQUENCY to speed up tests. + """ + + # MockClients tests that use replicaSet, directConnection=True, pass + # multiple seed addresses, or wait for heartbeat events are incompatible + # with loadBalanced=True. + @async_client_context.require_no_load_balancer + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + + self.client_knobs = client_knobs(heartbeat_frequency=0.001, min_heartbeat_interval=0.001) + self.client_knobs.enable() + + async def asyncTearDown(self) -> None: + self.client_knobs.disable() + await super().asyncTearDown() + + +async def async_setup(): + if not _IS_SYNC: + # Set up the event loop. 
+ get_loop() + await async_client_context.init() + warnings.resetwarnings() + warnings.simplefilter("always") + global_knobs.enable() + + +async def async_teardown(): + global_knobs.disable() + garbage = [] + for g in gc.garbage: + garbage.append(f"GARBAGE: {g!r}") + garbage.append(f" gc.get_referents: {gc.get_referents(g)!r}") + garbage.append(f" gc.get_referrers: {gc.get_referrers(g)!r}") + if garbage: + raise AssertionError("\n".join(garbage)) + print_running_clients() + + +@asynccontextmanager +async def async_simple_test_client(): + await async_client_context.init() + yield async_client_context.client + await async_client_context.client.close() + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) + + +def print_running_clients(): + from pymongo.asynchronous.topology import Topology + + processed = set() + # Avoid false positives on the main test client. + # XXX: Can be removed after PYTHON-1634 or PYTHON-1896. + c = async_client_context.client + if c: + processed.add(c._topology._topology_id) + # Call collect to manually cleanup any would-be gc'd clients to avoid + # false positives. + gc.collect() + for obj in gc.get_objects(): + try: + if isinstance(obj, Topology): + # Avoid printing the same Topology multiple times. + if obj._topology_id in processed: + continue + print_running_topology(obj) + processed.add(obj._topology_id) + except ReferenceError: + pass + + +async def _all_users(db): + return {u["user"] for u in (await db.command("usersInfo")).get("users", [])} + + +async def _create_user(authdb, user, pwd=None, roles=None, **kwargs): + cmd = SON([("createUser", user)]) + # X509 doesn't use a password + if pwd: + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] + cmd.update(**kwargs) + return await authdb.command(cmd) + + +async def connected(client): + """Convenience to wait for a newly-constructed client to connect.""" + with warnings.catch_warnings(): + # Ignore warning that ping is always routed to primary even + # if client's read preference isn't PRIMARY. + warnings.simplefilter("ignore", UserWarning) + await client.admin.command("ping") # Force connection. + + return client + + +async def drop_collections(db: AsyncDatabase): + # Drop all non-system collections in this database. 
+ for coll in await db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): + await db.drop_collection(coll) + + +async def remove_all_users(db: AsyncDatabase): + await db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": async_client_context.w}) diff --git a/test/asynchronous/conftest.py b/test/asynchronous/conftest.py new file mode 100644 index 0000000000..a27a9f213d --- /dev/null +++ b/test/asynchronous/conftest.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +import asyncio +import sys +from test import pytest_conf +from test.asynchronous import async_setup, async_teardown + +import pytest +import pytest_asyncio + +_IS_SYNC = False + + +@pytest.fixture(scope="session") +def event_loop_policy(): + # The default asyncio loop implementation on Windows + # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) + # We explicitly use a different loop implementation here to prevent that issue + if sys.platform == "win32": + return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] + + return asyncio.get_event_loop_policy() + + +@pytest_asyncio.fixture(scope="package", autouse=True) +async def test_setup_and_teardown(): + await async_setup() + yield + await async_teardown() + + +pytest_collection_modifyitems = pytest_conf.pytest_collection_modifyitems diff --git a/test/asynchronous/helpers.py b/test/asynchronous/helpers.py new file mode 100644 index 0000000000..892c629631 --- /dev/null +++ b/test/asynchronous/helpers.py @@ -0,0 +1,176 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared helper methods for pymongo, bson, and gridfs test suites.""" +from __future__ import annotations + +import asyncio +import threading +import traceback +from functools import wraps +from typing import Optional, no_type_check + +from bson import SON +from pymongo import common +from pymongo._asyncio_task import create_task +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + + +async def async_repl_set_step_down(client, **kwargs): + """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" + cmd = SON([("replSetStepDown", 1)]) + cmd.update(kwargs) + + # Unfreeze a secondary to ensure a speedy election. 
+ await client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) + await client.admin.command(cmd) + + +class client_knobs: + def __init__( + self, + heartbeat_frequency=None, + min_heartbeat_interval=None, + kill_cursor_frequency=None, + events_queue_frequency=None, + ): + self.heartbeat_frequency = heartbeat_frequency + self.min_heartbeat_interval = min_heartbeat_interval + self.kill_cursor_frequency = kill_cursor_frequency + self.events_queue_frequency = events_queue_frequency + + self.old_heartbeat_frequency = None + self.old_min_heartbeat_interval = None + self.old_kill_cursor_frequency = None + self.old_events_queue_frequency = None + self._enabled = False + self._stack = None + + def enable(self): + self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY + self.old_min_heartbeat_interval = common.MIN_HEARTBEAT_INTERVAL + self.old_kill_cursor_frequency = common.KILL_CURSOR_FREQUENCY + self.old_events_queue_frequency = common.EVENTS_QUEUE_FREQUENCY + + if self.heartbeat_frequency is not None: + common.HEARTBEAT_FREQUENCY = self.heartbeat_frequency + + if self.min_heartbeat_interval is not None: + common.MIN_HEARTBEAT_INTERVAL = self.min_heartbeat_interval + + if self.kill_cursor_frequency is not None: + common.KILL_CURSOR_FREQUENCY = self.kill_cursor_frequency + + if self.events_queue_frequency is not None: + common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency + self._enabled = True + # Store the allocation traceback to catch non-disabled client_knobs. + self._stack = "".join(traceback.format_stack()) + + def __enter__(self): + self.enable() + + @no_type_check + def disable(self): + common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency + common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval + common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency + common.EVENTS_QUEUE_FREQUENCY = self.old_events_queue_frequency + self._enabled = False + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + def __call__(self, func): + def make_wrapper(f): + @wraps(f) + async def wrap(*args, **kwargs): + with self: + return await f(*args, **kwargs) + + return wrap + + return make_wrapper(func) + + def __del__(self): + if self._enabled: + msg = ( + "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY={}, " + "MIN_HEARTBEAT_INTERVAL={}, KILL_CURSOR_FREQUENCY={}, " + "EVENTS_QUEUE_FREQUENCY={}, stack:\n{}".format( + common.HEARTBEAT_FREQUENCY, + common.MIN_HEARTBEAT_INTERVAL, + common.KILL_CURSOR_FREQUENCY, + common.EVENTS_QUEUE_FREQUENCY, + self._stack, + ) + ) + self.disable() + raise Exception(msg) + + +# Global knobs to speed up the test suite. 
+global_knobs = client_knobs(events_queue_frequency=0.05) + + +if _IS_SYNC: + PARENT = threading.Thread +else: + PARENT = object + + +class ConcurrentRunner(PARENT): + def __init__(self, **kwargs): + if _IS_SYNC: + super().__init__(**kwargs) + self.name = kwargs.get("name", "ConcurrentRunner") + self.stopped = False + self.task = None + self.target = kwargs.get("target", None) + self.args = kwargs.get("args", []) + + if not _IS_SYNC: + + async def start(self): + self.task = create_task(self.run(), name=self.name) + + async def join(self, timeout: Optional[float] = None): # type: ignore[override] + if self.task is not None: + await asyncio.wait([self.task], timeout=timeout) + + def is_alive(self): + return not self.stopped + + async def run(self): + try: + await self.target(*self.args) + finally: + self.stopped = True + + +class ExceptionCatchingTask(ConcurrentRunner): + """A Task that stores any exception encountered while running.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.exc = None + + async def run(self): + try: + await super().run() + except BaseException as exc: + self.exc = exc + raise diff --git a/test/asynchronous/pymongo_mocks.py b/test/asynchronous/pymongo_mocks.py new file mode 100644 index 0000000000..40beb3c0dc --- /dev/null +++ b/test/asynchronous/pymongo_mocks.py @@ -0,0 +1,252 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tools for mocking parts of PyMongo to test other parts.""" +from __future__ import annotations + +import contextlib +import weakref +from functools import partial +from test import client_context +from test.asynchronous import async_client_context + +from pymongo import AsyncMongoClient, common +from pymongo.asynchronous.monitor import Monitor +from pymongo.asynchronous.pool import Pool +from pymongo.errors import AutoReconnect, NetworkTimeout +from pymongo.hello import Hello, HelloCompat +from pymongo.server_description import ServerDescription + +_IS_SYNC = False + + +class MockPool(Pool): + def __init__(self, client, pair, *args, **kwargs): + # MockPool gets a 'client' arg, regular pools don't. Weakref it to + # avoid cycle with __del__, causing ResourceWarnings in Python 3.3. + self.client = weakref.proxy(client) + self.mock_host, self.mock_port = pair + + # Actually connect to the default server. 
+ Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) + + @contextlib.asynccontextmanager + async def checkout(self, handler=None): + client = self.client + host_and_port = f"{self.mock_host}:{self.mock_port}" + if host_and_port in client.mock_down_hosts: + raise AutoReconnect("mock error") + + assert host_and_port in ( + client.mock_standalones + client.mock_members + client.mock_mongoses + ), "bad host: %s" % host_and_port + + async with Pool.checkout(self, handler) as conn: + conn.mock_host = self.mock_host + conn.mock_port = self.mock_port + yield conn + + +class DummyMonitor: + def __init__(self, server_description, topology, pool, topology_settings): + self._server_description = server_description + self.opened = False + + def cancel_check(self): + pass + + async def join(self): + pass + + def open(self): + self.opened = True + + def request_check(self): + pass + + async def close(self): + self.opened = False + + +class AsyncMockMonitor(Monitor): + def __init__(self, client, server_description, topology, pool, topology_settings): + # MockMonitor gets a 'client' arg, regular monitors don't. Weakref it + # to avoid cycles. + self.client = weakref.proxy(client) + Monitor.__init__(self, server_description, topology, pool, topology_settings) + + async def _check_once(self): + client = self.client + address = self._server_description.address + response, rtt = client.mock_hello("%s:%d" % address) # type: ignore[str-format] + return ServerDescription(address, Hello(response), rtt) + + +class AsyncMockClient(AsyncMongoClient): + def __init__( + self, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs, + ): + """An AsyncMongoClient connected to the default server, with a mock topology. + + standalones, members, mongoses, arbiters, and down_hosts determine the + configuration of the topology. They are formatted like ['a:1', 'b:2']. + hello_hosts provides an alternative host list for the server's + mocked hello response; see test_connect_with_internal_ips. + """ + self.mock_standalones = standalones[:] + self.mock_members = members[:] + + if self.mock_members: + self.mock_primary = self.mock_members[0] + else: + self.mock_primary = None + + # Hosts that should be considered an arbiter. + self.mock_arbiters = arbiters[:] if arbiters else [] + + if hello_hosts is not None: + self.mock_hello_hosts = hello_hosts + else: + self.mock_hello_hosts = members[:] + + self.mock_mongoses = mongoses[:] + + # Hosts that should raise socket errors. 
+ self.mock_down_hosts = down_hosts[:] if down_hosts else [] + + # Hostname -> (min wire version, max wire version) + self.mock_wire_versions = {} + + # Hostname -> max write batch size + self.mock_max_write_batch_sizes = {} + + # Hostname -> round trip time + self.mock_rtts = {} + + kwargs["_pool_class"] = partial(MockPool, self) + kwargs["_monitor_class"] = partial(AsyncMockMonitor, self) + + client_options = async_client_context.default_client_options.copy() + client_options.update(kwargs) + + super().__init__(*args, **client_options) + + @classmethod + async def get_async_mock_client( + cls, + standalones, + members, + mongoses, + hello_hosts=None, + arbiters=None, + down_hosts=None, + *args, + **kwargs, + ): + c = AsyncMockClient( + standalones, members, mongoses, hello_hosts, arbiters, down_hosts, *args, **kwargs + ) + + await c.aconnect() + return c + + def kill_host(self, host): + """Host is like 'a:1'.""" + self.mock_down_hosts.append(host) + + def revive_host(self, host): + """Host is like 'a:1'.""" + self.mock_down_hosts.remove(host) + + def set_wire_version_range(self, host, min_version, max_version): + self.mock_wire_versions[host] = (min_version, max_version) + + def set_max_write_batch_size(self, host, size): + self.mock_max_write_batch_sizes[host] = size + + def mock_hello(self, host): + """Return mock hello response (a dict) and round trip time.""" + if host in self.mock_wire_versions: + min_wire_version, max_wire_version = self.mock_wire_versions[host] + else: + min_wire_version = common.MIN_SUPPORTED_WIRE_VERSION + max_wire_version = common.MAX_SUPPORTED_WIRE_VERSION + + max_write_batch_size = self.mock_max_write_batch_sizes.get( + host, common.MAX_WRITE_BATCH_SIZE + ) + + rtt = self.mock_rtts.get(host, 0) + + # host is like 'a:1'. + if host in self.mock_down_hosts: + raise NetworkTimeout("mock timeout") + + elif host in self.mock_standalones: + response = { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } + elif host in self.mock_members: + primary = host == self.mock_primary + + # Simulate a replica set member. + response = { + "ok": 1, + HelloCompat.LEGACY_CMD: primary, + "secondary": not primary, + "setName": "rs", + "hosts": self.mock_hello_hosts, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "maxWriteBatchSize": max_write_batch_size, + } + + if self.mock_primary: + response["primary"] = self.mock_primary + + if host in self.mock_arbiters: + response["arbiterOnly"] = True + response["secondary"] = False + elif host in self.mock_mongoses: + response = { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "minWireVersion": min_wire_version, + "maxWireVersion": max_wire_version, + "msg": "isdbgrid", + "maxWriteBatchSize": max_write_batch_size, + } + else: + # In test_internal_ips(), we try to connect to a host listed + # in hello['hosts'] but not publicly accessible. + raise AutoReconnect("Unknown host: %s" % host) + + return response, rtt + + def _process_periodic_tasks(self): + # Avoid the background thread causing races, e.g. a surprising + # reconnect while we're trying to test a disconnected client. + pass diff --git a/test/asynchronous/qcheck.py b/test/asynchronous/qcheck.py new file mode 100644 index 0000000000..190a7f1a91 --- /dev/null +++ b/test/asynchronous/qcheck.py @@ -0,0 +1,255 @@ +# Copyright 2009-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import datetime +import random +import re +import sys +import traceback + +sys.path[0:0] = [""] + +from bson.dbref import DBRef +from bson.objectid import ObjectId +from bson.son import SON + +_IS_SYNC = False + +gen_target = 100 +reduction_attempts = 10 +examples = 5 + + +def lift(value): + return lambda: value + + +def choose_lifted(generator_list): + return lambda: random.choice(generator_list) + + +def my_map(generator, function): + return lambda: function(generator()) + + +def choose(list): + return lambda: random.choice(list)() + + +def gen_range(start, stop): + return lambda: random.randint(start, stop) + + +def gen_int(): + max_int = 2147483647 + return lambda: random.randint(-max_int - 1, max_int) + + +def gen_float(): + return lambda: (random.random() - 0.5) * sys.maxsize + + +def gen_boolean(): + return lambda: random.choice([True, False]) + + +def gen_printable_char(): + return lambda: chr(random.randint(32, 126)) + + +def gen_printable_string(gen_length): + return lambda: "".join(gen_list(gen_printable_char(), gen_length)()) + + +def gen_char(set=None): + return lambda: bytes([random.randint(0, 255)]) + + +def gen_string(gen_length): + return lambda: b"".join(gen_list(gen_char(), gen_length)()) + + +def gen_unichar(): + return lambda: chr(random.randint(1, 0xFFF)) + + +def gen_unicode(gen_length): + return lambda: "".join([x for x in gen_list(gen_unichar(), gen_length)() if x not in ".$"]) + + +def gen_list(generator, gen_length): + return lambda: [generator() for _ in range(gen_length())] + + +def gen_datetime(): + return lambda: datetime.datetime( + random.randint(1970, 2037), + random.randint(1, 12), + random.randint(1, 28), + random.randint(0, 23), + random.randint(0, 59), + random.randint(0, 59), + random.randint(0, 999) * 1000, + ) + + +def gen_dict(gen_key, gen_value, gen_length): + def a_dict(gen_key, gen_value, length): + result = {} + for _ in range(length): + result[gen_key()] = gen_value() + return result + + return lambda: a_dict(gen_key, gen_value, gen_length()) + + +def gen_regexp(gen_length): + # TODO our patterns only consist of one letter. + # this is because of a bug in CPython's regex equality testing, + # which I haven't quite tracked down, so I'm just ignoring it... 
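+ # (As a result, pattern() below only emits runs of the letter "a".)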
+ def pattern(): + return "".join(gen_list(choose_lifted("a"), gen_length)()) + + def gen_flags(): + flags = 0 + if random.random() > 0.5: + flags = flags | re.IGNORECASE + if random.random() > 0.5: + flags = flags | re.MULTILINE + if random.random() > 0.5: + flags = flags | re.VERBOSE + + return flags + + return lambda: re.compile(pattern(), gen_flags()) + + +def gen_objectid(): + return lambda: ObjectId() + + +def gen_dbref(): + collection = gen_unicode(gen_range(0, 20)) + return lambda: DBRef(collection(), gen_mongo_value(1, True)()) + + +def gen_mongo_value(depth, ref): + choices = [ + gen_unicode(gen_range(0, 50)), + gen_printable_string(gen_range(0, 50)), + my_map(gen_string(gen_range(0, 1000)), bytes), + gen_int(), + gen_float(), + gen_boolean(), + gen_datetime(), + gen_objectid(), + lift(None), + ] + if ref: + choices.append(gen_dbref()) + if depth > 0: + choices.append(gen_mongo_list(depth, ref)) + choices.append(gen_mongo_dict(depth, ref)) + return choose(choices) + + +def gen_mongo_list(depth, ref): + return gen_list(gen_mongo_value(depth - 1, ref), gen_range(0, 10)) + + +def gen_mongo_dict(depth, ref=True): + return my_map( + gen_dict(gen_unicode(gen_range(0, 20)), gen_mongo_value(depth - 1, ref), gen_range(0, 10)), + SON, + ) + + +def simplify(case): # TODO this is a hack + if isinstance(case, SON) and "$ref" not in case: + simplified = SON(case) # make a copy! + if random.choice([True, False]): + # delete + simplified_keys = list(simplified) + if not len(simplified_keys): + return (False, case) + simplified.pop(random.choice(simplified_keys)) + return (True, simplified) + else: + # simplify a value + simplified_items = list(simplified.items()) + if not len(simplified_items): + return (False, case) + (key, value) = random.choice(simplified_items) + (success, value) = simplify(value) + simplified[key] = value + return (success, success and simplified or case) + if isinstance(case, list): + simplified = list(case) + if random.choice([True, False]): + # delete + if not len(simplified): + return (False, case) + simplified.pop(random.randrange(len(simplified))) + return (True, simplified) + else: + # simplify an item + if not len(simplified): + return (False, case) + index = random.randrange(len(simplified)) + (success, value) = simplify(simplified[index]) + simplified[index] = value + return (success, success and simplified or case) + return (False, case) + + +async def reduce(case, predicate, reductions=0): + for _ in range(reduction_attempts): + (reduced, simplified) = simplify(case) + if reduced and not await predicate(simplified): + return await reduce(simplified, predicate, reductions + 1) + return (reductions, case) + + +async def isnt(predicate): + async def is_not(x): + return not await predicate(x) + + return is_not + + +async def check(predicate, generator): + counter_examples = [] + for _ in range(gen_target): + case = generator() + try: + if not await predicate(case): + reduction = await reduce(case, predicate) + counter_examples.append("after {} reductions: {!r}".format(*reduction)) + except: + counter_examples.append(f"{case!r} : {traceback.format_exc()}") + return counter_examples + + +async def check_unittest(test, predicate, generator): + counter_examples = await check(predicate, generator) + if counter_examples: + failures = len(counter_examples) + message = "\n".join([" -> %s" % f for f in counter_examples[:examples]]) + message = "found %d counter examples, displaying first %d:\n%s" % ( + failures, + min(failures, examples), + message, + ) + test.fail(message) 
diff --git a/test/asynchronous/test_async_cancellation.py b/test/asynchronous/test_async_cancellation.py new file mode 100644 index 0000000000..f450ea23cc --- /dev/null +++ b/test/asynchronous/test_async_cancellation.py @@ -0,0 +1,129 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that async cancellation performed by users clean up resources correctly.""" +from __future__ import annotations + +import asyncio +import sys +from test.asynchronous.utils import async_get_pool +from test.utils_shared import delay, one + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected + + +class TestAsyncCancellation(AsyncIntegrationTest): + async def test_async_cancellation_closes_connection(self): + pool = await async_get_pool(self.client) + await self.client.db.test.insert_one({"x": 1}) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + conn = one(pool.conns) + + async def task(): + await self.client.db.test.find_one({"$where": delay(0.2)}) + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(conn.closed) + + @async_client_context.require_transactions + async def test_async_cancellation_aborts_transaction(self): + await self.client.db.test.insert_one({"x": 1}) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + session = self.client.start_session() + + async def callback(session): + await self.client.db.test.find_one({"$where": delay(0.2)}, session=session) + + async def task(): + await session.with_transaction(callback) + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertFalse(session.in_transaction) + + @async_client_context.require_failCommand_blockConnection + async def test_async_cancellation_closes_cursor(self): + await self.client.db.test.insert_many([{"x": 1}, {"x": 2}]) + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + + cursor = self.client.db.test.find({}, batch_size=1) + await cursor.next() + + # Make sure getMore commands block + fail_command = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 200}, + } + + async def task(): + async with self.fail_point(fail_command): + await cursor.next() + + task = asyncio.create_task(task()) + + await asyncio.sleep(0.1) + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(cursor._killed) + + @async_client_context.require_change_streams + @async_client_context.require_failCommand_blockConnection + async def test_async_cancellation_closes_change_stream(self): + self.addAsyncCleanup(self.client.db.test.delete_many, {}) + change_stream = await self.client.db.test.watch(batch_size=2) + event = asyncio.Event() + + # Make sure getMore commands block + 
fail_command = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 200}, + } + + async def task(): + async with self.fail_point(fail_command): + await self.client.db.test.insert_many([{"x": 1}, {"x": 2}]) + event.set() + await change_stream.next() + + task = asyncio.create_task(task()) + + await event.wait() + + task.cancel() + with self.assertRaises(asyncio.CancelledError): + await task + + self.assertTrue(change_stream._closed) diff --git a/test/asynchronous/test_async_contextvars_reset.py b/test/asynchronous/test_async_contextvars_reset.py new file mode 100644 index 0000000000..c6e825bbdf --- /dev/null +++ b/test/asynchronous/test_async_contextvars_reset.py @@ -0,0 +1,41 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that AsyncPeriodicExecutors do not copy ContextVars from their parents.""" +from __future__ import annotations + +import asyncio +import sys +from test.asynchronous.utils import async_get_pool +from test.utils_shared import delay, one + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest + + +class TestAsyncContextVarsReset(AsyncIntegrationTest): + async def test_context_vars_are_reset_in_executor(self): + if sys.version_info < (3, 12): + self.skipTest("Test requires asyncio.Task.get_context (added in Python 3.12)") + + await self.client.db.test.insert_one({"x": 1}) + for server in self.client._topology._servers.values(): + for context in [ + c + for c in server._monitor._executor._task.get_context() + if c.name in ["TIMEOUT", "RTT", "DEADLINE"] + ]: + self.assertIn(context.get(), [None, float("inf"), 0.0]) + await self.client.db.test.delete_many({}) diff --git a/test/asynchronous/test_async_loop_safety.py b/test/asynchronous/test_async_loop_safety.py new file mode 100644 index 0000000000..7516cb8eeb --- /dev/null +++ b/test/asynchronous/test_async_loop_safety.py @@ -0,0 +1,36 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test that the asynchronous API detects event loop changes and fails correctly.""" +from __future__ import annotations + +import asyncio +import unittest + +from pymongo import AsyncMongoClient + + +class TestClientLoopSafety(unittest.TestCase): + def test_client_errors_on_different_loop(self): + client = AsyncMongoClient() + loop1 = asyncio.new_event_loop() + loop1.run_until_complete(client.aconnect()) + loop2 = asyncio.new_event_loop() + with self.assertRaisesRegex( + RuntimeError, "Cannot use AsyncMongoClient in different event loop" + ): + loop2.run_until_complete(client.aconnect()) + loop1.run_until_complete(client.close()) + loop1.close() + loop2.close() diff --git a/test/asynchronous/test_async_loop_unblocked.py b/test/asynchronous/test_async_loop_unblocked.py new file mode 100644 index 0000000000..86f934b798 --- /dev/null +++ b/test/asynchronous/test_async_loop_unblocked.py @@ -0,0 +1,56 @@ +# Copyright 2025-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that the asynchronous API does not block the event loop.""" +from __future__ import annotations + +import asyncio +import time +from test.asynchronous import AsyncIntegrationTest + +from pymongo.errors import ServerSelectionTimeoutError + + +class TestClientLoopUnblocked(AsyncIntegrationTest): + async def test_client_does_not_block_loop(self): + # Use an unreachable TEST-NET host to ensure that the client times out attempting to create a connection. + client = self.simple_client("192.0.2.1", serverSelectionTimeoutMS=500) + latencies = [] + + # If the loop is being blocked, at least one iteration will have a latency much more than 0.1 seconds + async def background_task(): + start = time.monotonic() + try: + while True: + start = time.monotonic() + await asyncio.sleep(0.1) + latencies.append(time.monotonic() - start) + except asyncio.CancelledError: + latencies.append(time.monotonic() - start) + raise + + t = asyncio.create_task(background_task()) + + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No servers found yet"): + await client.admin.command("ping") + + t.cancel() + with self.assertRaises(asyncio.CancelledError): + await t + + self.assertLessEqual( + sorted(latencies, reverse=True)[0], + 1.0, + "Background task was blocked from running", + ) diff --git a/test/asynchronous/test_auth.py b/test/asynchronous/test_auth.py new file mode 100644 index 0000000000..904674db16 --- /dev/null +++ b/test/asynchronous/test_auth.py @@ -0,0 +1,748 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Authentication Tests."""
+from __future__ import annotations
+
+import asyncio
+import os
+import sys
+import threading
+from urllib.parse import quote_plus
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import (
+ AsyncIntegrationTest,
+ AsyncPyMongoTestCase,
+ SkipTest,
+ async_client_context,
+ unittest,
+)
+from test.utils_shared import AllowListEventListener, delay, ignore_deprecations
+
+import pytest
+
+from pymongo import AsyncMongoClient, monitoring
+from pymongo.asynchronous.auth import HAVE_KERBEROS, _canonicalize_hostname
+from pymongo.auth_shared import _build_credentials_tuple
+from pymongo.errors import OperationFailure
+from pymongo.hello import HelloCompat
+from pymongo.read_preferences import ReadPreference
+from pymongo.saslprep import HAVE_STRINGPREP
+
+_IS_SYNC = False
+
+pytestmark = pytest.mark.auth
+
+# YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX.
+GSSAPI_HOST = os.environ.get("GSSAPI_HOST")
+GSSAPI_PORT = int(os.environ.get("GSSAPI_PORT", "27017"))
+GSSAPI_PRINCIPAL = os.environ.get("GSSAPI_PRINCIPAL")
+GSSAPI_SERVICE_NAME = os.environ.get("GSSAPI_SERVICE_NAME", "mongodb")
+GSSAPI_CANONICALIZE = os.environ.get("GSSAPI_CANONICALIZE", "false")
+GSSAPI_SERVICE_REALM = os.environ.get("GSSAPI_SERVICE_REALM")
+GSSAPI_PASS = os.environ.get("GSSAPI_PASS")
+GSSAPI_DB = os.environ.get("GSSAPI_DB", "test")
+
+SASL_HOST = os.environ.get("SASL_HOST")
+SASL_PORT = int(os.environ.get("SASL_PORT", "27017"))
+SASL_USER = os.environ.get("SASL_USER")
+SASL_PASS = os.environ.get("SASL_PASS")
+SASL_DB = os.environ.get("SASL_DB", "$external")
+
+
+class AutoAuthenticateThread(threading.Thread):
+ """Used in testing threaded authentication.
+
+ This calls collection.find_one() with a 1-second delay to ensure it must
+ check out and authenticate multiple connections from the pool concurrently.
+
+ :Parameters:
+ `collection`: An auth-protected collection containing one document.
+ """
+
+ def __init__(self, collection):
+ super().__init__()
+ self.collection = collection
+ self.success = False
+
+ def run(self):
+ assert self.collection.find_one({"$where": delay(1)}) is not None
+ self.success = True
+
+
+class TestGSSAPI(AsyncPyMongoTestCase):
+ mech_properties: dict
+ service_realm_required: bool
+
+ @classmethod
+ def setUpClass(cls):
+ if not HAVE_KERBEROS:
+ raise SkipTest("Kerberos module not available.")
+ if not GSSAPI_HOST or not GSSAPI_PRINCIPAL:
+ raise SkipTest("Must set GSSAPI_HOST and GSSAPI_PRINCIPAL to test GSSAPI")
+ cls.service_realm_required = (
+ GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL
+ )
+ mech_properties = dict(
+ SERVICE_NAME=GSSAPI_SERVICE_NAME, CANONICALIZE_HOST_NAME=GSSAPI_CANONICALIZE
+ )
+ if GSSAPI_SERVICE_REALM is not None:
+ mech_properties["SERVICE_REALM"] = GSSAPI_SERVICE_REALM
+ cls.mech_properties = mech_properties
+
+ async def test_credentials_hashing(self):
+ # GSSAPI credentials are properly hashed.
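+ # creds1 and creds2 are built from identical inputs and must hash
+ # equal, while a different SERVICE_NAME must hash distinct; the set
+ # sizes asserted below check exactly that.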
+ creds0 = _build_credentials_tuple("GSSAPI", None, "user", "pass", {}, None) + + creds1 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) + + creds2 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "A"}}, None + ) + + creds3 = _build_credentials_tuple( + "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None + ) + + self.assertEqual(1, len({creds1, creds2})) + self.assertEqual(3, len({creds0, creds1, creds2, creds3})) + + @ignore_deprecations + async def test_gssapi_simple(self): + assert GSSAPI_PRINCIPAL is not None + if GSSAPI_PASS is not None: + uri = "mongodb://%s:%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_PASS, + GSSAPI_HOST, + GSSAPI_PORT, + ) + else: + uri = "mongodb://%s@%s:%d/?authMechanism=GSSAPI" % ( + quote_plus(GSSAPI_PRINCIPAL), + GSSAPI_HOST, + GSSAPI_PORT, + ) + + if not self.service_realm_required: + # Without authMechanismProperties. + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + ) + + await client[GSSAPI_DB].collection.find_one() + + # Log in using URI, without authMechanismProperties. + client = self.simple_client(uri) + await client[GSSAPI_DB].collection.find_one() + + # Authenticate with authMechanismProperties. + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + + await client[GSSAPI_DB].collection.find_one() + + # Log in using URI, with authMechanismProperties. + mech_properties_str = "" + for key, value in self.mech_properties.items(): + mech_properties_str += f"{key}:{value}," + mech_uri = uri + f"&authMechanismProperties={mech_properties_str[:-1]}" + client = self.simple_client(mech_uri) + await client[GSSAPI_DB].collection.find_one() + + set_name = async_client_context.replica_set_name + if set_name: + if not self.service_realm_required: + # Without authMechanismProperties + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + replicaSet=set_name, + ) + + await client[GSSAPI_DB].list_collection_names() + + uri = uri + f"&replicaSet={set_name!s}" + client = self.simple_client(uri) + await client[GSSAPI_DB].list_collection_names() + + # With authMechanismProperties + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) + + await client[GSSAPI_DB].list_collection_names() + + mech_uri = mech_uri + f"&replicaSet={set_name!s}" + client = self.simple_client(mech_uri) + await client[GSSAPI_DB].list_collection_names() + + @ignore_deprecations + @async_client_context.require_sync + async def test_gssapi_threaded(self): + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + + # Authentication succeeded? + await client.server_info() + db = client[GSSAPI_DB] + + # Need one document in the collection. 
AutoAuthenticateThread does + # collection.find_one with a 1-second delay, forcing it to check out + # multiple connections from the pool concurrently, proving that + # auto-authentication works with GSSAPI. + collection = db.test + if not await collection.count_documents({}): + try: + await collection.drop() + await collection.insert_one({"_id": 1}) + except OperationFailure: + raise SkipTest("User must be able to write.") + + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(collection)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + set_name = async_client_context.replica_set_name + if set_name: + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + replicaSet=set_name, + ) + + # Succeeded? + await client.server_info() + + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(collection)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + async def test_gssapi_canonicalize_host_name(self): + # Test the low level method. + assert GSSAPI_HOST is not None + result = await _canonicalize_hostname(GSSAPI_HOST, "forward") + if "compute-1.amazonaws.com" not in result: + self.assertEqual(result, GSSAPI_HOST) + result = await _canonicalize_hostname(GSSAPI_HOST, "forwardAndReverse") + self.assertEqual(result, GSSAPI_HOST) + + # Use the equivalent named CANONICALIZE_HOST_NAME. + props = self.mech_properties.copy() + if props["CANONICALIZE_HOST_NAME"] == "true": + props["CANONICALIZE_HOST_NAME"] = "forwardAndReverse" + else: + props["CANONICALIZE_HOST_NAME"] = "none" + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=props, + ) + await client.server_info() + + async def test_gssapi_host_name(self): + props = self.mech_properties + props["SERVICE_HOST"] = "example.com" + + # Authenticate with authMechanismProperties. 
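+        # SERVICE_HOST now names a host ("example.com") that does not match
+        # the server, so the Kerberos handshake below is expected to fail.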
+ client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + with self.assertRaises(OperationFailure): + await client.server_info() + + props["SERVICE_HOST"] = GSSAPI_HOST + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + await client.server_info() + + +class TestSASLPlain(AsyncPyMongoTestCase): + @classmethod + def setUpClass(cls): + if not SASL_HOST or not SASL_USER or not SASL_PASS: + raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") + + async def test_sasl_plain(self): + client = self.simple_client( + SASL_HOST, + SASL_PORT, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) + await client.ldap.test.find_one() + + assert SASL_USER is not None + assert SASL_PASS is not None + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) + client = self.simple_client(uri) + await client.ldap.test.find_one() + + set_name = async_client_context.replica_set_name + if set_name: + client = self.simple_client( + SASL_HOST, + SASL_PORT, + replicaSet=set_name, + username=SASL_USER, + password=SASL_PASS, + authSource=SASL_DB, + authMechanism="PLAIN", + ) + await client.ldap.test.find_one() + + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s;replicaSet=%s" % ( + quote_plus(SASL_USER), + quote_plus(SASL_PASS), + SASL_HOST, + SASL_PORT, + SASL_DB, + str(set_name), + ) + client = self.simple_client(uri) + await client.ldap.test.find_one() + + async def test_sasl_plain_bad_credentials(self): + def auth_string(user, password): + uri = "mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;authSource=%s" % ( + quote_plus(user), + quote_plus(password), + SASL_HOST, + SASL_PORT, + SASL_DB, + ) + return uri + + bad_user = self.simple_client(auth_string("not-user", SASL_PASS)) + bad_pwd = self.simple_client(auth_string(SASL_USER, "not-pwd")) + # OperationFailure raised upon connecting. 
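+        # The client connects lazily, so the bad credentials only surface when
+        # the first command ("ping") checks out and authenticates a connection.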
+ with self.assertRaises(OperationFailure): + await bad_user.admin.command("ping") + with self.assertRaises(OperationFailure): + await bad_pwd.admin.command("ping") + + +class TestSCRAMSHA1(AsyncIntegrationTest): + @async_client_context.require_auth + async def asyncSetUp(self): + await super().asyncSetUp() + await async_client_context.create_user( + "pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"] + ) + + async def asyncTearDown(self): + await async_client_context.drop_user("pymongo_test", "user") + await super().asyncTearDown() + + @async_client_context.require_no_fips + async def test_scram_sha1(self): + host, port = await async_client_context.host, await async_client_context.port + + client = await self.async_rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) + ) + await client.pymongo_test.command("dbstats") + + if async_client_context.is_rs: + uri = ( + "mongodb://user:pass" + "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" + "&replicaSet=%s" % (host, port, async_client_context.replica_set_name) + ) + client = await self.async_single_client_noauth(uri) + await client.pymongo_test.command("dbstats") + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + await db.command("dbstats") + + +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#scram-sha-256-and-mechanism-negotiation +class TestSCRAM(AsyncIntegrationTest): + @async_client_context.require_auth + @async_client_context.require_version_min(3, 7, 2) + async def asyncSetUp(self): + await super().asyncSetUp() + self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS + monitoring._SENSITIVE_COMMANDS = set() + self.listener = AllowListEventListener("saslStart") + + async def asyncTearDown(self): + monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS + await async_client_context.client.testscram.command("dropAllUsersFromDatabase") + await async_client_context.client.drop_database("testscram") + await super().asyncTearDown() + + async def test_scram_skip_empty_exchange(self): + listener = AllowListEventListener("saslStart", "saslContinue") + await async_client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = await self.async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] + ) + await client.testscram.command("dbstats") + + if async_client_context.version < (4, 4, -1): + # Assert we sent the skipEmptyExchange option. + first_event = listener.started_events[0] + self.assertEqual(first_event.command_name, "saslStart") + self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) + + # Assert the third exchange was skipped on servers that support it. + # Note that the first exchange occurs on the connection handshake. 
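+        # With speculative authentication the saslStart is carried by the
+        # connection handshake, so only saslContinue commands remain visible
+        # to the listener on servers that support it.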
+ started = listener.started_command_names() + if async_client_context.version.at_least(4, 4, -1): + self.assertEqual(started, ["saslContinue"]) + else: + self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) + + @async_client_context.require_no_fips + async def test_scram(self): + # Step 1: create users + await async_client_context.create_user( + "testscram", "sha1", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-1"] + ) + await async_client_context.create_user( + "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + await async_client_context.create_user( + "testscram", + "both", + "pwd", + roles=["dbOwner"], + mechanisms=["SCRAM-SHA-1", "SCRAM-SHA-256"], + ) + + # Step 2: verify auth success cases + client = await self.async_rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 + client = await self.async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" + ) + await client.testscram.command("dbstats") + client = await self.async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + self.listener.reset() + client = await self.async_rs_or_single_client_noauth( + username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] + ) + await client.testscram.command("dbstats") + if async_client_context.version.at_least(4, 4, -1): + # Speculative authentication in 4.4+ sends saslStart with the + # handshake. 
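+            # The allow-list listener only records saslStart, so no started
+            # events at all are expected here.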
+            self.assertEqual(self.listener.started_events, [])
+        else:
+            started = self.listener.started_events[0]
+            self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256")
+
+        # Step 3: verify auth failure conditions
+        client = await self.async_rs_or_single_client_noauth(
+            username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256"
+        )
+        with self.assertRaises(OperationFailure):
+            await client.testscram.command("dbstats")
+
+        client = await self.async_rs_or_single_client_noauth(
+            username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1"
+        )
+        with self.assertRaises(OperationFailure):
+            await client.testscram.command("dbstats")
+
+        client = await self.async_rs_or_single_client_noauth(
+            username="not-a-user", password="pwd", authSource="testscram"
+        )
+        with self.assertRaises(OperationFailure):
+            await client.testscram.command("dbstats")
+
+        if async_client_context.is_rs:
+            host, port = await async_client_context.host, await async_client_context.port
+            uri = "mongodb://both:pwd@%s:%d/testscram?replicaSet=%s" % (
+                host,
+                port,
+                async_client_context.replica_set_name,
+            )
+            client = await self.async_single_client_noauth(uri)
+            await client.testscram.command("dbstats")
+            db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY)
+            await db.command("dbstats")
+
+    @unittest.skipUnless(HAVE_STRINGPREP, "Cannot test without stringprep")
+    async def test_scram_saslprep(self):
+        # Step 4: test SASLprep
+        host, port = await async_client_context.host, await async_client_context.port
+        # Test the use of SASLprep on passwords. For example,
+        # saslprep('\u2163') becomes 'IV' and saslprep('I\u00ADX')
+        # becomes 'IX'. SASLprep is only supported when the standard
+        # library provides stringprep.
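+        # For reference: "\u2168" is ROMAN NUMERAL NINE and "\u2163" is ROMAN
+        # NUMERAL FOUR, so the users created below should also be able to log
+        # in with the prepared forms "IX" and "IV".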
+ await async_client_context.create_user( + "testscram", "\u2168", "\u2163", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + await async_client_context.create_user( + "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] + ) + + client = await self.async_rs_or_single_client_noauth( + username="\u2168", password="\u2163", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="\u2168", + password="\u2163", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="\u2168", password="IV", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="IX", password="I\u00ADX", authSource="testscram" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="IX", + password="I\u00ADX", + authSource="testscram", + authMechanism="SCRAM-SHA-256", + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + client = await self.async_rs_or_single_client_noauth( + "mongodb://\u2168:IV@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + + client = await self.async_rs_or_single_client_noauth( + "mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + client = await self.async_rs_or_single_client_noauth( + "mongodb://IX:IX@%s:%d/testscram" % (host, port) + ) + await client.testscram.command("dbstats") + + async def test_cache(self): + client = await self.async_single_client() + credentials = client.options.pool_options._credentials + cache = credentials.cache + self.assertIsNotNone(cache) + self.assertIsNone(cache.data) + # Force authentication. 
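+        # Any authenticated command works here; "ping" is cheap and populates
+        # the SCRAM cache entries (client key, server key, salt, iteration
+        # count) that are inspected below.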
+ await client.admin.command("ping") + cache = credentials.cache + self.assertIsNotNone(cache) + data = cache.data + self.assertIsNotNone(data) + self.assertEqual(len(data), 4) + ckey, skey, salt, iterations = data + self.assertIsInstance(ckey, bytes) + self.assertIsInstance(skey, bytes) + self.assertIsInstance(salt, bytes) + self.assertIsInstance(iterations, int) + + @async_client_context.require_sync + async def test_scram_threaded(self): + coll = async_client_context.client.db.test + await coll.drop() + await coll.insert_one({"_id": 1}) + + # The first thread to call find() will authenticate + client = await self.async_rs_or_single_client() + coll = client.db.test + threads = [] + for _ in range(4): + threads.append(AutoAuthenticateThread(coll)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() + self.assertTrue(thread.success) + + +class TestAuthURIOptions(AsyncIntegrationTest): + @async_client_context.require_auth + async def asyncSetUp(self): + await super().asyncSetUp() + await async_client_context.create_user("admin", "admin", "pass") + await async_client_context.create_user( + "pymongo_test", "user", "pass", ["userAdmin", "readWrite"] + ) + + async def asyncTearDown(self): + await async_client_context.drop_user("pymongo_test", "user") + await async_client_context.drop_user("admin", "admin") + await super().asyncTearDown() + + async def test_uri_options(self): + # Test default to admin + host, port = await async_client_context.host, await async_client_context.port + client = await self.async_rs_or_single_client_noauth( + "mongodb://admin:pass@%s:%d" % (host, port) + ) + self.assertTrue(await client.admin.command("dbstats")) + + if async_client_context.is_rs: + uri = "mongodb://admin:pass@%s:%d/?replicaSet=%s" % ( + host, + port, + async_client_context.replica_set_name, + ) + client = await self.async_single_client_noauth(uri) + self.assertTrue(await client.admin.command("dbstats")) + db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + # Test explicit database + uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) + client = await self.async_rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.admin.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + + if async_client_context.is_rs: + uri = "mongodb://user:pass@%s:%d/pymongo_test?replicaSet=%s" % ( + host, + port, + async_client_context.replica_set_name, + ) + client = await self.async_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.admin.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + # Test authSource + uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) + client = await self.async_rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.pymongo_test2.command("dbstats") + self.assertTrue(await client.pymongo_test.command("dbstats")) + + if async_client_context.is_rs: + uri = ( + "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" + "%s;authSource=pymongo_test" % (host, port, async_client_context.replica_set_name) + ) + client = await self.async_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + await client.pymongo_test2.command("dbstats") + 
self.assertTrue(await client.pymongo_test.command("dbstats")) + db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) + self.assertTrue(await db.command("dbstats")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_auth_oidc.py b/test/asynchronous/test_auth_oidc.py new file mode 100644 index 0000000000..639c155e73 --- /dev/null +++ b/test/asynchronous/test_auth_oidc.py @@ -0,0 +1,1192 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-OIDC Authentication.""" +from __future__ import annotations + +import os +import sys +import time +import unittest +import warnings +from contextlib import asynccontextmanager +from pathlib import Path +from test.asynchronous import AsyncPyMongoTestCase +from test.asynchronous.helpers import ConcurrentRunner +from typing import Dict + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import EventListener, OvertCommandListener + +from bson import SON +from pymongo import AsyncMongoClient +from pymongo._azure_helpers import _get_azure_response +from pymongo._gcp_helpers import _get_gcp_response +from pymongo.asynchronous.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + _get_authenticator, +) +from pymongo.auth_oidc_shared import _get_k8s_token +from pymongo.auth_shared import _build_credentials_tuple +from pymongo.cursor_shared import CursorType +from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.operations import InsertOne +from pymongo.synchronous.uri_parser import parse_uri + +_IS_SYNC = False + +ROOT = Path(__file__).parent.parent.resolve() +TEST_PATH = ROOT / "auth" / "unified" +ENVIRON = os.environ.get("OIDC_ENV", "test") +DOMAIN = os.environ.get("OIDC_DOMAIN", "") +TOKEN_DIR = os.environ.get("OIDC_TOKEN_DIR", "") +TOKEN_FILE = os.environ.get("OIDC_TOKEN_FILE", "") + +# Generate unified tests. 
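+# generate_test_classes builds unittest.TestCase subclasses from the JSON spec
+# files under TEST_PATH and returns them keyed by class name; updating
+# globals() makes them discoverable by the test runner alongside the
+# hand-written tests in this module.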
+globals().update(generate_test_classes(str(TEST_PATH), module=__name__))
+
+pytestmark = pytest.mark.auth_oidc
+
+
+class OIDCTestBase(AsyncPyMongoTestCase):
+    @classmethod
+    def setUpClass(cls):
+        cls.uri_single = os.environ["MONGODB_URI_SINGLE"]
+        cls.uri_multiple = os.environ.get("MONGODB_URI_MULTI")
+        cls.uri_admin = os.environ["MONGODB_URI"]
+        if ENVIRON == "test":
+            if not TOKEN_DIR:
+                raise ValueError("Please set OIDC_TOKEN_DIR")
+            if not TOKEN_FILE:
+                raise ValueError("Please set OIDC_TOKEN_FILE")
+
+    async def asyncSetUp(self):
+        self.request_called = 0
+
+    def get_token(self, username=None):
+        """Get a token for the current provider."""
+        if ENVIRON == "test":
+            if username is None:
+                token_file = TOKEN_FILE
+            else:
+                token_file = os.path.join(TOKEN_DIR, username)
+            with open(token_file) as fid:  # noqa: ASYNC101,RUF100
+                return fid.read()
+        elif ENVIRON == "azure":
+            opts = parse_uri(self.uri_single)["options"]
+            token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"]
+            return _get_azure_response(token_aud, username)["access_token"]
+        elif ENVIRON == "gcp":
+            opts = parse_uri(self.uri_single)["options"]
+            token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"]
+            return _get_gcp_response(token_aud, username)["access_token"]
+        elif ENVIRON == "k8s":
+            return _get_k8s_token()
+        else:
+            raise ValueError(f"Unknown ENVIRON: {ENVIRON}")
+
+    @asynccontextmanager
+    async def fail_point(self, command_args):
+        cmd_on = SON([("configureFailPoint", "failCommand")])
+        cmd_on.update(command_args)
+        client = AsyncMongoClient(self.uri_admin)
+        await client.admin.command(cmd_on)
+        try:
+            yield
+        finally:
+            await client.admin.command(
+                "configureFailPoint", cmd_on["configureFailPoint"], mode="off"
+            )
+            await client.close()
+
+
+class TestAuthOIDCHuman(OIDCTestBase):
+    uri: str
+
+    @classmethod
+    def setUpClass(cls):
+        if ENVIRON != "test":
+            raise unittest.SkipTest("Human workflows are only tested with the test environment")
+        if not DOMAIN:
+            raise ValueError("Missing OIDC_DOMAIN")
+        super().setUpClass()
+
+    async def asyncSetUp(self):
+        self.refresh_present = 0
+        await super().asyncSetUp()
+
+    def create_request_cb(self, username="test_user1", sleep=0):
+        def request_token(context: OIDCCallbackContext):
+            # Validate the info.
+            self.assertIsInstance(context.idp_info.issuer, str)
+            if context.idp_info.clientId is not None:
+                self.assertIsInstance(context.idp_info.clientId, str)
+
+            # Validate the timeout.
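+            # The human flow budgets a generous timeout (five minutes here),
+            # presumably to leave room for an interactive browser login before
+            # the callback returns.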
+ timeout_seconds = context.timeout_seconds + self.assertEqual(timeout_seconds, 60 * 5) + + if context.refresh_token: + self.refresh_present += 1 + + token = self.get_token(username) + resp = OIDCCallbackResult(access_token=token, refresh_token=token) + + time.sleep(sleep) + self.request_called += 1 + return resp + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + async def create_client(self, *args, **kwargs): + username = kwargs.get("username", "test_user1") + if kwargs.get("username") in ["test_user1", "test_user2"]: + kwargs["username"] = f"{username}@{DOMAIN}" + request_cb = kwargs.pop("request_cb", self.create_request_cb(username=username)) + props = kwargs.pop("authmechanismproperties", {"OIDC_HUMAN_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + + client = self.simple_client(*args, authmechanismproperties=props, **kwargs) + + return client + + async def test_1_1_single_principal_implicit_username(self): + # Create default OIDC client with authMechanism=MONGODB-OIDC. + client = await self.create_client() + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_2_single_principal_explicit_username(self): + # Create a client with MONGODB_URI_SINGLE, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(username="test_user1") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_3_multiple_principal_user_1(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple, username="test_user1") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_4_multiple_principal_user_2(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a human callback that reads in the generated test_user2 token file. + # Create a client with MONGODB_URI_MULTI, a username of test_user2, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple, username="test_user2") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_5_multiple_principal_no_user(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, no username, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(self.uri_multiple) + # Assert that a find operation fails. + with self.assertRaises(OperationFailure): + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_6_allowed_hosts_blocked(self): + # Create a default OIDC client, with an ALLOWED_HOSTS that is an empty list. 
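+        # An empty allow-list rejects every server host on the client side, so
+        # authentication must fail before any token request is made.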
+ request_token = self.create_request_cb() + props: Dict = {"OIDC_HUMAN_CALLBACK": request_token, "ALLOWED_HOSTS": []} + client = await self.create_client(authmechanismproperties=props) + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + await client.test.test.find_one() + # Close the client. + await client.close() + + # Create a client that uses the URL mongodb://localhost/?authMechanism=MONGODB-OIDC&ignored=example.com, + # a human callback, and an ALLOWED_HOSTS that contains ["example.com"]. + props: Dict = { + "OIDC_HUMAN_CALLBACK": request_token, + "ALLOWED_HOSTS": ["example.com"], + } + with warnings.catch_warnings(): + warnings.simplefilter("default") + client = await self.create_client( + self.uri_single + "&ignored=example.com", + authmechanismproperties=props, + connect=False, + ) + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_1_7_allowed_hosts_in_connection_string_ignored(self): + # Create an OIDC configured client with the connection string: `mongodb+srv://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D` and a Human Callback. + # Assert that the creation of the client raises a configuration error. + uri = "mongodb+srv://example.com?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D" + with self.assertRaises(ConfigurationError), warnings.catch_warnings(): + warnings.simplefilter("ignore") + c = AsyncMongoClient( + uri, + authmechanismproperties=dict(OIDC_HUMAN_CALLBACK=self.create_request_cb()), + ) + await c.aconnect() + + async def test_1_8_machine_idp_human_callback(self): + if not os.environ.get("OIDC_IS_LOCAL"): + raise unittest.SkipTest("Test Requires Local OIDC server") + # Create a client with MONGODB_URI_SINGLE, a username of test_machine, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = await self.create_client(username="test_machine") + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_2_1_valid_callback_inputs(self): + # Create a AsyncMongoClient with a human callback that validates its inputs and returns a valid access token. + client = await self.create_client() + # Perform a find operation that succeeds. Verify that the human callback was called with the appropriate inputs, including the timeout parameter if possible. + # Ensure that there are no unexpected fields. + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_2_2_callback_returns_missing_data(self): + # Create a AsyncMongoClient with a human callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCB(OIDCCallback): + def fetch(self, ctx): + return dict() + + client = await self.create_client(request_cb=CustomCB()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + await client.test.test.find_one() + # Close the client. + await client.close() + + async def test_2_3_refresh_token_is_passed_to_the_callback(self): + # Create a AsyncMongoClient with a human callback that checks for the presence of a refresh token. + client = await self.create_client() + + # Perform a find operation that succeeds. 
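+        # The first find drives the initial callback invocation (no refresh
+        # token exists yet); the errorCode 391 fail point below then forces a
+        # reauthentication, where the cached refresh token should be passed
+        # back to the callback.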
+        await client.test.test.find_one()
+
+        # Set a fail point for ``find`` commands.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a ``find`` operation that succeeds.
+            await client.test.test.find_one()
+
+        # Assert that the callback has been called twice.
+        self.assertEqual(self.request_called, 2)
+
+        # Assert that the refresh token was used once.
+        self.assertEqual(self.refresh_present, 1)
+
+    async def test_3_1_uses_speculative_authentication_if_there_is_a_cached_token(self):
+        # Create a client with a human callback that returns a valid token.
+        client = await self.create_client()
+
+        # Set a fail point for ``find`` commands.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True},
+            }
+        ):
+            # Perform a ``find`` operation that fails.
+            with self.assertRaises(AutoReconnect):
+                await client.test.test.find_one()
+
+        # Set a fail point for ``saslStart`` commands.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["saslStart"], "errorCode": 18},
+            }
+        ):
+            # Perform a ``find`` operation that succeeds.
+            await client.test.test.find_one()
+
+        # Close the client.
+        await client.close()
+
+    async def test_3_2_does_not_use_speculative_authentication_if_there_is_no_cached_token(self):
+        # Create an ``AsyncMongoClient`` with a human callback that returns a valid token.
+        client = await self.create_client()
+
+        # Set a fail point for ``saslStart`` commands.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["saslStart"], "errorCode": 18},
+            }
+        ):
+            # Perform a ``find`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                await client.test.test.find_one()
+
+        # Close the client.
+        await client.close()
+
+    async def test_4_1_reauthenticate_succeeds(self):
+        # Create a default OIDC client and add an event listener.
+        # The following assumes that the driver does not emit saslStart or saslContinue events.
+        # If the driver does emit those events, ignore/filter them for the purposes of this test.
+        listener = OvertCommandListener()
+        client = await self.create_client(event_listeners=[listener])
+
+        # Perform a find operation that succeeds.
+        await client.test.test.find_one()
+
+        # Assert that the human callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Clear the listener state if possible.
+        listener.reset()
+
+        # Force a reauthentication using a fail point.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform another find operation that succeeds.
+            await client.test.test.find_one()
+
+        # Assert that the human callback has been called twice.
+        self.assertEqual(self.request_called, 2)
+
+        # Assert that the list of started events is [find, find].
+        # Note that if the listener state could not be cleared then there will be an extra find command.
+        started_events = [
+            i.command_name for i in listener.started_events if not i.command_name.startswith("sasl")
+        ]
+        succeeded_events = [
+            i.command_name
+            for i in listener.succeeded_events
+            if not i.command_name.startswith("sasl")
+        ]
+        failed_events = [
+            i.command_name for i in listener.failed_events if not i.command_name.startswith("sasl")
+        ]
+
+        self.assertEqual(
+            started_events,
+            [
+                "find",
+                "find",
+            ],
+        )
+        # Assert that the list of command succeeded events is [find].
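+        # (The find issued inside the fail point fails once with error 391 and
+        # is retried on a reauthenticated connection, hence exactly one failed
+        # and one succeeded find event.)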
+        self.assertEqual(succeeded_events, ["find"])
+        # Assert that a find operation failed once during the command execution.
+        self.assertEqual(failed_events, ["find"])
+        # Close the client.
+        await client.close()
+
+    async def test_4_2_reauthenticate_succeeds_no_refresh(self):
+        # Create a default OIDC client with a human callback that does not return a refresh token.
+        cb = self.create_request_cb()
+
+        class CustomRequest(OIDCCallback):
+            def fetch(self, *args, **kwargs):
+                result = cb.fetch(*args, **kwargs)
+                result.refresh_token = None
+                return result
+
+        client = await self.create_client(request_cb=CustomRequest())
+
+        # Perform a find operation that succeeds.
+        await client.test.test.find_one()
+
+        # Assert that the human callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Force a reauthentication using a fail point.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation that succeeds.
+            await client.test.test.find_one()
+
+        # Assert that the human callback has been called twice.
+        self.assertEqual(self.request_called, 2)
+        # Close the client.
+        await client.close()
+
+    async def test_4_3_reauthenticate_succeeds_after_refresh_fails(self):
+        # Create a default OIDC client with a human callback that returns an invalid refresh token.
+        cb = self.create_request_cb()
+
+        class CustomRequest(OIDCCallback):
+            def fetch(self, *args, **kwargs):
+                result = cb.fetch(*args, **kwargs)
+                result.refresh_token = "bad"
+                return result
+
+        client = await self.create_client(request_cb=CustomRequest())
+
+        # Perform a find operation that succeeds.
+        await client.test.test.find_one()
+
+        # Assert that the human callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Force a reauthentication using a fail point.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation that succeeds.
+            await client.test.test.find_one()
+
+        # Assert that the human callback has been called 2 times.
+        self.assertEqual(self.request_called, 2)
+
+        # Close the client.
+        await client.close()
+
+    async def test_4_4_reauthenticate_fails(self):
+        # Create a default OIDC client with a human callback that returns invalid refresh tokens
+        # and returns invalid access tokens after the first access.
+        cb = self.create_request_cb()
+
+        class CustomRequest(OIDCCallback):
+            fetch_called = 0
+
+            def fetch(self, *args, **kwargs):
+                self.fetch_called += 1
+                result = cb.fetch(*args, **kwargs)
+                result.refresh_token = "bad"
+                if self.fetch_called > 1:
+                    result.access_token = "bad"
+                return result
+
+        client = await self.create_client(request_cb=CustomRequest())
+
+        # Perform a find operation that succeeds (to force a speculative auth).
+        await client.test.test.find_one()
+        # Assert that the human callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Force a reauthentication using a failCommand.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation that fails.
+            with self.assertRaises(OperationFailure):
+                await client.test.test.find_one()
+
+        # Assert that the human callback has been called three times.
+        self.assertEqual(self.request_called, 3)
+
+        # Close the client.
+ await client.close() + + async def test_request_callback_returns_null(self): + class RequestTokenNull(OIDCCallback): + def fetch(self, a): + return None + + client = await self.create_client(request_cb=RequestTokenNull()) + with self.assertRaises(ValueError): + await client.test.test.find_one() + await client.close() + + async def test_request_callback_invalid_result(self): + class CallbackInvalidToken(OIDCCallback): + def fetch(self, a): + return {} + + client = await self.create_client(request_cb=CallbackInvalidToken()) + with self.assertRaises(ValueError): + await client.test.test.find_one() + await client.close() + + async def test_reauthentication_succeeds_multiple_connections(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + client1 = await self.create_client(request_cb=request_cb) + client2 = await self.create_client(request_cb=request_cb) + + # Perform an insert operation. + await client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + await client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + await client1.test.test.find_one() + await client2.test.test.find_one() + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + await client1.close() + await client2.close() + + # PyMongo specific tests, since we have multiple code paths for reauth handling. + + async def test_reauthenticate_succeeds_bulk_write(self): + # Create a client. + client = await self.create_client() + + # Perform a find operation. + await client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a bulk write operation. + await client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_bulk_read(self): + # Create a client. + client = await self.create_client() + + # Perform a find operation. + await client.test.test.find_one() + + # Perform a bulk write operation. + await client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a bulk read operation. + cursor = client.test.test.find_raw_batches({}) + await cursor.to_list() + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_cursor(self): + # Create a client. 
+ client = await self.create_client() + + # Perform an insert operation. + await client.test.test.insert_one({"a": 1}) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}) + self.assertGreaterEqual(len(await cursor.to_list()), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_get_more(self): + # Create a client. + client = await self.create_client() + + # Perform an insert operation. + await client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1) + self.assertGreaterEqual(len(await cursor.to_list()), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_get_more_exhaust(self): + # Ensure no mongos + client = await self.create_client() + hello = await client.admin.command(HelloCompat.LEGACY_CMD) + if hello.get("msg") != "isdbgrid": + raise unittest.SkipTest("Must not be a mongos") + + # Create a client with the callback. + client = await self.create_client() + + # Perform an insert operation. + await client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST) + self.assertGreaterEqual(len(await cursor.to_list()), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + await client.close() + + async def test_reauthenticate_succeeds_command(self): + # Create a client. + client = await self.create_client() + + # Perform an insert operation. + await client.test.test.insert_one({"a": 1}) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["count"], "errorCode": 391}, + } + ): + # Perform a count operation. + cursor = await client.test.command({"count": "test"}) + + self.assertGreaterEqual(len(cursor), 1) + + # Assert that the request callback has been called twice. 
+ self.assertEqual(self.request_called, 2) + await client.close() + + +class TestAuthOIDCMachine(OIDCTestBase): + uri: str + + async def asyncSetUp(self): + self.request_called = 0 + + def create_request_cb(self, username=None, sleep=0): + def request_token(context): + assert isinstance(context.timeout_seconds, int) + assert context.version == 1 + assert context.refresh_token is None + assert context.idp_info is None + token = self.get_token(username) + time.sleep(sleep) + self.request_called += 1 + return OIDCCallbackResult(access_token=token) + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + async def create_client(self, *args, **kwargs): + request_cb = kwargs.pop("request_cb", self.create_request_cb()) + props = kwargs.pop("authmechanismproperties", {"OIDC_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + client = AsyncMongoClient(*args, authmechanismproperties=props, **kwargs) + self.addAsyncCleanup(client.close) + return client + + async def test_1_1_callback_is_called_during_reauthentication(self): + # Create a ``AsyncMongoClient`` configured with a custom OIDC callback that + # implements the provider logic. + client = await self.create_client() + # Perform a ``find`` operation that succeeds. + await client.test.test.find_one() + # Assert that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + async def test_1_2_callback_is_called_once_for_multiple_connections(self): + # Create a ``AsyncMongoClient`` configured with a custom OIDC callback that + # implements the provider logic. + client = await self.create_client() + await client.aconnect() + + # Start 10 tasks and run 100 find operations that all succeed in each task. + async def target(): + for _ in range(100): + await client.test.test.find_one() + + tasks = [] + for i in range(10): + tasks.append(ConcurrentRunner(target=target)) + for t in tasks: + await t.start() + for t in tasks: + await t.join() + # Assert that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + async def test_2_1_valid_callback_inputs(self): + # Create a AsyncMongoClient configured with an OIDC callback that validates its inputs and returns a valid access token. + client = await self.create_client() + # Perform a find operation that succeeds. + await client.test.test.find_one() + # Assert that the OIDC callback was called with the appropriate inputs, including the timeout parameter if possible. Ensure that there are no unexpected fields. + self.assertEqual(self.request_called, 1) + + async def test_2_2_oidc_callback_returns_null(self): + # Create a AsyncMongoClient configured with an OIDC callback that returns null. + class CallbackNullToken(OIDCCallback): + def fetch(self, a): + return None + + client = await self.create_client(request_cb=CallbackNullToken()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + await client.test.test.find_one() + + async def test_2_3_oidc_callback_returns_missing_data(self): + # Create a AsyncMongoClient configured with an OIDC callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, a): + self.count += 1 + return object() + + client = await self.create_client(request_cb=CustomCallback()) + # Perform a find operation that fails. 
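+        # The driver validates the callback's return type itself, so a result
+        # that is not an OIDCCallbackResult surfaces as a client-side ValueError.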
+ with self.assertRaises(ValueError): + await client.test.test.find_one() + + async def test_2_4_invalid_client_configuration_with_callback(self): + # Create a AsyncMongoClient configured with an OIDC callback and auth mechanism property ENVIRONMENT:test. + request_cb = self.create_request_cb() + props: Dict = {"OIDC_CALLBACK": request_cb, "ENVIRONMENT": "test"} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + await self.create_client(authmechanismproperties=props) + + async def test_2_5_invalid_use_of_ALLOWED_HOSTS(self): + # Create an OIDC configured client with auth mechanism properties `{"ENVIRONMENT": "test", "ALLOWED_HOSTS": []}`. + props: Dict = {"ENVIRONMENT": "test", "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + await self.create_client(authmechanismproperties=props) + + # Create an OIDC configured client with auth mechanism properties `{"OIDC_CALLBACK": "", "ALLOWED_HOSTS": []}`. + props: Dict = {"OIDC_CALLBACK": self.create_request_cb(), "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + await self.create_client(authmechanismproperties=props) + + async def test_2_6_ALLOWED_HOSTS_defaults_ignored(self): + # Create a MongoCredential for OIDC with a machine callback. + props = {"OIDC_CALLBACK": self.create_request_cb()} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, "foo", None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "foo" + + # Create a MongoCredential for OIDC with an ENVIRONMENT. + props = {"ENVIRONMENT": "test"} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, None, None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "" + + async def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_retry(self): + # Create a AsyncMongoClient and an OIDC callback that implements the provider logic. + client = await self.create_client() + await client.aconnect() + # Poison the cache with an invalid access token. + # Set a fail point for ``find`` command. + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True}, + } + ): + # Perform a ``find`` operation that fails. This is to force the ``AsyncMongoClient`` + # to cache an access token. + with self.assertRaises(AutoReconnect): + await client.test.test.find_one() + # Poison the cache of the client. + client.options.pool_options._credentials.cache.data.access_token = "bad" + # Reset the request count. + self.request_called = 0 + # Verify that a find succeeds. + await client.test.test.find_one() + # Verify that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + async def test_3_2_authentication_failures_without_cached_tokens_returns_an_error(self): + # Create a AsyncMongoClient configured with retryReads=false and an OIDC callback that always returns invalid access tokens. 
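+        # retryReads=false (set in create_client) ensures the failing find is
+        # not retried on another connection, so the bad token surfaces directly
+        # as an OperationFailure.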
+        class CustomCallback(OIDCCallback):
+            count = 0
+
+            def fetch(self, a):
+                self.count += 1
+                return OIDCCallbackResult(access_token="bad value")
+
+        callback = CustomCallback()
+        client = await self.create_client(request_cb=callback)
+        # Perform a ``find`` operation that fails.
+        with self.assertRaises(OperationFailure):
+            await client.test.test.find_one()
+        # Verify that the callback was called 1 time.
+        self.assertEqual(callback.count, 1)
+
+    async def test_3_3_unexpected_error_code_does_not_clear_cache(self):
+        # Create an ``AsyncMongoClient`` with a callback that returns a valid token.
+        client = await self.create_client()
+
+        # Set a fail point for ``saslStart`` commands.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["saslStart"], "errorCode": 20},
+            }
+        ):
+            # Perform a ``find`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                await client.test.test.find_one()
+
+        # Assert that the callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Perform a ``find`` operation that succeeds.
+        await client.test.test.find_one()
+
+        # Assert that the callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+    async def test_4_1_reauthentication_succeeds(self):
+        # Create an ``AsyncMongoClient`` configured with a custom OIDC callback that
+        # implements the provider logic.
+        client = await self.create_client()
+        await client.aconnect()
+
+        # Set a fail point for the find command.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a ``find`` operation that succeeds.
+            await client.test.test.find_one()
+
+        # Verify that the callback was called 2 times (once during the connection
+        # handshake, and again during reauthentication).
+        self.assertEqual(self.request_called, 2)
+
+    async def test_4_2_read_commands_fail_if_reauthentication_fails(self):
+        # Create an ``AsyncMongoClient`` whose OIDC callback returns one good token and then
+        # bad tokens after the first call.
+        get_token = self.get_token
+
+        class CustomCallback(OIDCCallback):
+            count = 0
+
+            def fetch(self, _):
+                self.count += 1
+                if self.count == 1:
+                    access_token = get_token()
+                else:
+                    access_token = "bad value"
+                return OIDCCallbackResult(access_token=access_token)
+
+        callback = CustomCallback()
+        client = await self.create_client(request_cb=callback)
+
+        # Perform a read operation that succeeds.
+        await client.test.test.find_one()
+
+        # Set a fail point for the find command.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a ``find`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                await client.test.test.find_one()
+
+        # Verify that the callback was called 2 times.
+        self.assertEqual(callback.count, 2)
+
+    async def test_4_3_write_commands_fail_if_reauthentication_fails(self):
+        # Create an ``AsyncMongoClient`` whose OIDC callback returns one good token and then
+        # bad tokens after the first call.
+        get_token = self.get_token
+
+        class CustomCallback(OIDCCallback):
+            count = 0
+
+            def fetch(self, _):
+                self.count += 1
+                if self.count == 1:
+                    access_token = get_token()
+                else:
+                    access_token = "bad value"
+                return OIDCCallbackResult(access_token=access_token)
+
+        callback = CustomCallback()
+        client = await self.create_client(request_cb=callback)
+
+        # Perform an insert operation that succeeds.
+        await client.test.test.insert_one({})
+
+        # Set a fail point for the insert command.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["insert"], "errorCode": 391},
+            }
+        ):
+            # Perform an ``insert`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                await client.test.test.insert_one({})
+
+        # Verify that the callback was called 2 times.
+        self.assertEqual(callback.count, 2)
+
+    async def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(self):
+        # Create an OIDC configured client that can listen for `SaslStart` commands.
+        listener = EventListener()
+        client = await self.create_client(event_listeners=[listener])
+        await client.aconnect()
+
+        # Preload the *Client Cache* with a valid access token to enforce Speculative Authentication.
+        client2 = await self.create_client()
+        await client2.test.test.find_one()
+        client.options.pool_options._credentials.cache.data = (
+            client2.options.pool_options._credentials.cache.data
+        )
+        await client2.close()
+        self.request_called = 0
+
+        # Perform an `insert` operation that succeeds.
+        await client.test.test.insert_one({})
+
+        # Assert that the callback was not called.
+        self.assertEqual(self.request_called, 0)
+
+        # Assert there were no `SaslStart` commands executed.
+        assert not any(
+            event.command_name.lower() == "saslstart" for event in listener.started_events
+        )
+        listener.reset()
+
+        # Set a fail point for `insert` commands.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["insert"], "errorCode": 391},
+            }
+        ):
+            # Perform an `insert` operation that succeeds.
+            await client.test.test.insert_one({})
+
+        # Assert that the callback was called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Assert there were `SaslStart` commands executed.
+        assert any(event.command_name.lower() == "saslstart" for event in listener.started_events)
+
+    async def test_4_5_reauthentication_succeeds_when_a_session_is_involved(self):
+        # Create an OIDC configured client.
+        client = await self.create_client()
+
+        # Set a fail point for `find` commands.
+        async with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Start a new session.
+            async with client.start_session() as session:
+                # In the started session perform a `find` operation that succeeds.
+                await client.test.test.find_one({}, session=session)
+
+        # Assert that the callback was called 2 times (once during the connection handshake, and again during reauthentication).
+ self.assertEqual(self.request_called, 2) + + async def test_5_1_azure_with_no_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + opts = parse_uri(self.uri_single)["options"] + resource = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") + client = await self.create_client(authMechanismProperties=props) + await client.test.test.find_one() + + async def test_5_2_azure_with_bad_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=token_aud, ENVIRONMENT="azure") + client = await self.create_client(username="bad", authmechanismproperties=props) + with self.assertRaises(ValueError): + await client.test.test.find_one() + + async def test_speculative_auth_success(self): + client1 = await self.create_client() + await client1.test.test.find_one() + client2 = await self.create_client() + await client2.aconnect() + + # Prime the cache of the second client. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + # Set a fail point for saslStart commands. + async with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + await client2.test.test.find_one() + + async def test_reauthentication_succeeds_multiple_connections(self): + client1 = await self.create_client() + client2 = await self.create_client() + + # Perform an insert operation. + await client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + await client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + await client1.test.test.find_one() + await client2.test.test.find_one() + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + await client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_auth_spec.py b/test/asynchronous/test_auth_spec.py new file mode 100644 index 0000000000..7c659c6d93 --- /dev/null +++ b/test/asynchronous/test_auth_spec.py @@ -0,0 +1,113 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Run the auth spec tests.""" +from __future__ import annotations + +import glob +import json +import os +import sys +import warnings +from test.asynchronous import AsyncPyMongoTestCase + +import pytest + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +from pymongo import AsyncMongoClient +from pymongo.auth_oidc_shared import OIDCCallback + +pytestmark = pytest.mark.auth + +_IS_SYNC = False + +_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") + + +class TestAuthSpec(AsyncPyMongoTestCase): + pass + + +class SampleHumanCallback(OIDCCallback): + def fetch(self, context): + pass + + +def create_test(test_case): + def run_test(self): + uri = test_case["uri"] + valid = test_case["valid"] + credential = test_case.get("credential") + + if not valid: + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.assertRaises(Exception, AsyncMongoClient, uri, connect=False) + else: + client = self.simple_client(uri, connect=False) + credentials = client.options.pool_options._credentials + if credential is None: + self.assertIsNone(credentials) + else: + self.assertIsNotNone(credentials) + self.assertEqual(credentials.username, credential["username"]) + self.assertEqual(credentials.password, credential["password"]) + self.assertEqual(credentials.source, credential["source"]) + if credential["mechanism"] is not None: + self.assertEqual(credentials.mechanism, credential["mechanism"]) + else: + self.assertEqual(credentials.mechanism, "DEFAULT") + expected = credential["mechanism_properties"] + if expected is not None: + actual = credentials.mechanism_properties + for key, value in expected.items(): + self.assertEqual(getattr(actual, key.lower()), value) + else: + if credential["mechanism"] == "MONGODB-AWS": + self.assertIsNone(credentials.mechanism_properties.aws_session_token) + else: + self.assertIsNone(credentials.mechanism_properties) + + return run_test + + +def create_tests(): + for filename in glob.glob(os.path.join(_TEST_PATH, "legacy", "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename) as auth_tests: + test_cases = json.load(auth_tests)["tests"] + for test_case in test_cases: + if test_case.get("optional", False): + continue + test_method = create_test(test_case) + name = str(test_case["description"].lower().replace(" ", "_")) + setattr(TestAuthSpec, f"test_{test_suffix}_{name}", test_method) + + +create_tests() + + +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_bulk.py b/test/asynchronous/test_bulk.py new file mode 100644 index 0000000000..02958e6f0e --- /dev/null +++ b/test/asynchronous/test_bulk.py @@ -0,0 +1,1122 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the bulk API.""" +from __future__ import annotations + +import sys +import uuid +from typing import Any, Optional + +from pymongo.asynchronous.mongo_client import AsyncMongoClient + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, remove_all_users, unittest +from test.utils_shared import async_wait_until + +from bson.binary import Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.common import partition_node +from pymongo.errors import BulkWriteError, ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import * +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncBulkTestBase(AsyncIntegrationTest): + coll: AsyncCollection + coll_w0: AsyncCollection + + async def asyncSetUp(self): + await super().asyncSetUp() + self.coll = self.db.test + await self.coll.drop() + self.coll_w0 = self.coll.with_options(write_concern=WriteConcern(w=0)) + + def assertEqualResponse(self, expected, actual): + """Compare response from bulk.execute() to expected response.""" + for key, value in expected.items(): + if key == "nModified": + self.assertEqual(value, actual["nModified"]) + elif key == "upserted": + expected_upserts = value + actual_upserts = actual["upserted"] + self.assertEqual( + len(expected_upserts), + len(actual_upserts), + 'Expected %d elements in "upserted", got %d' + % (len(expected_upserts), len(actual_upserts)), + ) + + for e, a in zip(expected_upserts, actual_upserts): + self.assertEqualUpsert(e, a) + + elif key == "writeErrors": + expected_errors = value + actual_errors = actual["writeErrors"] + self.assertEqual( + len(expected_errors), + len(actual_errors), + 'Expected %d elements in "writeErrors", got %d' + % (len(expected_errors), len(actual_errors)), + ) + + for e, a in zip(expected_errors, actual_errors): + self.assertEqualWriteError(e, a) + + else: + self.assertEqual( + actual.get(key), + value, + f"{key!r} value of {actual.get(key)!r} does not match expected {value!r}", + ) + + def assertEqualUpsert(self, expected, actual): + """Compare bulk.execute()['upserts'] to expected value. + + Like: {'index': 0, '_id': ObjectId()} + """ + self.assertEqual(expected["index"], actual["index"]) + if expected["_id"] == "...": + # Unspecified value. + self.assertIn("_id", actual) + else: + self.assertEqual(expected["_id"], actual["_id"]) + + def assertEqualWriteError(self, expected, actual): + """Compare bulk.execute()['writeErrors'] to expected value. + + Like: {'index': 0, 'code': 123, 'errmsg': '...', 'op': { ... }} + """ + self.assertEqual(expected["index"], actual["index"]) + self.assertEqual(expected["code"], actual["code"]) + if expected["errmsg"] == "...": + # Unspecified value. + self.assertIn("errmsg", actual) + else: + self.assertEqual(expected["errmsg"], actual["errmsg"]) + + expected_op = expected["op"].copy() + actual_op = actual["op"].copy() + if expected_op.get("_id") == "...": + # Unspecified _id. 
+ self.assertIn("_id", actual_op) + actual_op.pop("_id") + expected_op.pop("_id") + + self.assertEqual(expected_op, actual_op) + + +class AsyncTestBulk(AsyncBulkTestBase): + async def test_empty(self): + with self.assertRaises(InvalidOperation): + await self.coll.bulk_write([]) + + async def test_insert(self): + expected = { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + result = await self.coll.bulk_write([InsertOne({})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.inserted_count) + self.assertEqual(1, await self.coll.count_documents({})) + + async def _test_update_many(self, update): + expected = { + "nMatched": 2, + "nModified": 2, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + await self.coll.insert_many([{}, {}]) + + result = await self.coll.bulk_write([UpdateMany({}, update)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(2, result.matched_count) + self.assertIn(result.modified_count, (2, None)) + + async def test_update_many(self): + await self._test_update_many({"$set": {"foo": "bar"}}) + + @async_client_context.require_version_min(4, 2, 0) + async def test_update_many_pipeline(self): + await self._test_update_many([{"$set": {"foo": "bar"}}]) + + async def test_array_filters_validation(self): + with self.assertRaises(TypeError): + await UpdateMany({}, {}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await UpdateOne({}, {}, array_filters={}) # type: ignore[arg-type] + + async def test_array_filters_unacknowledged(self): + coll = self.coll_w0 + update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + await coll.bulk_write([update_one]) + with self.assertRaises(ConfigurationError): + await coll.bulk_write([update_many]) + + async def _test_update_one(self, update): + expected = { + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + await self.coll.insert_many([{}, {}]) + + result = await self.coll.bulk_write([UpdateOne({}, update)]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (1, None)) + + async def test_update_one(self): + await self._test_update_one({"$set": {"foo": "bar"}}) + + @async_client_context.require_version_min(4, 2, 0) + async def test_update_one_pipeline(self): + await self._test_update_one([{"$set": {"foo": "bar"}}]) + + async def test_replace_one(self): + expected = { + "nMatched": 1, + "nModified": 1, + "nUpserted": 0, + "nInserted": 0, + "nRemoved": 0, + "upserted": [], + "writeErrors": [], + "writeConcernErrors": [], + } + + await self.coll.insert_many([{}, {}]) + + result = await self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) + self.assertEqualResponse(expected, result.bulk_api_result) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (1, None)) + + async def test_remove(self): + # Test removing all documents, ordered. 
+        expected = {
+            "nMatched": 0,
+            "nModified": 0,
+            "nUpserted": 0,
+            "nInserted": 0,
+            "nRemoved": 2,
+            "upserted": [],
+            "writeErrors": [],
+            "writeConcernErrors": [],
+        }
+        await self.coll.insert_many([{}, {}])
+
+        result = await self.coll.bulk_write([DeleteMany({})])
+        self.assertEqualResponse(expected, result.bulk_api_result)
+        self.assertEqual(2, result.deleted_count)
+
+    async def test_remove_one(self):
+        # Test removing one document, empty selector.
+        await self.coll.insert_many([{}, {}])
+        expected = {
+            "nMatched": 0,
+            "nModified": 0,
+            "nUpserted": 0,
+            "nInserted": 0,
+            "nRemoved": 1,
+            "upserted": [],
+            "writeErrors": [],
+            "writeConcernErrors": [],
+        }
+
+        result = await self.coll.bulk_write([DeleteOne({})])
+        self.assertEqualResponse(expected, result.bulk_api_result)
+        self.assertEqual(1, result.deleted_count)
+        self.assertEqual(await self.coll.count_documents({}), 1)
+
+    async def test_upsert(self):
+        expected = {
+            "nMatched": 0,
+            "nModified": 0,
+            "nUpserted": 1,
+            "nInserted": 0,
+            "nRemoved": 0,
+            "upserted": [{"index": 0, "_id": "..."}],
+        }
+
+        result = await self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"}, upsert=True)])
+        self.assertEqualResponse(expected, result.bulk_api_result)
+        self.assertEqual(1, result.upserted_count)
+        assert result.upserted_ids is not None
+        self.assertEqual(1, len(result.upserted_ids))
+        self.assertIsInstance(result.upserted_ids.get(0), ObjectId)
+
+        self.assertEqual(await self.coll.count_documents({"foo": "bar"}), 1)
+
+    async def test_numerous_inserts(self):
+        # Ensure we don't exceed the server's maxWriteBatchSize limit.
+        n_docs = await async_client_context.max_write_batch_size + 100
+        requests = [InsertOne[dict]({}) for _ in range(n_docs)]
+        result = await self.coll.bulk_write(requests, ordered=False)
+        self.assertEqual(n_docs, result.inserted_count)
+        self.assertEqual(n_docs, await self.coll.count_documents({}))
+
+        # Same with ordered bulk.
+        await self.coll.drop()
+        result = await self.coll.bulk_write(requests)
+        self.assertEqual(n_docs, result.inserted_count)
+        self.assertEqual(n_docs, await self.coll.count_documents({}))
+
+    async def test_bulk_max_message_size(self):
+        await self.coll.delete_many({})
+        self.addAsyncCleanup(self.coll.delete_many, {})
+        _16_MB = 16 * 1000 * 1000
+        # Generate a list of documents such that the first batched OP_MSG is
+        # as close as possible to the 48MB limit.
+        docs = [
+            {"_id": 1, "l": "s" * _16_MB},
+            {"_id": 2, "l": "s" * _16_MB},
+            {"_id": 3, "l": "s" * (_16_MB - 10000)},
+        ]
+        # Fill in the remaining ~10000 bytes with small documents.
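+        # (Arithmetic for the sizes above: the default maxMessageSizeBytes is
+        # 48,000,000 and the three large documents total
+        # 16,000,000 + 16,000,000 + (16,000,000 - 10000) = 47,990,000 bytes,
+        # so the small documents below should spill into at least one
+        # additional OP_MSG batch.)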
+ for i in range(4, 10000): + docs.append({"_id": i}) + result = await self.coll.insert_many(docs) + self.assertEqual(len(docs), len(result.inserted_ids)) + + async def test_generator_insert(self): + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = await self.coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + async def test_bulk_write_no_results(self): + result = await self.coll_w0.bulk_write([InsertOne({})]) + self.assertFalse(result.acknowledged) + self.assertRaises(InvalidOperation, lambda: result.inserted_count) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_ids) + + async def test_bulk_write_invalid_arguments(self): + # The requests argument must be a list. + generator = (InsertOne[dict]({}) for _ in range(10)) + with self.assertRaises(TypeError): + await self.coll.bulk_write(generator) # type: ignore[arg-type] + + # Document is not wrapped in a bulk write operation. + with self.assertRaises(TypeError): + await self.coll.bulk_write([{}]) # type: ignore[list-item] + + async def test_upsert_large(self): + big = "a" * (await async_client_context.max_bson_size - 37) + result = await self.coll.bulk_write( + [UpdateOne({"x": 1}, {"$set": {"s": big}}, upsert=True)] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 1, + "nInserted": 0, + "nRemoved": 0, + "upserted": [{"index": 0, "_id": "..."}], + }, + result.bulk_api_result, + ) + + self.assertEqual(1, await self.coll.count_documents({"x": 1})) + + async def test_client_generated_upsert_id(self): + result = await self.coll.bulk_write( + [ + UpdateOne({"_id": 0}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": 1}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": 2}, {"_id": 2}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": 0}, + {"index": 1, "_id": 1}, + {"index": 2, "_id": 2}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_standard(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. 
+ ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_unspecified(self): + options = CodecOptions(uuid_representation=UuidRepresentation.UNSPECIFIED) + coll = self.coll.with_options(codec_options=options) + uuids = [Binary.from_uuid(uuid.uuid4()) for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_upsert_uuid_standard_subdocuments(self): + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + coll = self.coll.with_options(codec_options=options) + ids: list = [{"f": Binary(bytes(i)), "f2": uuid.uuid4()} for i in range(3)] + + result = await coll.bulk_write( + [ + UpdateOne({"_id": ids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": ids[1]}, upsert=True), + # This is just here to make the counts right in all cases. + ReplaceOne({"_id": ids[2]}, {"_id": ids[2]}, upsert=True), + ] + ) + + # The `Binary` values are returned as `bytes` objects. + for _id in ids: + _id["f"] = bytes(_id["f"]) + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": ids[0]}, + {"index": 1, "_id": ids[1]}, + {"index": 2, "_id": ids[2]}, + ], + }, + result.bulk_api_result, + ) + + async def test_single_ordered_batch(self): + result = await self.coll.bulk_write( + [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + ) + self.assertEqualResponse( + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + }, + result.bulk_api_result, + ) + + async def test_single_error_ordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addAsyncCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_multiple_error_ordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addAsyncCleanup(self.coll.drop_index, [("a", 
1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 2}}, upsert=True), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 4, "a": 3}), + InsertOne({"b": 5, "a": 1}), + ] + + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 1, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_single_unordered_batch(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 1}, {"$set": {"b": 1}}), + UpdateOne({"a": 2}, {"$set": {"b": 2}}, upsert=True), + InsertOne({"a": 3}), + DeleteOne({"a": 3}), + ] + result = await self.coll.bulk_write(requests, ordered=False) + self.assertEqualResponse( + { + "nMatched": 1, + "nModified": 1, + "nUpserted": 1, + "nInserted": 2, + "nRemoved": 1, + "upserted": [{"index": 2, "_id": "..."}], + "writeErrors": [], + "writeConcernErrors": [], + }, + result.bulk_api_result, + ) + + async def test_single_error_unordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addAsyncCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 1}}, upsert=True), + InsertOne({"b": 3, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 0, + "nInserted": 2, + "nRemoved": 0, + "upserted": [], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 1, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 2}, + "u": {"$set": {"a": 1}}, + "multi": False, + "upsert": True, + }, + } + ], + }, + result, + ) + + async def test_multiple_error_unordered_batch(self): + await self.coll.create_index("a", unique=True) + self.addAsyncCleanup(self.coll.drop_index, [("a", 1)]) + requests: list = [ + InsertOne({"b": 1, "a": 1}), + UpdateOne({"b": 2}, {"$set": {"a": 3}}, upsert=True), + UpdateOne({"b": 3}, {"$set": {"a": 4}}, upsert=True), + UpdateOne({"b": 4}, {"$set": {"a": 3}}, upsert=True), + InsertOne({"b": 5, "a": 2}), + InsertOne({"b": 6, "a": 1}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + # Assume the update at index 1 runs before the update at index 3, + # although the spec does not require it. Same for inserts. 
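+        # With ordered=False both duplicate-key failures are reported
+        # (indexes 3 and 5 below), unlike the ordered variant above, which
+        # stops at the first write error.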
+ self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 2, + "nInserted": 2, + "nRemoved": 0, + "upserted": [{"index": 1, "_id": "..."}, {"index": 2, "_id": "..."}], + "writeConcernErrors": [], + "writeErrors": [ + { + "index": 3, + "code": 11000, + "errmsg": "...", + "op": { + "q": {"b": 4}, + "u": {"$set": {"a": 3}}, + "multi": False, + "upsert": True, + }, + }, + { + "index": 5, + "code": 11000, + "errmsg": "...", + "op": {"_id": "...", "b": 6, "a": 1}, + }, + ], + }, + result, + ) + + async def test_large_inserts_ordered(self): + big = "x" * await async_client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests) + except BulkWriteError as exc: + result = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(1, result["nInserted"]) + + await self.coll.delete_many({}) + + big = "x" * (1024 * 1024 * 4) + write_result = await self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ] + ) + + self.assertEqual(6, write_result.inserted_count) + self.assertEqual(6, await self.coll.count_documents({})) + + async def test_large_inserts_unordered(self): + big = "x" * await async_client_context.max_bson_size + requests = [ + InsertOne({"b": 1, "a": 1}), + InsertOne({"big": big}), + InsertOne({"b": 2, "a": 2}), + ] + + try: + await self.coll.bulk_write(requests, ordered=False) + except BulkWriteError as exc: + details = exc.details + self.assertEqual(exc.code, 65) + else: + self.fail("Error not raised") + + self.assertEqual(2, details["nInserted"]) + + await self.coll.delete_many({}) + + big = "x" * (1024 * 1024 * 4) + result = await self.coll.bulk_write( + [ + InsertOne({"a": 1, "big": big}), + InsertOne({"a": 2, "big": big}), + InsertOne({"a": 3, "big": big}), + InsertOne({"a": 4, "big": big}), + InsertOne({"a": 5, "big": big}), + InsertOne({"a": 6, "big": big}), + ], + ordered=False, + ) + + self.assertEqual(6, result.inserted_count) + self.assertEqual(6, await self.coll.count_documents({})) + + +class AsyncBulkAuthorizationTestBase(AsyncBulkTestBase): + @async_client_context.require_auth + @async_client_context.require_no_api_version + async def asyncSetUp(self): + await super().asyncSetUp() + await async_client_context.create_user(self.db.name, "readonly", "pw", ["read"]) + await self.db.command( + "createRole", + "noremove", + privileges=[ + { + "actions": ["insert", "update", "find"], + "resource": {"db": "pymongo_test", "collection": "test"}, + } + ], + roles=[], + ) + + await async_client_context.create_user(self.db.name, "noremove", "pw", ["noremove"]) + + async def asyncTearDown(self): + await self.db.command("dropRole", "noremove") + await remove_all_users(self.db) + + +class AsyncTestBulkUnacknowledged(AsyncBulkTestBase): + async def asyncTearDown(self): + await self.coll.delete_many({}) + + async def test_no_results_ordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = await self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 
documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + async def test_no_results_ordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should not be executed since the batch is ordered. + DeleteOne({"_id": 1}), + ] + result = await self.coll_w0.bulk_write(requests) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 3 + + await async_wait_until(predicate, "insert 3 documents") + self.assertEqual({"_id": 1}, await self.coll.find_one({"_id": 1})) + + async def test_no_results_unordered_success(self): + requests: list = [ + InsertOne({"a": 1}), + UpdateOne({"a": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"a": 2}), + DeleteOne({"a": 1}), + ] + result = await self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + async def test_no_results_unordered_failure(self): + requests: list = [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 3}, {"$set": {"b": 1}}, upsert=True), + InsertOne({"_id": 2}), + # Fails with duplicate key error. + InsertOne({"_id": 1}), + # Should be executed since the batch is unordered. + DeleteOne({"_id": 1}), + ] + result = await self.coll_w0.bulk_write(requests, ordered=False) + self.assertFalse(result.acknowledged) + + async def predicate(): + return await self.coll.count_documents({}) == 2 + + await async_wait_until(predicate, "insert 2 documents") + + async def predicate(): + return await self.coll.find_one({"_id": 1}) is None + + await async_wait_until(predicate, 'removed {"_id": 1}') + + +class AsyncTestBulkAuthorization(AsyncBulkAuthorizationTestBase): + async def test_readonly(self): + # We test that an authorization failure aborts the batch and is raised + # as OperationFailure. + cli = await self.async_rs_or_single_client_noauth( + username="readonly", password="pw", authSource="pymongo_test" + ) + coll = cli.pymongo_test.test + await coll.find_one() + with self.assertRaises(OperationFailure): + await coll.bulk_write([InsertOne({"x": 1})]) + + async def test_no_remove(self): + # We test that an authorization failure aborts the batch and is raised + # as OperationFailure. + cli = await self.async_rs_or_single_client_noauth( + username="noremove", password="pw", authSource="pymongo_test" + ) + coll = cli.pymongo_test.test + await coll.find_one() + requests = [ + InsertOne({"x": 1}), + ReplaceOne({"x": 2}, {"x": 2}, upsert=True), + DeleteMany({}), # Prohibited. + InsertOne({"x": 3}), # Never attempted. 
+        ]
+        with self.assertRaises(OperationFailure):
+            await coll.bulk_write(requests)  # type: ignore[arg-type]
+        self.assertEqual({1, 2}, set(await self.coll.distinct("x")))
+
+
+class AsyncTestBulkWriteConcern(AsyncBulkTestBase):
+    w: Optional[int]
+    secondary: AsyncMongoClient
+
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.w = async_client_context.w
+        self.secondary = None
+        if self.w is not None and self.w > 1:
+            for member in (await async_client_context.hello)["hosts"]:
+                if member != (await async_client_context.hello)["primary"]:
+                    self.secondary = await self.async_single_client(*partition_node(member))
+                    break
+
+    async def asyncTearDown(self):
+        if self.secondary:
+            await self.secondary.close()
+
+    async def cause_wtimeout(self, requests, ordered):
+        if not async_client_context.test_commands_enabled:
+            self.skipTest("Test commands must be enabled.")
+
+        # Use the rsSyncApplyStop failpoint to pause replication on a
+        # secondary which will cause a wtimeout error.
+        await self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn")
+
+        try:
+            coll = self.coll.with_options(write_concern=WriteConcern(w=self.w, wtimeout=1))
+            return await coll.bulk_write(requests, ordered=ordered)
+        finally:
+            await self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off")
+
+    @async_client_context.require_version_max(7, 1)  # PYTHON-4560
+    @async_client_context.require_replica_set
+    @async_client_context.require_secondaries_count(1)
+    async def test_write_concern_failure_ordered(self):
+        details = None
+
+        # Ensure we don't raise on wnote.
+        coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w))
+        result = await coll_ww.bulk_write([DeleteOne({"something": "that does not exist"})])
+        self.assertTrue(result.acknowledged)
+
+        requests: list[Any] = [InsertOne({"a": 1}), InsertOne({"a": 2})]
+        # Replication wtimeout is a 'soft' error.
+        # It shouldn't stop batch processing.
+        try:
+            await self.cause_wtimeout(requests, ordered=True)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqualResponse(
+            {
+                "nMatched": 0,
+                "nModified": 0,
+                "nUpserted": 0,
+                "nInserted": 2,
+                "nRemoved": 0,
+                "upserted": [],
+                "writeErrors": [],
+            },
+            details,
+        )
+
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+        self.assertGreater(len(details["writeConcernErrors"]), 0)
+
+        failed = details["writeConcernErrors"][0]
+        self.assertEqual(64, failed["code"])
+        self.assertIsInstance(failed["errmsg"], str)
+
+        await self.coll.delete_many({})
+        await self.coll.create_index("a", unique=True)
+        self.addAsyncCleanup(self.coll.drop_index, [("a", 1)])
+
+        # Fail due to write concern support as well
+        # as duplicate key error on ordered batch.
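+        # The unique index on "a" turns the repeated key into a duplicate-key
+        # failure, while rsSyncApplyStop still forces a wtimeout on the writes
+        # that do succeed, so the response carries both writeErrors and
+        # writeConcernErrors.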
+        requests = [
+            InsertOne({"a": 1}),
+            ReplaceOne({"a": 3}, {"b": 1}, upsert=True),
+            InsertOne({"a": 1}),
+            InsertOne({"a": 2}),
+        ]
+        try:
+            await self.cause_wtimeout(requests, ordered=True)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqualResponse(
+            {
+                "nMatched": 0,
+                "nModified": 0,
+                "nUpserted": 1,
+                "nInserted": 1,
+                "nRemoved": 0,
+                "upserted": [{"index": 1, "_id": "..."}],
+                "writeErrors": [
+                    {"index": 2, "code": 11000, "errmsg": "...", "op": {"_id": "...", "a": 1}}
+                ],
+            },
+            details,
+        )
+
+        self.assertGreater(len(details["writeConcernErrors"]), 1)
+        failed = details["writeErrors"][0]
+        self.assertIn("duplicate", failed["errmsg"])
+
+    @async_client_context.require_version_max(7, 1)  # PYTHON-4560
+    @async_client_context.require_replica_set
+    @async_client_context.require_secondaries_count(1)
+    async def test_write_concern_failure_unordered(self):
+        self.skipTest("Skipping until PYTHON-4865 is resolved.")
+        details = None
+
+        # Ensure we don't raise on wnote.
+        coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w))
+        result = await coll_ww.bulk_write(
+            [DeleteOne({"something": "that does not exist"})], ordered=False
+        )
+        self.assertTrue(result.acknowledged)
+
+        requests = [
+            InsertOne({"a": 1}),
+            UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True),
+            InsertOne({"a": 2}),
+        ]
+        # Replication wtimeout is a 'soft' error.
+        # It shouldn't stop batch processing.
+        try:
+            await self.cause_wtimeout(requests, ordered=False)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqual(2, details["nInserted"])
+        self.assertEqual(1, details["nUpserted"])
+        self.assertEqual(0, len(details["writeErrors"]))
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+        self.assertGreater(len(details["writeConcernErrors"]), 1)
+
+        await self.coll.delete_many({})
+        await self.coll.create_index("a", unique=True)
+        self.addAsyncCleanup(self.coll.drop_index, [("a", 1)])
+
+        # Fail due to write concern support as well
+        # as duplicate key error on unordered batch.
+        requests: list = [
+            InsertOne({"a": 1}),
+            UpdateOne({"a": 3}, {"$set": {"a": 3, "b": 1}}, upsert=True),
+            InsertOne({"a": 1}),
+            InsertOne({"a": 2}),
+        ]
+        try:
+            await self.cause_wtimeout(requests, ordered=False)
+        except BulkWriteError as exc:
+            details = exc.details
+            self.assertEqual(exc.code, 65)
+        else:
+            self.fail("Error not raised")
+
+        self.assertEqual(2, details["nInserted"])
+        self.assertEqual(1, details["nUpserted"])
+        self.assertEqual(1, len(details["writeErrors"]))
+        # When talking to legacy servers there will be a
+        # write concern error for each operation.
+ self.assertGreater(len(details["writeConcernErrors"]), 1) + + failed = details["writeErrors"][0] + self.assertEqual(2, failed["index"]) + self.assertEqual(11000, failed["code"]) + self.assertIsInstance(failed["errmsg"], str) + self.assertEqual(1, failed["op"]["a"]) + + failed = details["writeConcernErrors"][0] + self.assertEqual(64, failed["code"]) + self.assertIsInstance(failed["errmsg"], str) + + upserts = details["upserted"] + self.assertEqual(1, len(upserts)) + self.assertEqual(1, upserts[0]["index"]) + self.assertTrue(upserts[0].get("_id")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_change_stream.py b/test/asynchronous/test_change_stream.py new file mode 100644 index 0000000000..3fb8b517f3 --- /dev/null +++ b/test/asynchronous/test_change_stream.py @@ -0,0 +1,1158 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the change_stream module.""" +from __future__ import annotations + +import asyncio +import os +import random +import string +import sys +import threading +import time +import uuid +from itertools import product +from typing import no_type_check + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + Version, + async_client_context, + unittest, +) +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import ( + AllowListEventListener, + EventListener, + OvertCommandListener, + async_wait_until, +) + +from bson import SON, ObjectId, Timestamp, encode +from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument +from pymongo import AsyncMongoClient +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.errors import ( + InvalidOperation, + OperationFailure, + ServerSelectionTimeoutError, +) +from pymongo.message import _CursorAddress +from pymongo.read_concern import ReadConcern +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestAsyncChangeStreamBase(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + + async def change_stream_with_client(self, client, *args, **kwargs): + """Create a change stream using the given client and return it.""" + raise NotImplementedError + + async def change_stream(self, *args, **kwargs): + """Create a change stream using the default client and return it.""" + return await self.change_stream_with_client(self.client, *args, **kwargs) + + async def client_with_listener(self, *commands): + """Return a client with a AllowListEventListener.""" + listener = AllowListEventListener(*commands) + client = await self.async_rs_or_single_client(event_listeners=[listener]) + return client, listener + + def watched_collection(self, *args, **kwargs): + """Return a collection that is watched by self.change_stream().""" + # Construct a unique collection for each test. 
+ collname = ".".join(self.id().rsplit(".", 2)[1:]) + return self.db.get_collection(collname, *args, **kwargs) + + async def generate_invalidate_event(self, change_stream): + """Cause a change stream invalidate event.""" + raise NotImplementedError + + def generate_unique_collnames(self, numcolls): + """Generate numcolls collection names unique to a test.""" + collnames = [] + for idx in range(1, numcolls + 1): + collnames.append(self.id() + "_" + str(idx)) + return collnames + + async def get_resume_token(self, invalidate=False): + """Get a resume token to use for starting a change stream.""" + # Ensure targeted collection exists before starting. + coll = self.watched_collection(write_concern=WriteConcern("majority")) + await coll.insert_one({}) + + if invalidate: + async with await self.change_stream( + [{"$match": {"operationType": "invalidate"}}] + ) as cs: + if isinstance(cs._target, AsyncMongoClient): + self.skipTest("cluster-level change streams cannot be invalidated") + await self.generate_invalidate_event(cs) + return (await cs.next())["_id"] + else: + async with await self.change_stream() as cs: + await coll.insert_one({"data": 1}) + return (await cs.next())["_id"] + + async def get_start_at_operation_time(self): + """Get an operationTime. Advances the operation clock beyond the most + recently returned timestamp. + """ + optime = (await self.client.admin.command("ping"))["operationTime"] + return Timestamp(optime.time, optime.inc + 1) + + async def insert_one_and_check(self, change_stream, doc): + """Insert a document and check that it shows up in the change stream.""" + raise NotImplementedError + + async def kill_change_stream_cursor(self, change_stream): + """Cause a cursor not found error on the next getMore.""" + cursor = change_stream._cursor + address = _CursorAddress(cursor.address, cursor._ns) + client = self.watched_collection().database.client + await client._close_cursor_now(cursor.cursor_id, address) + + +class APITestsMixin: + @no_type_check + async def test_watch(self): + async with await self.change_stream( + [{"$project": {"foo": 0}}], + full_document="updateLookup", + max_await_time_ms=1000, + batch_size=100, + ) as change_stream: + self.assertEqual([{"$project": {"foo": 0}}], change_stream._pipeline) + self.assertEqual("updateLookup", change_stream._full_document) + self.assertEqual(1000, change_stream._max_await_time_ms) + self.assertEqual(100, change_stream._batch_size) + self.assertIsInstance(change_stream._cursor, AsyncCommandCursor) + self.assertEqual(1000, change_stream._cursor._max_await_time_ms) + await self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) + _ = await change_stream.next() + resume_token = change_stream.resume_token + with self.assertRaises(TypeError): + await self.change_stream(pipeline={}) + with self.assertRaises(TypeError): + await self.change_stream(full_document={}) + # No Error. + async with await self.change_stream(resume_after=resume_token): + pass + + @no_type_check + async def test_try_next(self): + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + await coll.insert_one({}) + self.addAsyncCleanup(coll.drop) + async with await self.change_stream(max_await_time_ms=250) as stream: + self.assertIsNone(await stream.try_next()) # No changes initially. + await coll.insert_one({}) # Generate a change. 
+ + # On sharded clusters, even majority-committed changes only show + # up once an event that sorts after it shows up on the other + # shard. So, we wait on try_next to eventually return changes. + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + + @no_type_check + async def test_try_next_runs_one_getmore(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + # Connect to the cluster. + await client.admin.command("ping") + listener.reset() + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + # Create the watched collection before starting the change stream to + # skip any "create" events. + await coll.insert_one({"_id": 1}) + self.addAsyncCleanup(coll.drop) + async with await self.change_stream_with_client(client, max_await_time_ms=250) as stream: + self.assertEqual(listener.started_command_names(), ["aggregate"]) + listener.reset() + + # Confirm that only a single getMore is run even when no documents + # are returned. + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore"]) + listener.reset() + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore"]) + listener.reset() + + # Get at least one change before resuming. + await coll.insert_one({"_id": 2}) + + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + listener.reset() + + # Cause the next request to initiate the resume process. + await self.kill_change_stream_cursor(stream) + listener.reset() + + # The sequence should be: + # - getMore, fail + # - resume with aggregate command + # - no results, return immediately without another getMore + self.assertIsNone(await stream.try_next()) + self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) + listener.reset() + + # Stream still works after a resume. + await coll.insert_one({"_id": 3}) + + async def _wait_until(): + return await stream.try_next() is not None + + await async_wait_until(_wait_until, "get change from try_next") + self.assertEqual(set(listener.started_command_names()), {"getMore"}) + self.assertIsNone(await stream.try_next()) + + @no_type_check + async def test_batch_size_is_honored(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + # Connect to the cluster. + await client.admin.command("ping") + listener.reset() + # AsyncChangeStreams only read majority committed data so use w:majority. + coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) + await coll.drop() + # Create the watched collection before starting the change stream to + # skip any "create" events. + await coll.insert_one({"_id": 1}) + self.addAsyncCleanup(coll.drop) + # Expected batchSize. + expected = {"batchSize": 23} + async with await self.change_stream_with_client( + client, max_await_time_ms=250, batch_size=23 + ) as stream: + # Confirm that batchSize is honored for initial batch. + cmd = listener.started_events[0].command + self.assertEqual(cmd["cursor"], expected) + listener.reset() + # Confirm that batchSize is honored by getMores. 
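+            # (getMore commands report batchSize at the top level rather than
+            # nested under "cursor", hence the per-key comparison below.)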
+ self.assertIsNone(await stream.try_next()) + cmd = listener.started_events[0].command + key = next(iter(expected)) + self.assertEqual(expected[key], cmd[key]) + + # $changeStream.startAtOperationTime was added in 4.0.0. + @no_type_check + @async_client_context.require_version_min(4, 2, 0) + async def test_start_at_operation_time(self): + optime = await self.get_start_at_operation_time() + + coll = self.watched_collection(write_concern=WriteConcern("majority")) + ndocs = 3 + await coll.insert_many([{"data": i} for i in range(ndocs)]) + + async with await self.change_stream(start_at_operation_time=optime) as cs: + for _i in range(ndocs): + await cs.next() + + @no_type_check + async def _test_full_pipeline(self, expected_cs_stage): + client, listener = await self.client_with_listener("aggregate") + async with await self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: + pass + + self.assertEqual(1, len(listener.started_events)) + command = listener.started_events[0] + self.assertEqual("aggregate", command.command_name) + self.assertEqual( + [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], + command.command["pipeline"], + ) + + @no_type_check + async def test_full_pipeline(self): + """$changeStream must be the first stage in a change stream pipeline + sent to the server. + """ + await self._test_full_pipeline({}) + + @no_type_check + async def test_iteration(self): + async with await self.change_stream(batch_size=2) as change_stream: + num_inserted = 10 + await self.watched_collection().insert_many([{} for _ in range(num_inserted)]) + inserts_received = 0 + async for change in change_stream: + self.assertEqual(change["operationType"], "insert") + inserts_received += 1 + if inserts_received == num_inserted: + break + await self._test_invalidate_stops_iteration(change_stream) + + @no_type_check + @async_client_context.require_sync + def _test_next_blocks(self, change_stream): + inserted_doc = {"_id": ObjectId()} + changes = [] + t = threading.Thread(target=lambda: changes.append(change_stream.next())) + t.start() + # Sleep for a bit to prove that the call to next() blocks. + time.sleep(1) + self.assertTrue(t.is_alive()) + self.assertFalse(changes) + self.watched_collection().insert_one(inserted_doc) + # Join with large timeout to give the server time to return the change, + # in particular for shard clusters. + t.join(30) + self.assertFalse(t.is_alive()) + self.assertEqual(1, len(changes)) + self.assertEqual(changes[0]["operationType"], "insert") + self.assertEqual(changes[0]["fullDocument"], inserted_doc) + + @no_type_check + @async_client_context.require_sync + async def test_next_blocks(self): + """Test that next blocks until a change is readable""" + # Use a short wait time to speed up the test. + async with await self.change_stream(max_await_time_ms=250) as change_stream: + self._test_next_blocks(change_stream) + + @no_type_check + @async_client_context.require_sync + async def test_aggregate_cursor_blocks(self): + """Test that an aggregate cursor blocks until a change is readable.""" + async with await self.watched_collection().aggregate( + [{"$changeStream": {}}], maxAwaitTimeMS=250 + ) as change_stream: + self._test_next_blocks(change_stream) + + @no_type_check + @async_client_context.require_sync + def test_concurrent_close(self): + """Ensure a ChangeStream can be closed from another thread.""" + # Use a short wait time to speed up the test. 
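+        # (max_await_time_ms bounds how long each getMore blocks on the
+        # server, so the iterating thread notices close() quickly.)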
+        with self.change_stream(max_await_time_ms=250) as change_stream:
+
+            def iterate_cursor():
+                try:
+                    for _ in change_stream:
+                        pass
+                except OperationFailure as e:
+                    if e.code != 237:  # CursorKilled error code
+                        raise
+
+            t = threading.Thread(target=iterate_cursor)
+            t.start()
+            self.watched_collection().insert_one({})
+            time.sleep(1)
+            change_stream.close()
+            t.join(3)
+            self.assertFalse(t.is_alive())
+
+    @no_type_check
+    async def test_unknown_full_document(self):
+        """Must rely on the server to raise an error on unknown fullDocument."""
+        try:
+            async with await self.change_stream(full_document="notValidatedByPyMongo"):
+                pass
+        except OperationFailure:
+            pass
+
+    @no_type_check
+    async def test_change_operations(self):
+        """Test each operation type."""
+        expected_ns = {
+            "db": self.watched_collection().database.name,
+            "coll": self.watched_collection().name,
+        }
+        async with await self.change_stream() as change_stream:
+            # Insert.
+            inserted_doc = {"_id": ObjectId(), "foo": "bar"}
+            await self.watched_collection().insert_one(inserted_doc)
+            change = await change_stream.next()
+            self.assertTrue(change["_id"])
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["ns"], expected_ns)
+            self.assertEqual(change["fullDocument"], inserted_doc)
+            # Update.
+            update_spec = {"$set": {"new": 1}, "$unset": {"foo": 1}}
+            await self.watched_collection().update_one(inserted_doc, update_spec)
+            change = await change_stream.next()
+            self.assertTrue(change["_id"])
+            self.assertEqual(change["operationType"], "update")
+            self.assertEqual(change["ns"], expected_ns)
+            self.assertNotIn("fullDocument", change)
+
+            expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]}
+            if async_client_context.version.at_least(4, 5, 0):
+                expected_update_description["truncatedArrays"] = []
+            self.assertEqual(
+                expected_update_description,
+                {
+                    k: v
+                    for k, v in change["updateDescription"].items()
+                    if k in expected_update_description
+                },
+            )
+            # Replace.
+            await self.watched_collection().replace_one({"new": 1}, {"foo": "bar"})
+            change = await change_stream.next()
+            self.assertTrue(change["_id"])
+            self.assertEqual(change["operationType"], "replace")
+            self.assertEqual(change["ns"], expected_ns)
+            self.assertEqual(change["fullDocument"], inserted_doc)
+            # Delete.
+            await self.watched_collection().delete_one({"foo": "bar"})
+            change = await change_stream.next()
+            self.assertTrue(change["_id"])
+            self.assertEqual(change["operationType"], "delete")
+            self.assertEqual(change["ns"], expected_ns)
+            self.assertNotIn("fullDocument", change)
+            # Invalidate.
+            await self._test_get_invalidate_event(change_stream)
+
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_start_after(self):
+        resume_token = await self.get_resume_token(invalidate=True)
+
+        # resume_after cannot resume after invalidate.
+        with self.assertRaises(OperationFailure):
+            await self.change_stream(resume_after=resume_token)
+
+        # start_after can resume after invalidate.
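+        # (startAfter exists precisely so a stream can be restarted from an
+        # invalidate event's token, which resumeAfter rejects, as asserted
+        # above.)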
+        async with await self.change_stream(start_after=resume_token) as change_stream:
+            await self.watched_collection().insert_one({"_id": 2})
+            change = await change_stream.next()
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["fullDocument"], {"_id": 2})
+
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_start_after_resume_process_with_changes(self):
+        resume_token = await self.get_resume_token(invalidate=True)
+
+        async with await self.change_stream(
+            start_after=resume_token, max_await_time_ms=250
+        ) as change_stream:
+            await self.watched_collection().insert_one({"_id": 2})
+            change = await change_stream.next()
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["fullDocument"], {"_id": 2})
+
+            self.assertIsNone(await change_stream.try_next())
+            await self.kill_change_stream_cursor(change_stream)
+
+            await self.watched_collection().insert_one({"_id": 3})
+            change = await change_stream.next()
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["fullDocument"], {"_id": 3})
+
+    @no_type_check
+    @async_client_context.require_version_min(4, 2)
+    async def test_start_after_resume_process_without_changes(self):
+        resume_token = await self.get_resume_token(invalidate=True)
+
+        async with await self.change_stream(
+            start_after=resume_token, max_await_time_ms=250
+        ) as change_stream:
+            self.assertIsNone(await change_stream.try_next())
+            await self.kill_change_stream_cursor(change_stream)
+
+            await self.watched_collection().insert_one({"_id": 2})
+            change = await change_stream.next()
+            self.assertEqual(change["operationType"], "insert")
+            self.assertEqual(change["fullDocument"], {"_id": 2})
+
+
+class ProseSpecTestsMixin:
+    @no_type_check
+    async def _client_with_listener(self, *commands):
+        listener = AllowListEventListener(*commands)
+        client = await AsyncPyMongoTestCase.unmanaged_async_rs_or_single_client(
+            event_listeners=[listener]
+        )
+        self.addAsyncCleanup(client.close)
+        return client, listener
+
+    @no_type_check
+    async def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3):
+        await self.watched_collection().insert_many([{"data": k} for k in range(batch_size)])
+        for _ in range(batch_size):
+            change = await anext(change_stream)
+        return change
+
+    def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None):
+        """Predicts what the resume token should currently be for server
+        versions that don't support postBatchResumeToken. Assumes the stream
+        has never returned any changes if previous_change is None.
+        """
+        if previous_change is None:
+            agg_cmd = listener.started_events[0]
+            stage = agg_cmd.command["pipeline"][0]["$changeStream"]
+            return stage.get("resumeAfter") or stage.get("startAfter")
+
+        return previous_change["_id"]
+
+    def _get_expected_resume_token(self, stream, listener, previous_change=None):
+        """Predicts what the resume token should currently be for server
+        versions that support postBatchResumeToken. Assumes the stream has
+        never returned any changes if previous_change is None. Assumes
+        listener is an AllowListEventListener that listens for aggregate and
+        getMore commands.
+ """ + if previous_change is None or stream._cursor._has_next(): + token = self._get_expected_resume_token_legacy(stream, listener, previous_change) + if token is not None: + return token + + response = listener.succeeded_events[-1].reply + return response["cursor"]["postBatchResumeToken"] + + @no_type_check + async def _test_raises_error_on_missing_id(self, expected_exception): + """AsyncChangeStream will raise an exception if the server response is + missing the resume token. + """ + async with await self.change_stream([{"$project": {"_id": 0}}]) as change_stream: + await self.watched_collection().insert_one({}) + with self.assertRaises(expected_exception): + await anext(change_stream) + # The cursor should now be closed. + with self.assertRaises(StopAsyncIteration): + await anext(change_stream) + + @no_type_check + async def _test_update_resume_token(self, expected_rt_getter): + """AsyncChangeStream must continuously track the last seen resumeToken.""" + client, listener = await self._client_with_listener("aggregate", "getMore") + coll = self.watched_collection(write_concern=WriteConcern("majority")) + async with await self.change_stream_with_client(client) as change_stream: + self.assertEqual( + change_stream.resume_token, expected_rt_getter(change_stream, listener) + ) + for _ in range(3): + await coll.insert_one({}) + change = await anext(change_stream) + self.assertEqual( + change_stream.resume_token, expected_rt_getter(change_stream, listener, change) + ) + + # Prose test no. 1 + @async_client_context.require_version_min(4, 2, 0) + async def test_update_resume_token(self): + await self._test_update_resume_token(self._get_expected_resume_token) + + # Prose test no. 2 + @async_client_context.require_version_min(4, 2, 0) + async def test_raises_error_on_missing_id_418plus(self): + # Server returns an error on 4.1.8+ + await self._test_raises_error_on_missing_id(OperationFailure) + + # Prose test no. 3 + @no_type_check + async def test_resume_on_error(self): + async with await self.change_stream() as change_stream: + await self.insert_one_and_check(change_stream, {"_id": 1}) + # Cause a cursor not found error on the next getMore. + await self.kill_change_stream_cursor(change_stream) + await self.insert_one_and_check(change_stream, {"_id": 2}) + + # Prose test no. 4 + @no_type_check + @async_client_context.require_failCommand_fail_point + async def test_no_resume_attempt_if_aggregate_command_fails(self): + # Set non-retryable error on aggregate command. + fail_point = {"mode": {"times": 1}, "data": {"errorCode": 2, "failCommands": ["aggregate"]}} + client, listener = await self._client_with_listener("aggregate", "getMore") + async with self.fail_point(fail_point): + try: + _ = await self.change_stream_with_client(client) + except OperationFailure: + pass + + # Driver should have attempted aggregate command only once. + self.assertEqual(len(listener.started_events), 1) + self.assertEqual(listener.started_events[0].command_name, "aggregate") + + # Prose test no. 5 - REMOVED + # Prose test no. 6 - SKIPPED + # Reason: readPreference is not configurable using the watch() helpers + # so we can skip this test. Also, PyMongo performs server selection for + # each operation which ensure compliance with this prose test. + + # Prose test no. 7 + @no_type_check + async def test_initial_empty_batch(self): + async with await self.change_stream() as change_stream: + # The first batch should be empty. 
+            self.assertFalse(change_stream._cursor._has_next())
+            cursor_id = change_stream._cursor.cursor_id
+            self.assertTrue(cursor_id)
+            await self.insert_one_and_check(change_stream, {})
+            # Make sure we're still using the same cursor.
+            self.assertEqual(cursor_id, change_stream._cursor.cursor_id)
+
+    # Prose test no. 8
+    @no_type_check
+    async def test_kill_cursors(self):
+        def raise_error():
+            raise ServerSelectionTimeoutError("mock error")
+
+        async with await self.change_stream() as change_stream:
+            await self.insert_one_and_check(change_stream, {"_id": 1})
+            # Cause a cursor not found error on the next getMore.
+            cursor = change_stream._cursor
+            await self.kill_change_stream_cursor(change_stream)
+            cursor.close = raise_error
+            await self.insert_one_and_check(change_stream, {"_id": 2})
+
+    # Prose test no. 10 - SKIPPED
+    # This test is identical to prose test no. 3.
+
+    # Prose test no. 11
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_resumetoken_empty_batch(self):
+        client, listener = await self._client_with_listener("getMore")
+        async with await self.change_stream_with_client(client) as change_stream:
+            self.assertIsNone(await change_stream.try_next())
+            resume_token = change_stream.resume_token
+
+        response = listener.succeeded_events[0].reply
+        self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"])
+
+    # Prose test no. 11
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_resumetoken_exhausted_batch(self):
+        client, listener = await self._client_with_listener("getMore")
+        async with await self.change_stream_with_client(client) as change_stream:
+            await self._populate_and_exhaust_change_stream(change_stream)
+            resume_token = change_stream.resume_token
+
+        response = listener.succeeded_events[-1].reply
+        self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"])
+
+    # Prose test no. 13
+    @no_type_check
+    async def test_resumetoken_partially_iterated_batch(self):
+        # When the batch has been iterated up to but not including the last
+        # element, the resume token should be the _id of the previous change
+        # document.
+        async with await self.change_stream() as change_stream:
+            await self.watched_collection(write_concern=WriteConcern("majority")).insert_many(
+                [{"data": k} for k in range(3)]
+            )
+            for _ in range(2):
+                change = await anext(change_stream)
+            resume_token = change_stream.resume_token
+
+        self.assertEqual(resume_token, change["_id"])
+
+    @no_type_check
+    async def _test_resumetoken_uniterated_nonempty_batch(self, resume_option):
+        # When the batch is not empty and hasn't been iterated at all, the
+        # resume token should be the same as the resume option used.
+        resume_point = await self.get_resume_token()
+
+        # Insert some documents so that firstBatch isn't empty.
+        await self.watched_collection(write_concern=WriteConcern("majority")).insert_many(
+            [{"a": 1}, {"b": 2}, {"c": 3}]
+        )
+
+        # Resume token should be the same as the resume option.
+        async with await self.change_stream(**{resume_option: resume_point}) as change_stream:
+            self.assertTrue(change_stream._cursor._has_next())
+            resume_token = change_stream.resume_token
+            self.assertEqual(resume_token, resume_point)
+
+    # Prose test no. 14
+    @no_type_check
+    @async_client_context.require_no_mongos
+    async def test_resumetoken_uniterated_nonempty_batch_resumeafter(self):
+        await self._test_resumetoken_uniterated_nonempty_batch("resume_after")
+
+    # Prose test no. 14
+    @no_type_check
+    @async_client_context.require_no_mongos
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_resumetoken_uniterated_nonempty_batch_startafter(self):
+        await self._test_resumetoken_uniterated_nonempty_batch("start_after")
+
+    # Prose test no. 17
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_startafter_resume_uses_startafter_after_empty_getMore(self):
+        # Resume should use startAfter after no changes have been returned.
+        resume_point = await self.get_resume_token()
+
+        client, listener = await self._client_with_listener("aggregate")
+        async with await self.change_stream_with_client(
+            client, start_after=resume_point
+        ) as change_stream:
+            self.assertFalse(change_stream._cursor._has_next())  # No changes
+            await change_stream.try_next()  # No changes
+            await self.kill_change_stream_cursor(change_stream)
+            await change_stream.try_next()  # Resume attempt
+
+        response = listener.started_events[-1]
+        self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter"))
+        self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter"))
+
+    # Prose test no. 18
+    @no_type_check
+    @async_client_context.require_version_min(4, 2, 0)
+    async def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self):
+        # Resume should use resumeAfter after some changes have been returned.
+        resume_point = await self.get_resume_token()
+
+        client, listener = await self._client_with_listener("aggregate")
+        async with await self.change_stream_with_client(
+            client, start_after=resume_point
+        ) as change_stream:
+            self.assertFalse(change_stream._cursor._has_next())  # No changes
+            await self.watched_collection().insert_one({})
+            await anext(change_stream)  # Changes
+            await self.kill_change_stream_cursor(change_stream)
+            await change_stream.try_next()  # Resume attempt
+
+        response = listener.started_events[-1]
+        self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter"))
+        self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter"))
+
+    # Prose test no. 19
+    @no_type_check
+    async def test_split_large_change(self):
+        server_version = async_client_context.version
+        if not server_version.at_least(6, 0, 9):
+            self.skipTest("$changeStreamSplitLargeEvent requires MongoDB 6.0.9+")
+        if server_version.at_least(6, 1, 0) and server_version < Version(7, 0, 0):
+            self.skipTest("$changeStreamSplitLargeEvent is not available in 6.x rapid releases")
+        await self.db.drop_collection("test_split_large_change")
+        coll = await self.db.create_collection(
+            "test_split_large_change", changeStreamPreAndPostImages={"enabled": True}
+        )
+        await coll.insert_one({"_id": 1, "value": "q" * 10 * 1024 * 1024})
+        async with await coll.watch(
+            [{"$changeStreamSplitLargeEvent": {}}], full_document_before_change="required"
+        ) as change_stream:
+            await coll.update_one({"_id": 1}, {"$set": {"value": "z" * 10 * 1024 * 1024}})
+            doc_1 = await change_stream.next()
+            self.assertIn("splitEvent", doc_1)
+            self.assertEqual(doc_1["splitEvent"], {"fragment": 1, "of": 2})
+            doc_2 = await change_stream.next()
+            self.assertIn("splitEvent", doc_2)
+            self.assertEqual(doc_2["splitEvent"], {"fragment": 2, "of": 2})
+
+
+class TestClusterAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin):
+    dbs: list
+
+    @async_client_context.require_version_min(4, 2, 0)
+    @async_client_context.require_change_streams
+    async def asyncSetUp(self) -> None:
+        await super().asyncSetUp()
+        self.dbs = [self.db, self.client.pymongo_test_2]
+
+    async def asyncTearDown(self):
+        for db in self.dbs:
+            await self.client.drop_database(db)
+        await super().asyncTearDown()
+
+    async def change_stream_with_client(self, client, *args, **kwargs):
+        return await client.watch(*args, **kwargs)
+
+    async def generate_invalidate_event(self, change_stream):
+        self.skipTest("cluster-level change streams cannot be invalidated")
+
+    async def _test_get_invalidate_event(self, change_stream):
+        # Cluster-level change streams don't get invalidated.
+        pass
+
+    async def _test_invalidate_stops_iteration(self, change_stream):
+        # Cluster-level change streams don't get invalidated.
+        pass
+
+    async def _insert_and_check(self, change_stream, db, collname, doc):
+        coll = db[collname]
+        await coll.insert_one(doc)
+        change = await anext(change_stream)
+        self.assertEqual(change["operationType"], "insert")
+        self.assertEqual(change["ns"], {"db": db.name, "coll": collname})
+        self.assertEqual(change["fullDocument"], doc)
+
+    async def insert_one_and_check(self, change_stream, doc):
+        db = random.choice(self.dbs)
+        collname = self.id()
+        await self._insert_and_check(change_stream, db, collname, doc)
+
+    async def test_simple(self):
+        collnames = self.generate_unique_collnames(3)
+        async with await self.change_stream() as change_stream:
+            for db, collname in product(self.dbs, collnames):
+                await self._insert_and_check(change_stream, db, collname, {"_id": collname})
+
+    @async_client_context.require_sync
+    async def test_aggregate_cursor_blocks(self):
+        """Test that an aggregate cursor blocks until a change is readable."""
+        async with await self.client.admin.aggregate(
+            [{"$changeStream": {"allChangesForCluster": True}}], maxAwaitTimeMS=250
+        ) as change_stream:
+            self._test_next_blocks(change_stream)
+
+    async def test_full_pipeline(self):
+        """$changeStream must be the first stage in a change stream pipeline
+        sent to the server.
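+
+        For example, watch(pipeline=[{"$match": {...}}]) is expected to send
+        [{"$changeStream": {...}}, {"$match": {...}}]; an illustrative sketch
+        of the pipeline shape, not the exact command document.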
+ """ + await self._test_full_pipeline({"allChangesForCluster": True}) + + +class TestAsyncDatabaseAsyncChangeStream(TestAsyncChangeStreamBase, APITestsMixin): + @async_client_context.require_version_min(4, 2, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + + async def change_stream_with_client(self, client, *args, **kwargs): + return await client[self.db.name].watch(*args, **kwargs) + + async def generate_invalidate_event(self, change_stream): + # Dropping the database invalidates the change stream. + await change_stream._client.drop_database(self.db.name) + + async def _test_get_invalidate_event(self, change_stream): + # Cache collection names. + dropped_colls = await self.db.list_collection_names() + # Drop the watched database to get an invalidate event. + await self.generate_invalidate_event(change_stream) + change = await change_stream.next() + # 4.1+ returns "drop" events for each collection in dropped database + # and a "dropDatabase" event for the database itself. + if change["operationType"] == "drop": + self.assertTrue(change["_id"]) + for _ in range(len(dropped_colls)): + ns = change["ns"] + self.assertEqual(ns["db"], change_stream._target.name) + self.assertIn(ns["coll"], dropped_colls) + change = await change_stream.next() + self.assertEqual(change["operationType"], "dropDatabase") + self.assertTrue(change["_id"]) + self.assertEqual(change["ns"], {"db": change_stream._target.name}) + # Get next change. + change = await change_stream.next() + self.assertTrue(change["_id"]) + self.assertEqual(change["operationType"], "invalidate") + self.assertNotIn("ns", change) + self.assertNotIn("fullDocument", change) + # The AsyncChangeStream should be dead. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + + async def _test_invalidate_stops_iteration(self, change_stream): + # Drop the watched database to get an invalidate event. + await change_stream._client.drop_database(self.db.name) + # Check drop and dropDatabase events. + async for change in change_stream: + self.assertIn(change["operationType"], ("drop", "dropDatabase", "invalidate")) + # Last change must be invalidate. + self.assertEqual(change["operationType"], "invalidate") + # Change stream must not allow further iteration. + with self.assertRaises(StopAsyncIteration): + await change_stream.next() + with self.assertRaises(StopAsyncIteration): + await anext(change_stream) + + async def _insert_and_check(self, change_stream, collname, doc): + coll = self.db[collname] + await coll.insert_one(doc) + change = await anext(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"], {"db": self.db.name, "coll": collname}) + self.assertEqual(change["fullDocument"], doc) + + async def insert_one_and_check(self, change_stream, doc): + await self._insert_and_check(change_stream, self.id(), doc) + + async def test_simple(self): + collnames = self.generate_unique_collnames(3) + async with await self.change_stream() as change_stream: + for collname in collnames: + await self._insert_and_check( + change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())} + ) + + async def test_isolation(self): + # Ensure inserts to other dbs don't show up in our AsyncChangeStream. 
+        other_db = self.client.pymongo_test_temp
+        self.assertNotEqual(other_db, self.db, msg="Isolation must be tested on separate DBs")
+        collname = self.id()
+        async with await self.change_stream() as change_stream:
+            await other_db[collname].insert_one({"_id": Binary.from_uuid(uuid.uuid4())})
+            await self._insert_and_check(
+                change_stream, collname, {"_id": Binary.from_uuid(uuid.uuid4())}
+            )
+        await self.client.drop_database(other_db)
+
+
+class TestAsyncCollectionAsyncChangeStream(
+    TestAsyncChangeStreamBase, APITestsMixin, ProseSpecTestsMixin
+):
+    @async_client_context.require_change_streams
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        # Use a new collection for each test.
+        await self.watched_collection().drop()
+        await self.watched_collection().insert_one({})
+
+    async def change_stream_with_client(self, client, *args, **kwargs):
+        return (
+            await client[self.db.name]
+            .get_collection(self.watched_collection().name)
+            .watch(*args, **kwargs)
+        )
+
+    async def generate_invalidate_event(self, change_stream):
+        # Dropping the collection invalidates the change stream.
+        await change_stream._target.drop()
+
+    async def _test_invalidate_stops_iteration(self, change_stream):
+        await self.generate_invalidate_event(change_stream)
+        # Check drop and invalidate events.
+        async for change in change_stream:
+            self.assertIn(change["operationType"], ("drop", "invalidate"))
+        # Last change must be invalidate.
+        self.assertEqual(change["operationType"], "invalidate")
+        # Change stream must not allow further iteration.
+        with self.assertRaises(StopAsyncIteration):
+            await change_stream.next()
+        with self.assertRaises(StopAsyncIteration):
+            await anext(change_stream)
+
+    async def _test_get_invalidate_event(self, change_stream):
+        # Drop the watched collection to get an invalidate event.
+        await change_stream._target.drop()
+        change = await change_stream.next()
+        # 4.1+ returns a "drop" change document.
+        if change["operationType"] == "drop":
+            self.assertTrue(change["_id"])
+            self.assertEqual(
+                change["ns"],
+                {"db": change_stream._target.database.name, "coll": change_stream._target.name},
+            )
+            # Last change should be invalidate.
+            change = await change_stream.next()
+        self.assertTrue(change["_id"])
+        self.assertEqual(change["operationType"], "invalidate")
+        self.assertNotIn("ns", change)
+        self.assertNotIn("fullDocument", change)
+        # The AsyncChangeStream should be dead.
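+        # (Per the change streams spec, an invalidate event ends the stream:
+        # the server closes the cursor and the driver must not resume, so
+        # further iteration raises StopAsyncIteration.)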
+ with self.assertRaises(StopAsyncIteration): + await change_stream.next() + + async def insert_one_and_check(self, change_stream, doc): + await self.watched_collection().insert_one(doc) + change = await anext(change_stream) + self.assertEqual(change["operationType"], "insert") + self.assertEqual( + change["ns"], + {"db": self.watched_collection().database.name, "coll": self.watched_collection().name}, + ) + self.assertEqual(change["fullDocument"], doc) + + async def test_raw(self): + """Test with RawBSONDocument.""" + raw_coll = self.watched_collection(codec_options=DEFAULT_RAW_BSON_OPTIONS) + async with await raw_coll.watch() as change_stream: + raw_doc = RawBSONDocument(encode({"_id": 1})) + await self.watched_collection().insert_one(raw_doc) + change = await anext(change_stream) + self.assertIsInstance(change, RawBSONDocument) + self.assertEqual(change["operationType"], "insert") + self.assertEqual(change["ns"]["db"], self.watched_collection().database.name) + self.assertEqual(change["ns"]["coll"], self.watched_collection().name) + self.assertEqual(change["fullDocument"], raw_doc) + + @async_client_context.require_version_min(4, 0) # Needed for start_at_operation_time. + async def test_uuid_representations(self): + """Test with uuid document _ids and different uuid_representation.""" + optime = (await self.db.command("ping"))["operationTime"] + await self.watched_collection().insert_many( + [ + {"_id": Binary(uuid.uuid4().bytes, id_subtype)} + for id_subtype in (STANDARD, PYTHON_LEGACY) + ] + ) + for uuid_representation in ALL_UUID_REPRESENTATIONS: + options = self.watched_collection().codec_options.with_options( + uuid_representation=uuid_representation + ) + coll = self.watched_collection(codec_options=options) + async with await coll.watch( + start_at_operation_time=optime, max_await_time_ms=1 + ) as change_stream: + _ = await change_stream.next() + resume_token_1 = change_stream.resume_token + _ = await change_stream.next() + resume_token_2 = change_stream.resume_token + + # Should not error. + async with await coll.watch(resume_after=resume_token_1): + pass + async with await coll.watch(resume_after=resume_token_2): + pass + + async def test_document_id_order(self): + """Test with document _ids that need their order preserved.""" + random_keys = random.sample(string.ascii_letters, len(string.ascii_letters)) + random_doc = {"_id": SON([(key, key) for key in random_keys])} + for document_class in (dict, SON, RawBSONDocument): + options = self.watched_collection().codec_options.with_options( + document_class=document_class + ) + coll = self.watched_collection(codec_options=options) + async with await coll.watch() as change_stream: + await coll.insert_one(random_doc) + _ = await change_stream.next() + resume_token = change_stream.resume_token + + # The resume token is always a document. + self.assertIsInstance(resume_token, document_class) + # Should not error. + async with await coll.watch(resume_after=resume_token): + pass + await coll.delete_many({}) + + async def test_read_concern(self): + """Test readConcern is not validated by the driver.""" + # Read concern 'local' is not allowed for $changeStream. + coll = self.watched_collection(read_concern=ReadConcern("local")) + with self.assertRaises(OperationFailure): + await coll.watch() + + # Does not error. 
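+        # (The server rejects readConcern "local" for $changeStream, as seen
+        # above, while "majority" is accepted.)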
+        coll = self.watched_collection(read_concern=ReadConcern("majority"))
+        async with await coll.watch():
+            pass
+
+
+class TestAllLegacyScenarios(AsyncIntegrationTest):
+    RUN_ON_LOAD_BALANCER = True
+    listener: AllowListEventListener
+
+    @async_client_context.require_connection
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.listener = AllowListEventListener("aggregate", "getMore")
+        self.client = await self.async_rs_or_single_client(event_listeners=[self.listener])
+        self.listener.reset()
+
+    async def asyncSetUpCluster(self, scenario_dict):
+        assets = [
+            (scenario_dict["database_name"], scenario_dict["collection_name"]),
+            (
+                scenario_dict.get("database2_name", "db2"),
+                scenario_dict.get("collection2_name", "coll2"),
+            ),
+        ]
+        for db, coll in assets:
+            await self.client.drop_database(db)
+            await self.client[db].create_collection(coll)
+
+    async def setFailPoint(self, scenario_dict):
+        fail_point = scenario_dict.get("failPoint")
+        if fail_point is None:
+            return
+        elif not async_client_context.test_commands_enabled:
+            self.skipTest("Test commands must be enabled")
+
+        fail_cmd = SON([("configureFailPoint", "failCommand")])
+        fail_cmd.update(fail_point)
+        await async_client_context.client.admin.command(fail_cmd)
+        self.addAsyncCleanup(
+            async_client_context.client.admin.command,
+            "configureFailPoint",
+            fail_cmd["configureFailPoint"],
+            mode="off",
+        )
+
+    def assert_list_contents_are_subset(self, superlist, sublist):
+        """Check that each element in sublist is a subset of the corresponding
+        element in superlist.
+        """
+        self.assertEqual(len(superlist), len(sublist))
+        for sup, sub in zip(superlist, sublist):
+            if isinstance(sub, dict):
+                self.assert_dict_is_subset(sup, sub)
+                continue
+            if isinstance(sub, (list, tuple)):
+                self.assert_list_contents_are_subset(sup, sub)
+                continue
+            self.assertEqual(sup, sub)
+
+    def assert_dict_is_subset(self, superdict, subdict):
+        """Check that subdict is a subset of superdict."""
+        exempt_fields = ["documentKey", "_id", "getMore"]
+        for key, value in subdict.items():
+            if key not in superdict:
+                self.fail(f"Key {key} not found in {superdict}")
+            if isinstance(value, dict):
+                self.assert_dict_is_subset(superdict[key], value)
+                continue
+            if isinstance(value, (list, tuple)):
+                self.assert_list_contents_are_subset(superdict[key], value)
+                continue
+            if key in exempt_fields:
+                # Only check for presence of these exempt fields, but not value.
+                self.assertIn(key, superdict)
+            else:
+                self.assertEqual(superdict[key], value)
+
+    def check_event(self, event, expectation_dict):
+        if event is None:
+            self.fail("Expected an event but got None")
+        for key, value in expectation_dict.items():
+            if isinstance(value, dict):
+                self.assert_dict_is_subset(getattr(event, key), value)
+            else:
+                self.assertEqual(getattr(event, key), value)
+
+    async def asyncTearDown(self):
+        self.listener.reset()
+
+
+_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams")
+
+
+globals().update(
+    generate_test_classes(
+        os.path.join(_TEST_PATH, "unified"),
+        module=__name__,
+    )
+)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_client.py b/test/asynchronous/test_client.py
new file mode 100644
index 0000000000..6794605339
--- /dev/null
+++ b/test/asynchronous/test_client.py
@@ -0,0 +1,2700 @@
+# Copyright 2013-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the mongo_client module.""" +from __future__ import annotations + +import _thread as thread +import asyncio +import base64 +import contextlib +import copy +import datetime +import gc +import logging +import os +import re +import signal +import socket +import struct +import subprocess +import sys +import threading +import time +import uuid +from typing import Any, Iterable, Type, no_type_check +from unittest import mock, skipIf +from unittest.mock import patch + +import pytest +import pytest_asyncio + +from bson.binary import CSHARP_LEGACY, JAVA_LEGACY, PYTHON_LEGACY, Binary, UuidRepresentation +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import ( + HAVE_IPADDRESS, + AsyncIntegrationTest, + AsyncMockClientTest, + AsyncUnitTest, + SkipTest, + async_client_context, + client_knobs, + connected, + db_pwd, + db_user, + remove_all_users, + unittest, +) +from test.asynchronous.pymongo_mocks import AsyncMockClient +from test.asynchronous.utils import ( + async_get_pool, + async_wait_until, + asyncAssertRaisesExactly, +) +from test.test_binary import BinaryData +from test.utils_shared import ( + NTHREADS, + CMAPListener, + FunctionCallRecorder, + delay, + gevent_monkey_patched, + is_greenthread_patched, + lazy_client_trial, + one, +) + +import bson +import pymongo +from bson import encode +from bson.codec_options import ( + CodecOptions, + DatetimeConversion, + TypeEncoder, + TypeRegistry, +) +from bson.son import SON +from bson.tz_util import utc +from pymongo import event_loggers, message, monitoring +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor, CursorType +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.asynchronous.pool import ( + AsyncConnection, +) +from pymongo.asynchronous.settings import TOPOLOGY_TYPE +from pymongo.asynchronous.topology import _ErrorContext +from pymongo.client_options import ClientOptions +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT, MIN_SUPPORTED_WIRE_VERSION, has_c +from pymongo.compression_support import _have_snappy, _have_zstd +from pymongo.driver_info import DriverInfo +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + ConnectionFailure, + InvalidName, + InvalidOperation, + InvalidURI, + NetworkTimeout, + OperationFailure, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, +) +from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent +from pymongo.pool_options import _MAX_METADATA_SIZE, _METADATA, ENV_VAR_K8S, PoolOptions +from pymongo.read_preferences import ReadPreference +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import readable_server_selector, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TopologyDescription +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncClientUnitTest(AsyncUnitTest): + """AsyncMongoClient 
tests that don't require a server.""" + + client: AsyncMongoClient + + async def asyncSetUp(self) -> None: + self.client = await self.async_rs_or_single_client( + connect=False, serverSelectionTimeoutMS=100 + ) + + @pytest.fixture(autouse=True) + def inject_fixtures(self, caplog): + self._caplog = caplog + + async def test_keyword_arg_defaults(self): + client = self.simple_client( + socketTimeoutMS=None, + connectTimeoutMS=20000, + waitQueueTimeoutMS=None, + replicaSet=None, + read_preference=ReadPreference.PRIMARY, + ssl=False, + tlsCertificateKeyFile=None, + tlsAllowInvalidCertificates=True, + tlsCAFile=None, + connect=False, + serverSelectionTimeoutMS=12000, + ) + + options = client.options + pool_opts = options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + # socket.Socket.settimeout takes a float in seconds + self.assertEqual(20.0, pool_opts.connect_timeout) + self.assertEqual(None, pool_opts.wait_queue_timeout) + self.assertEqual(None, pool_opts._ssl_context) + self.assertEqual(None, options.replica_set_name) + self.assertEqual(ReadPreference.PRIMARY, client.read_preference) + self.assertAlmostEqual(12, client.options.server_selection_timeout) + + async def test_connect_timeout(self): + client = self.simple_client(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + + client = self.simple_client(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + + client = self.simple_client( + "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False + ) + pool_opts = client.options.pool_options + self.assertEqual(None, pool_opts.socket_timeout) + self.assertEqual(None, pool_opts.connect_timeout) + + def test_types(self): + self.assertRaises(TypeError, AsyncMongoClient, 1) + self.assertRaises(TypeError, AsyncMongoClient, 1.14) + self.assertRaises(TypeError, AsyncMongoClient, "localhost", "27017") + self.assertRaises(TypeError, AsyncMongoClient, "localhost", 1.14) + self.assertRaises(TypeError, AsyncMongoClient, "localhost", []) + + self.assertRaises(ConfigurationError, AsyncMongoClient, []) + + async def test_max_pool_size_zero(self): + self.simple_client(maxPoolSize=0) + + def test_uri_detection(self): + self.assertRaises(ConfigurationError, AsyncMongoClient, "/foo") + self.assertRaises(ConfigurationError, AsyncMongoClient, "://") + self.assertRaises(ConfigurationError, AsyncMongoClient, "foo/") + + def test_get_db(self): + def make_db(base, name): + return base[name] + + self.assertRaises(InvalidName, make_db, self.client, "") + self.assertRaises(InvalidName, make_db, self.client, "te$t") + self.assertRaises(InvalidName, make_db, self.client, "te.t") + self.assertRaises(InvalidName, make_db, self.client, "te\\t") + self.assertRaises(InvalidName, make_db, self.client, "te/t") + self.assertRaises(InvalidName, make_db, self.client, "te st") + + self.assertIsInstance(self.client.test, AsyncDatabase) + self.assertEqual(self.client.test, self.client["test"]) + self.assertEqual(self.client.test, AsyncDatabase(self.client, "test")) + + def test_get_database(self): + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + db = self.client.get_database("foo", codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", 
db.name) + self.assertEqual(codec_options, db.codec_options) + self.assertEqual(ReadPreference.SECONDARY, db.read_preference) + self.assertEqual(write_concern, db.write_concern) + + def test_getattr(self): + self.assertIsInstance(self.client["_does_not_exist"], AsyncDatabase) + + with self.assertRaises(AttributeError) as context: + self.client._does_not_exist + + # Message should be: + # "AttributeError: AsyncMongoClient has no attribute '_does_not_exist'. To + # access the _does_not_exist database, use client['_does_not_exist']". + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + def test_iteration(self): + client = self.client + msg = "'AsyncMongoClient' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in client: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = client[0] + # next fails + with self.assertRaisesRegex(TypeError, "'AsyncMongoClient' object is not iterable"): + _ = next(client) + # .next() fails + with self.assertRaisesRegex(TypeError, "'AsyncMongoClient' object is not iterable"): + _ = client.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(client, Iterable) + + async def test_get_default_database(self): + c = await self.async_rs_or_single_client( + "mongodb://%s:%d/foo" + % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database()) + # Test that default doesn't override the URI value. + self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database("bar")) + + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + db = c.get_default_database(None, codec_options, ReadPreference.SECONDARY, write_concern) + self.assertEqual("foo", db.name) + self.assertEqual(codec_options, db.codec_options) + self.assertEqual(ReadPreference.SECONDARY, db.read_preference) + self.assertEqual(write_concern, db.write_concern) + + c = await self.async_rs_or_single_client( + "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database("foo")) + + async def test_get_default_database_error(self): + # URI with no database. + c = await self.async_rs_or_single_client( + "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertRaises(ConfigurationError, c.get_default_database) + + async def test_get_default_database_with_authsource(self): + # Ensure we distinguish database name from authSource. + uri = "mongodb://%s:%d/foo?authSource=src" % ( + await async_client_context.host, + await async_client_context.port, + ) + c = await self.async_rs_or_single_client(uri, connect=False) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_default_database()) + + async def test_get_database_default(self): + c = await self.async_rs_or_single_client( + "mongodb://%s:%d/foo" + % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_database()) + + async def test_get_database_default_error(self): + # URI with no database. 
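+        # (Neither a default database in the URI nor a name argument is given,
+        # so get_database() below has nothing to fall back on.)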
+ c = await self.async_rs_or_single_client( + "mongodb://%s:%d/" % (await async_client_context.host, await async_client_context.port), + connect=False, + ) + self.assertRaises(ConfigurationError, c.get_database) + + async def test_get_database_default_with_authsource(self): + # Ensure we distinguish database name from authSource. + uri = "mongodb://%s:%d/foo?authSource=src" % ( + await async_client_context.host, + await async_client_context.port, + ) + c = await self.async_rs_or_single_client(uri, connect=False) + self.assertEqual(AsyncDatabase(c, "foo"), c.get_database()) + + async def test_primary_read_pref_with_tags(self): + # No tags allowed with "primary". + with self.assertRaises(ConfigurationError): + await self.async_single_client("mongodb://host/?readpreferencetags=dc:east") + + with self.assertRaises(ConfigurationError): + await self.async_single_client( + "mongodb://host/?readpreference=primary&readpreferencetags=dc:east" + ) + + async def test_read_preference(self): + c = await self.async_rs_or_single_client( + "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode + ) + self.assertEqual(c.read_preference, ReadPreference.NEAREST) + + async def test_metadata(self): + metadata = copy.deepcopy(_METADATA) + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async" + else: + metadata["driver"]["name"] = "PyMongo|async" + metadata["application"] = {"name": "foobar"} + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + client = self.simple_client("foo", 27017, appname="foobar", connect=False) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + # No error + self.simple_client(appname="x" * 128) + with self.assertRaises(ValueError): + self.simple_client(appname="x" * 129) + # Bad "driver" options. + self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") + self.assertRaises(TypeError, DriverInfo, version="1", platform="a") + self.assertRaises(TypeError, DriverInfo) + with self.assertRaises(TypeError): + self.simple_client(driver=1) + with self.assertRaises(TypeError): + self.simple_client(driver="abc") + with self.assertRaises(TypeError): + self.simple_client(driver=("Foo", "1", "a")) + # Test appending to driver info. + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async|FooDriver" + else: + metadata["driver"]["name"] = "PyMongo|async|FooDriver" + metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) + client = self.simple_client( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", None), + connect=False, + ) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) + client = self.simple_client( + "foo", + 27017, + appname="foobar", + driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"), + connect=False, + ) + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + # Test truncating driver info metadata. 
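+        # (The BSON-encoded handshake document is capped at _MAX_METADATA_SIZE
+        # bytes; the assertions below only check the encoded size, not which
+        # fields were truncated to fit.)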
+        client = self.simple_client(
+            driver=DriverInfo(name="s" * _MAX_METADATA_SIZE),
+            connect=False,
+        )
+        options = client.options
+        self.assertLessEqual(
+            len(bson.encode(options.pool_options.metadata)),
+            _MAX_METADATA_SIZE,
+        )
+        client = self.simple_client(
+            driver=DriverInfo(name="s" * _MAX_METADATA_SIZE, version="s" * _MAX_METADATA_SIZE),
+            connect=False,
+        )
+        options = client.options
+        self.assertLessEqual(
+            len(bson.encode(options.pool_options.metadata)),
+            _MAX_METADATA_SIZE,
+        )
+
+    @mock.patch.dict("os.environ", {ENV_VAR_K8S: "1"})
+    def test_container_metadata(self):
+        metadata = copy.deepcopy(_METADATA)
+        metadata["driver"]["name"] = "PyMongo|async"
+        metadata["env"] = {}
+        metadata["env"]["container"] = {"orchestrator": "kubernetes"}
+        client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false")
+        options = client.options
+        self.assertEqual(options.pool_options.metadata["env"], metadata["env"])
+
+    async def test_kwargs_codec_options(self):
+        class MyFloatType:
+            def __init__(self, x):
+                self.__x = x
+
+            @property
+            def x(self):
+                return self.__x
+
+        class MyFloatAsIntEncoder(TypeEncoder):
+            python_type = MyFloatType
+
+            def transform_python(self, value):
+                # Convert via the x property; MyFloatType itself does not
+                # implement __int__.
+                return int(value.x)
+
+        # Ensure codec options are passed in correctly
+        document_class: Type[SON] = SON
+        type_registry = TypeRegistry([MyFloatAsIntEncoder()])
+        tz_aware = True
+        uuid_representation_label = "javaLegacy"
+        unicode_decode_error_handler = "ignore"
+        tzinfo = utc
+        c = self.simple_client(
+            document_class=document_class,
+            type_registry=type_registry,
+            tz_aware=tz_aware,
+            uuidrepresentation=uuid_representation_label,
+            unicode_decode_error_handler=unicode_decode_error_handler,
+            tzinfo=tzinfo,
+            connect=False,
+        )
+        self.assertEqual(c.codec_options.document_class, document_class)
+        self.assertEqual(c.codec_options.type_registry, type_registry)
+        self.assertEqual(c.codec_options.tz_aware, tz_aware)
+        self.assertEqual(
+            c.codec_options.uuid_representation,
+            _UUID_REPRESENTATIONS[uuid_representation_label],
+        )
+        self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler)
+        self.assertEqual(c.codec_options.tzinfo, tzinfo)
+
+    async def test_uri_codec_options(self):
+        # Ensure codec options are passed in correctly
+        uuid_representation_label = "javaLegacy"
+        unicode_decode_error_handler = "ignore"
+        datetime_conversion = "DATETIME_CLAMP"
+        uri = (
+            "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation="
+            "%s&unicode_decode_error_handler=%s"
+            "&datetime_conversion=%s"
+            % (
+                await async_client_context.host,
+                await async_client_context.port,
+                uuid_representation_label,
+                unicode_decode_error_handler,
+                datetime_conversion,
+            )
+        )
+        c = self.simple_client(uri, connect=False)
+        self.assertEqual(c.codec_options.tz_aware, True)
+        self.assertEqual(
+            c.codec_options.uuid_representation,
+            _UUID_REPRESENTATIONS[uuid_representation_label],
+        )
+        self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler)
+        self.assertEqual(
+            c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion]
+        )
+
+        # Change the passed datetime_conversion to a number and re-assert.
+        uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}")
+        c = self.simple_client(uri, connect=False)
+        self.assertEqual(
+            c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion]
+        )
+
+    async def test_uri_option_precedence(self):
+        # Ensure kwarg options override connection string options.
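+        # (E.g. ssl=False passed as a kwarg below wins over ssl=true in the
+        # URI; the resolved option is reported under its canonical name "tls".)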
+ uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" + c = self.simple_client( + uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred" + ) + clopts = c.options + opts = clopts._options + + self.assertEqual(opts["tls"], False) + self.assertEqual(clopts.replica_set_name, "newname") + self.assertEqual(clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) + + async def test_connection_timeout_ms_propagates_to_DNS_resolver(self): + # Patch the resolver. + from pymongo.asynchronous.srv_resolver import _resolve + + patched_resolver = FunctionCallRecorder(_resolve) + pymongo.asynchronous.srv_resolver._resolve = patched_resolver + + def reset_resolver(): + pymongo.asynchronous.srv_resolver._resolve = _resolve + + self.addCleanup(reset_resolver) + + # Setup. + base_uri = "mongodb+srv://test5.test.build.10gen.cc" + connectTimeoutMS = 5000 + expected_kw_value = 5.0 + uri_with_timeout = base_uri + "/?connectTimeoutMS=6000" + expected_uri_value = 6.0 + + async def test_scenario(args, kwargs, expected_value): + patched_resolver.reset() + self.simple_client(*args, **kwargs) + for _, kw in patched_resolver.call_list(): + self.assertAlmostEqual(kw["lifetime"], expected_value) + + # No timeout specified. + await test_scenario((base_uri,), {}, CONNECT_TIMEOUT) + + # Timeout only specified in connection string. + await test_scenario((uri_with_timeout,), {}, expected_uri_value) + + # Timeout only specified in keyword arguments. + kwarg = {"connectTimeoutMS": connectTimeoutMS} + await test_scenario((base_uri,), kwarg, expected_kw_value) + + # Timeout specified in both kwargs and connection string. + await test_scenario((uri_with_timeout,), kwarg, expected_kw_value) + + async def test_uri_security_options(self): + # Ensure that we don't silently override security-related options. + with self.assertRaises(InvalidURI): + self.simple_client("mongodb://localhost/?ssl=true", tls=False, connect=False) + + # Matching SSL and TLS options should not cause errors. + c = self.simple_client("mongodb://localhost/?ssl=false", tls=False, connect=False) + self.assertEqual(c.options._options["tls"], False) + + # Conflicting tlsInsecure options should raise an error. + with self.assertRaises(InvalidURI): + self.simple_client( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidHostnames=True, + ) + + # Conflicting legacy tlsInsecure options should also raise an error. 
+ with self.assertRaises(InvalidURI): + self.simple_client( + "mongodb://localhost/?tlsInsecure=true", + connect=False, + tlsAllowInvalidCertificates=False, + ) + + # Conflicting kwargs should raise InvalidURI + with self.assertRaises(InvalidURI): + self.simple_client(ssl=True, tls=False) + + async def test_event_listeners(self): + c = self.simple_client(event_listeners=[], connect=False) + self.assertEqual(c.options.event_listeners, []) + listeners = [ + event_loggers.CommandLogger(), + event_loggers.HeartbeatLogger(), + event_loggers.ServerLogger(), + event_loggers.TopologyLogger(), + event_loggers.ConnectionPoolLogger(), + ] + c = self.simple_client(event_listeners=listeners, connect=False) + self.assertEqual(c.options.event_listeners, listeners) + + async def test_client_options(self): + c = self.simple_client(connect=False) + self.assertIsInstance(c.options, ClientOptions) + self.assertIsInstance(c.options.pool_options, PoolOptions) + self.assertEqual(c.options.server_selection_timeout, 30) + self.assertEqual(c.options.pool_options.max_idle_time_seconds, None) + self.assertIsInstance(c.options.retry_writes, bool) + self.assertIsInstance(c.options.retry_reads, bool) + + def test_validate_suggestion(self): + """Validate kwargs in constructor.""" + for typo in ["auth", "Auth", "AUTH"]: + expected = f"Unknown option: {typo}. Did you mean one of (authsource, authmechanism, authoidcallowedhosts) or maybe a camelCase version of one? Refer to docstring." + expected = re.escape(expected) + with self.assertRaisesRegex(ConfigurationError, expected): + AsyncMongoClient(**{typo: "standard"}) # type: ignore[arg-type] + + @patch("pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts") + def test_detected_environment_logging(self, mock_get_hosts): + normal_hosts = [ + "normal.host.com", + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + with self.assertLogs("pymongo", level="INFO") as cm: + for host in normal_hosts: + AsyncMongoClient(host, connect=False) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + AsyncMongoClient(host, connect=False) + AsyncMongoClient(multi_host, connect=False) + logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] + self.assertEqual(len(logs), 7) + + @skipIf(os.environ.get("DEBUG_LOG"), "Enabling debug logs breaks this test") + @patch("pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts") + async def test_detected_environment_warning(self, mock_get_hosts): + with self._caplog.at_level(logging.WARN): + normal_hosts = [ + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + for host in normal_hosts: + with self.assertWarns(UserWarning): + self.simple_client(host) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + with self.assertWarns(UserWarning): + self.simple_client(host) + with self.assertWarns(UserWarning): + self.simple_client(multi_host) + + +class TestClient(AsyncIntegrationTest): + def test_multiple_uris(self): + with self.assertRaises(ConfigurationError): + AsyncMongoClient( + host=[ + "mongodb+srv://cluster-a.abc12.mongodb.net", + 
"mongodb+srv://cluster-b.abc12.mongodb.net", + "mongodb+srv://cluster-c.abc12.mongodb.net", + ] + ) + + async def test_max_idle_time_reaper_default(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper doesn't remove connections when maxIdleTimeMS not set + client = await self.async_rs_or_single_client() + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn: + pass + self.assertEqual(1, len(server._pool.conns)) + self.assertIn(conn, server._pool.conns) + + async def test_max_idle_time_reaper_removes_stale_minPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper removes idle socket and replaces it with a new one + client = await self.async_rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn: + pass + # When the reaper runs at the same time as the get_socket, two + # connections could be created and checked into the pool. + self.assertGreaterEqual(len(server._pool.conns), 1) + await async_wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + await async_wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") + + async def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper respects maxPoolSize when adding new connections. + client = await self.async_rs_or_single_client( + maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1 + ) + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn: + pass + # When the reaper runs at the same time as the get_socket, + # maxPoolSize=1 should prevent two connections from being created. + self.assertEqual(1, len(server._pool.conns)) + await async_wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + await async_wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") + + async def test_max_idle_time_reaper_removes_stale(self): + with client_knobs(kill_cursor_frequency=0.1): + # Assert reaper has removed idle socket and NOT replaced it + client = await self.async_rs_or_single_client(maxIdleTimeMS=500) + server = await (await client._get_topology()).select_server( + readable_server_selector, _Op.TEST + ) + async with server._pool.checkout() as conn_one: + pass + # Assert that the pool does not close connections prematurely. 
+            await asyncio.sleep(0.300)
+            async with server._pool.checkout() as conn_two:
+                pass
+            self.assertIs(conn_one, conn_two)
+            await async_wait_until(
+                lambda: len(server._pool.conns) == 0,
+                "stale socket reaped and new one NOT added to the pool",
+            )
+
+    async def test_min_pool_size(self):
+        with client_knobs(kill_cursor_frequency=0.1):
+            client = await self.async_rs_or_single_client()
+            server = await (await client._get_topology()).select_server(
+                readable_server_selector, _Op.TEST
+            )
+            self.assertEqual(0, len(server._pool.conns))
+
+            # Assert that pool started up at minPoolSize
+            client = await self.async_rs_or_single_client(minPoolSize=10)
+            server = await (await client._get_topology()).select_server(
+                readable_server_selector, _Op.TEST
+            )
+            await async_wait_until(
+                lambda: len(server._pool.conns) == 10,
+                "pool initialized with 10 connections",
+            )
+
+            # Assert that if a socket is closed, a new one takes its place
+            async with server._pool.checkout() as conn:
+                await conn.close_conn(None)
+            await async_wait_until(
+                lambda: len(server._pool.conns) == 10,
+                "a closed socket gets replaced from the pool",
+            )
+            self.assertNotIn(conn, server._pool.conns)
+
+    async def test_max_idle_time_checkout(self):
+        # Use a very long reaper interval so that stale sockets are closed at
+        # checkout time (_get_socket_no_auth) rather than by the reaper.
+        with client_knobs(kill_cursor_frequency=99999999):
+            client = await self.async_rs_or_single_client(maxIdleTimeMS=500)
+            server = await (await client._get_topology()).select_server(
+                readable_server_selector, _Op.TEST
+            )
+            async with server._pool.checkout() as conn:
+                pass
+            self.assertEqual(1, len(server._pool.conns))
+            await asyncio.sleep(1)  # Sleep so that the socket becomes stale.
+
+            async with server._pool.checkout() as new_con:
+                self.assertNotEqual(conn, new_con)
+            self.assertEqual(1, len(server._pool.conns))
+            self.assertNotIn(conn, server._pool.conns)
+            self.assertIn(new_con, server._pool.conns)
+
+            # Test that connections are reused if maxIdleTimeMS is not set.
+            client = await self.async_rs_or_single_client()
+            server = await (await client._get_topology()).select_server(
+                readable_server_selector, _Op.TEST
+            )
+            async with server._pool.checkout() as conn:
+                pass
+            self.assertEqual(1, len(server._pool.conns))
+            await asyncio.sleep(1)
+            async with server._pool.checkout() as new_con:
+                self.assertEqual(conn, new_con)
+            self.assertEqual(1, len(server._pool.conns))
+
+    async def test_constants(self):
+        """This test uses AsyncMongoClient explicitly to make sure that host and
+        port are not overloaded.
+        """
+        host, port = await async_client_context.host, await async_client_context.port
+        kwargs: dict = async_client_context.default_client_options.copy()
+        if async_client_context.auth_enabled:
+            kwargs["username"] = db_user
+            kwargs["password"] = db_pwd
+
+        # Set bad defaults.
+        AsyncMongoClient.HOST = "somedomainthatdoesntexist.org"
+        AsyncMongoClient.PORT = 123456789
+        with self.assertRaises(AutoReconnect):
+            c = self.simple_client(serverSelectionTimeoutMS=10, **kwargs)
+            await connected(c)
+
+        c = self.simple_client(host, port, **kwargs)
+        # Override the defaults. No error.
+        await connected(c)
+
+        # Set good defaults.
+        AsyncMongoClient.HOST = host
+        AsyncMongoClient.PORT = port
+
+        # No error.
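+        # (HOST and PORT now point at the test server, so a client built with
+        # no explicit address can connect.)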
+ c = self.simple_client(**kwargs) + await connected(c) + + async def test_init_disconnected(self): + host, port = await async_client_context.host, await async_client_context.port + c = await self.async_rs_or_single_client(connect=False) + # is_primary causes client to block until connected + self.assertIsInstance(await c.is_primary, bool) + c = await self.async_rs_or_single_client(connect=False) + self.assertIsInstance(await c.is_mongos, bool) + c = await self.async_rs_or_single_client(connect=False) + self.assertIsInstance(c.options.pool_options.max_pool_size, int) + self.assertIsInstance(c.nodes, frozenset) + + c = await self.async_rs_or_single_client(connect=False) + self.assertEqual(c.codec_options, CodecOptions()) + c = await self.async_rs_or_single_client(connect=False) + self.assertFalse(await c.primary) + self.assertFalse(await c.secondaries) + c = await self.async_rs_or_single_client(connect=False) + self.assertIsInstance(c.topology_description, TopologyDescription) + self.assertEqual(c.topology_description, c._topology._description) + if async_client_context.is_rs: + # The primary's host and port are from the replica set config. + self.assertIsNotNone(await c.address) + else: + self.assertEqual(await c.address, (host, port)) + + bad_host = "somedomainthatdoesntexist.org" + c = self.simple_client(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + with self.assertRaises(ConnectionFailure): + await c.pymongo_test.test.find_one() + + async def test_init_disconnected_with_auth(self): + uri = "mongodb://user:pass@somedomainthatdoesntexist" + c = self.simple_client(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + with self.assertRaises(ConnectionFailure): + await c.pymongo_test.test.find_one() + + @async_client_context.require_replica_set + @async_client_context.require_no_load_balancer + @async_client_context.require_tls + async def test_init_disconnected_with_srv(self): + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # nodes returns an empty set if not connected + self.assertEqual(c.nodes, frozenset()) + # topology_description returns the initial seed description if not connected + topology_description = c.topology_description + self.assertEqual(topology_description.topology_type, TOPOLOGY_TYPE.Unknown) + self.assertEqual( + { + ("test1.test.build.10gen.cc", None): ServerDescription( + ("test1.test.build.10gen.cc", None) + ) + }, + topology_description.server_descriptions(), + ) + + # address causes client to block until connected + self.assertIsNotNone(await c.address) + # Initial seed topology and connected topology have the same ID + self.assertEqual( + c._topology._topology_id, topology_description._topology_settings._topology_id + ) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # primary causes client to block until connected + await c.primary + self.assertIsNotNone(c._topology) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # secondaries causes client to block until connected + await c.secondaries + self.assertIsNotNone(c._topology) + await c.close() + + c = await self.async_rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # arbiters causes client to block until connected + await c.arbiters + self.assertIsNotNone(c._topology) 
+ + async def test_equality(self): + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = await self.async_rs_or_single_client(seed, connect=False) + self.assertEqual(async_client_context.client, c) + # Explicitly test inequality + self.assertFalse(async_client_context.client != c) + + c = await self.async_rs_or_single_client("invalid.com", connect=False) + self.assertNotEqual(async_client_context.client, c) + self.assertTrue(async_client_context.client != c) + + c1 = self.simple_client("a", connect=False) + c2 = self.simple_client("b", connect=False) + + # Seeds differ: + self.assertNotEqual(c1, c2) + + c1 = self.simple_client(["a", "b", "c"], connect=False) + c2 = self.simple_client(["c", "a", "b"], connect=False) + + # Same seeds but out of order still compares equal: + self.assertEqual(c1, c2) + + async def test_hashable(self): + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = await self.async_rs_or_single_client(seed, connect=False) + self.assertIn(c, {async_client_context.client}) + c = await self.async_rs_or_single_client("invalid.com", connect=False) + self.assertNotIn(c, {async_client_context.client}) + + async def test_host_w_port(self): + with self.assertRaises(ValueError): + host = await async_client_context.host + await connected( + AsyncMongoClient( + f"{host}:1234567", + connectTimeoutMS=1, + serverSelectionTimeoutMS=10, + ) + ) + + async def test_repr(self): + # Used to test 'eval' below. + import bson + + client = AsyncMongoClient( # type: ignore[type-var] + "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" + "&connectTimeoutMS=12345&w=1&wtimeoutms=100", + connect=False, + document_class=SON, + ) + + the_repr = repr(client) + self.assertIn("AsyncMongoClient(host=", the_repr) + self.assertIn("document_class=bson.son.SON, tz_aware=False, connect=False, ", the_repr) + self.assertIn("connecttimeoutms=12345", the_repr) + self.assertIn("replicaset='replset'", the_repr) + self.assertIn("w=1", the_repr) + self.assertIn("wtimeoutms=100", the_repr) + + async with eval(the_repr) as client_two: + self.assertEqual(client_two, client) + + client = self.simple_client( + "localhost:27017,localhost:27018", + replicaSet="replset", + connectTimeoutMS=12345, + socketTimeoutMS=None, + w=1, + wtimeoutms=100, + connect=False, + ) + the_repr = repr(client) + self.assertIn("AsyncMongoClient(host=", the_repr) + self.assertIn("document_class=dict, tz_aware=False, connect=False, ", the_repr) + self.assertIn("connecttimeoutms=12345", the_repr) + self.assertIn("replicaset='replset'", the_repr) + self.assertIn("sockettimeoutms=None", the_repr) + self.assertIn("w=1", the_repr) + self.assertIn("wtimeoutms=100", the_repr) + + async with eval(the_repr) as client_two: + self.assertEqual(client_two, client) + + async def test_repr_srv_host(self): + client = AsyncMongoClient("mongodb+srv://test1.test.build.10gen.cc/", connect=False) + # before srv resolution + self.assertIn("host='mongodb+srv://test1.test.build.10gen.cc'", repr(client)) + await client.aconnect() + # after srv resolution + self.assertIn("host=['localhost.test.build.10gen.cc:", repr(client)) + await client.close() + + async def test_getters(self): + await async_wait_until( + lambda: async_client_context.nodes == self.client.nodes, "find all nodes" + ) + + async def test_list_databases(self): + cmd_docs = (await self.client.admin.command("listDatabases"))["databases"] + cursor = await self.client.list_databases() + self.assertIsInstance(cursor, AsyncCommandCursor) + helper_docs = 
await cursor.to_list() + self.assertGreater(len(helper_docs), 0) + self.assertEqual(len(helper_docs), len(cmd_docs)) + # PYTHON-3529 Some fields may change between calls, just compare names. + for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): + self.assertIs(type(helper_doc), dict) + self.assertEqual(helper_doc.keys(), cmd_doc.keys()) + client = await self.async_rs_or_single_client(document_class=SON) + async for doc in await client.list_databases(): + self.assertIs(type(doc), dict) + + await self.client.pymongo_test.test.insert_one({}) + cursor = await self.client.list_databases(filter={"name": "admin"}) + docs = await cursor.to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(docs[0]["name"], "admin") + + cursor = await self.client.list_databases(nameOnly=True) + async for doc in cursor: + self.assertEqual(["name"], list(doc)) + + async def test_list_database_names(self): + await self.client.pymongo_test.test.insert_one({"dummy": "object"}) + await self.client.pymongo_test_mike.test.insert_one({"dummy": "object"}) + cmd_docs = (await self.client.admin.command("listDatabases"))["databases"] + cmd_names = [doc["name"] for doc in cmd_docs] + + db_names = await self.client.list_database_names() + self.assertIn("pymongo_test", db_names) + self.assertIn("pymongo_test_mike", db_names) + self.assertEqual(db_names, cmd_names) + + async def test_drop_database(self): + with self.assertRaises(TypeError): + await self.client.drop_database(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await self.client.drop_database(None) # type: ignore[arg-type] + + await self.client.pymongo_test.test.insert_one({"dummy": "object"}) + await self.client.pymongo_test2.test.insert_one({"dummy": "object"}) + dbs = await self.client.list_database_names() + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test2", dbs) + await self.client.drop_database("pymongo_test") + + if async_client_context.is_rs: + wc_client = await self.async_rs_or_single_client(w=len(async_client_context.nodes) + 1) + with self.assertRaises(WriteConcernError): + await wc_client.drop_database("pymongo_test2") + + await self.client.drop_database(self.client.pymongo_test2) + dbs = await self.client.list_database_names() + self.assertNotIn("pymongo_test", dbs) + self.assertNotIn("pymongo_test2", dbs) + + async def test_close(self): + test_client = await self.async_rs_or_single_client() + coll = test_client.pymongo_test.bar + await test_client.close() + with self.assertRaises(InvalidOperation): + await coll.count_documents({}) + + async def test_close_kills_cursors(self): + if sys.platform.startswith("java"): + # We can't figure out how to make this test reliable with Jython. + raise SkipTest("Can't test with Jython") + test_client = await self.async_rs_or_single_client() + # Kill any cursors possibly queued up by previous tests. + gc.collect() + await test_client._process_periodic_tasks() + + # Add some test data. + coll = test_client.pymongo_test.test_close_kills_cursors + docs_inserted = 1000 + await coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + # Open a cursor and leave it open on the server. + cursor = coll.find().batch_size(10) + self.assertTrue(bool(await anext(cursor))) + self.assertLess(cursor.retrieved, docs_inserted) + + # Open a command cursor and leave it open on the server. 
+ cursor = await coll.aggregate([], batchSize=10) + self.assertTrue(bool(await anext(cursor))) + del cursor + # Required for PyPy, Jython and other Python implementations that + # don't use reference counting garbage collection. + gc.collect() + + # Close the client and ensure the topology is closed. + self.assertTrue(test_client._topology._opened) + await test_client.close() + self.assertFalse(test_client._topology._opened) + test_client = await self.async_rs_or_single_client() + # The killCursors task should not need to re-open the topology. + await test_client._process_periodic_tasks() + self.assertTrue(test_client._topology._opened) + + async def test_close_stops_kill_cursors_thread(self): + client = await self.async_rs_client() + await client.test.test.find_one() + self.assertFalse(client._kill_cursors_executor._stopped) + + # Closing the client should stop the thread. + await client.close() + self.assertTrue(client._kill_cursors_executor._stopped) + + # Reusing the closed client should raise an InvalidOperation error. + with self.assertRaises(InvalidOperation): + await client.admin.command("ping") + # Thread is still stopped. + self.assertTrue(client._kill_cursors_executor._stopped) + + async def test_uri_connect_option(self): + # Ensure that topology is not opened if connect=False. + client = await self.async_rs_client(connect=False) + self.assertFalse(client._topology._opened) + + # Ensure kill cursors thread has not been started. + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertFalse(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertFalse(kc_task and not kc_task.done()) + # Using the client should open topology and start the thread. + await client.admin.command("ping") + self.assertTrue(client._topology._opened) + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertTrue(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertTrue(kc_task and not kc_task.done()) + + async def test_close_does_not_open_servers(self): + client = await self.async_rs_client(connect=False) + topology = client._topology + self.assertEqual(topology._servers, {}) + await client.close() + self.assertEqual(topology._servers, {}) + + async def test_close_closes_sockets(self): + client = await self.async_rs_client() + await client.test.test.find_one() + topology = client._topology + await client.close() + for server in topology._servers.values(): + self.assertFalse(server._pool.conns) + self.assertTrue(server._monitor._executor._stopped) + self.assertTrue(server._monitor._rtt_monitor._executor._stopped) + self.assertFalse(server._monitor._pool.conns) + self.assertFalse(server._monitor._rtt_monitor._pool.conns) + + def test_bad_uri(self): + with self.assertRaises(InvalidURI): + AsyncMongoClient("http://localhost") + + @async_client_context.require_auth + @async_client_context.require_no_fips + async def test_auth_from_uri(self): + host, port = await async_client_context.host, await async_client_context.port + await async_client_context.create_user("admin", "admin", "pass") + self.addAsyncCleanup(async_client_context.drop_user, "admin", "admin") + self.addAsyncCleanup(remove_all_users, self.client.pymongo_test) + + await async_client_context.create_user( + "pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"] + ) + + with self.assertRaises(OperationFailure): + await connected( + await self.async_rs_or_single_client_noauth("mongodb://a:b@%s:%d" 
% (host, port)) + ) + + # No error. + await connected( + await self.async_rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + ) + + # Wrong database. + uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) + with self.assertRaises(OperationFailure): + await connected(await self.async_rs_or_single_client_noauth(uri)) + + # No error. + await connected( + await self.async_rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) + ) + ) + + # Auth with lazy connection. + await ( + await self.async_rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False + ) + ).pymongo_test.test.find_one() + + # Wrong password. + bad_client = await self.async_rs_or_single_client_noauth( + "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False + ) + + with self.assertRaises(OperationFailure): + await bad_client.pymongo_test.test.find_one() + + @async_client_context.require_auth + async def test_username_and_password(self): + await async_client_context.create_user("admin", "ad min", "pa/ss") + self.addAsyncCleanup(async_client_context.drop_user, "admin", "ad min") + + c = await self.async_rs_or_single_client_noauth(username="ad min", password="pa/ss") + + # Username and password aren't in strings that will likely be logged. + self.assertNotIn("ad min", repr(c)) + self.assertNotIn("ad min", str(c)) + self.assertNotIn("pa/ss", repr(c)) + self.assertNotIn("pa/ss", str(c)) + + # Auth succeeds. + await c.server_info() + + with self.assertRaises(OperationFailure): + await ( + await self.async_rs_or_single_client_noauth(username="ad min", password="foo") + ).server_info() + + @async_client_context.require_auth + @async_client_context.require_no_fips + async def test_lazy_auth_raises_operation_failure(self): + host = await async_client_context.host + lazy_client = await self.async_rs_or_single_client_noauth( + f"mongodb://user:wrong@{host}/pymongo_test", connect=False + ) + + await asyncAssertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) + + @async_client_context.require_no_tls + async def test_unix_socket(self): + if not hasattr(socket, "AF_UNIX"): + raise SkipTest("UNIX-sockets are not supported on this system") + + mongodb_socket = "/tmp/mongodb-%d.sock" % (await async_client_context.port,) + encoded_socket = "%2Ftmp%2F" + "mongodb-%d.sock" % (await async_client_context.port,) + if not os.access(mongodb_socket, os.R_OK): + raise SkipTest("Socket file is not accessible") + + uri = "mongodb://%s" % encoded_socket + # Confirm we can do operations via the socket. + client = await self.async_rs_or_single_client(uri) + await client.pymongo_test.test.insert_one({"dummy": "object"}) + dbs = await client.list_database_names() + self.assertIn("pymongo_test", dbs) + + self.assertIn(mongodb_socket, repr(client)) + + # Confirm it fails with a missing socket. 
+ with self.assertRaises(ConnectionFailure): + c = self.simple_client( + "mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100 + ) + await connected(c) + + async def test_document_class(self): + c = self.client + db = c.pymongo_test + await db.test.insert_one({"x": 1}) + + self.assertEqual(dict, c.codec_options.document_class) + self.assertIsInstance(await db.test.find_one(), dict) + self.assertNotIsInstance(await db.test.find_one(), SON) + + c = await self.async_rs_or_single_client(document_class=SON) + + db = c.pymongo_test + + self.assertEqual(SON, c.codec_options.document_class) + self.assertIsInstance(await db.test.find_one(), SON) + + async def test_timeouts(self): + client = await self.async_rs_or_single_client( + connectTimeoutMS=10500, + socketTimeoutMS=10500, + maxIdleTimeMS=10500, + serverSelectionTimeoutMS=10500, + ) + self.assertEqual(10.5, (await async_get_pool(client)).opts.connect_timeout) + self.assertEqual(10.5, (await async_get_pool(client)).opts.socket_timeout) + self.assertEqual(10.5, (await async_get_pool(client)).opts.max_idle_time_seconds) + self.assertEqual(10.5, client.options.pool_options.max_idle_time_seconds) + self.assertEqual(10.5, client.options.server_selection_timeout) + + async def test_socket_timeout_ms_validation(self): + c = await self.async_rs_or_single_client(socketTimeoutMS=10 * 1000) + self.assertEqual(10, (await async_get_pool(c)).opts.socket_timeout) + + c = await connected(await self.async_rs_or_single_client(socketTimeoutMS=None)) + self.assertEqual(None, (await async_get_pool(c)).opts.socket_timeout) + + c = await connected(await self.async_rs_or_single_client(socketTimeoutMS=0)) + self.assertEqual(None, (await async_get_pool(c)).opts.socket_timeout) + + with self.assertRaises(ValueError): + async with await self.async_rs_or_single_client(socketTimeoutMS=-1): + pass + + with self.assertRaises(ValueError): + async with await self.async_rs_or_single_client(socketTimeoutMS=1e10): + pass + + with self.assertRaises(ValueError): + async with await self.async_rs_or_single_client(socketTimeoutMS="foo"): + pass + + async def test_socket_timeout(self): + no_timeout = self.client + timeout_sec = 1 + timeout = await self.async_rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) + + await no_timeout.pymongo_test.drop_collection("test") + await no_timeout.pymongo_test.test.insert_one({"x": 1}) + + # A $where clause that takes a second longer than the timeout + where_func = delay(timeout_sec + 1) + + async def get_x(db): + doc = await anext(db.test.find().where(where_func)) + return doc["x"] + + self.assertEqual(1, await get_x(no_timeout.pymongo_test)) + with self.assertRaises(NetworkTimeout): + await get_x(timeout.pymongo_test) + + async def test_server_selection_timeout(self): + client = AsyncMongoClient(serverSelectionTimeoutMS=100, connect=False) + self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + await client.close() + + client = AsyncMongoClient(serverSelectionTimeoutMS=0, connect=False) + + self.assertAlmostEqual(0, client.options.server_selection_timeout) + + self.assertRaises( + ValueError, AsyncMongoClient, serverSelectionTimeoutMS="foo", connect=False + ) + self.assertRaises(ValueError, AsyncMongoClient, serverSelectionTimeoutMS=-1, connect=False) + self.assertRaises( + ConfigurationError, AsyncMongoClient, serverSelectionTimeoutMS=None, connect=False + ) + await client.close() + + client = AsyncMongoClient( + "mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False + ) + 
self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + await client.close() + + client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) + self.assertAlmostEqual(0, client.options.server_selection_timeout) + await client.close() + + # Test invalid timeout in URI ignored and set to default. + client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) + self.assertAlmostEqual(30, client.options.server_selection_timeout) + await client.close() + + client = AsyncMongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) + self.assertAlmostEqual(30, client.options.server_selection_timeout) + + async def test_waitQueueTimeoutMS(self): + listener = CMAPListener() + client = await self.async_rs_or_single_client( + waitQueueTimeoutMS=10, maxPoolSize=1, event_listeners=[listener] + ) + pool = await async_get_pool(client) + self.assertEqual(pool.opts.wait_queue_timeout, 0.01) + async with pool.checkout(): + with self.assertRaises(WaitQueueTimeoutError): + await client.test.command("ping") + self.assertFalse(listener.events_by_type(monitoring.PoolClearedEvent)) + + async def test_socketKeepAlive(self): + pool = await async_get_pool(self.client) + async with pool.checkout() as conn: + keepalive = conn.conn.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) + self.assertTrue(keepalive) + + @no_type_check + async def test_tz_aware(self): + self.assertRaises(ValueError, AsyncMongoClient, tz_aware="foo") + + aware = await self.async_rs_or_single_client(tz_aware=True) + self.addAsyncCleanup(aware.close) + naive = self.client + await aware.pymongo_test.drop_collection("test") + + now = datetime.datetime.now(tz=datetime.timezone.utc) + await aware.pymongo_test.test.insert_one({"x": now}) + + self.assertEqual(None, (await naive.pymongo_test.test.find_one())["x"].tzinfo) + self.assertEqual(utc, (await aware.pymongo_test.test.find_one())["x"].tzinfo) + self.assertEqual( + (await aware.pymongo_test.test.find_one())["x"].replace(tzinfo=None), + (await naive.pymongo_test.test.find_one())["x"], + ) + + @async_client_context.require_ipv6 + async def test_ipv6(self): + if async_client_context.tls: + if not HAVE_IPADDRESS: + raise SkipTest("Need the ipaddress module to test with SSL") + + if async_client_context.auth_enabled: + auth_str = f"{db_user}:{db_pwd}@" + else: + auth_str = "" + + uri = "mongodb://%s[::1]:%d" % (auth_str, await async_client_context.port) + if async_client_context.is_rs: + uri += "/?replicaSet=" + (async_client_context.replica_set_name or "") + + client = await self.async_rs_or_single_client_noauth(uri) + await client.pymongo_test.test.insert_one({"dummy": "object"}) + await client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) + + dbs = await client.list_database_names() + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test_bernie", dbs) + + async def test_contextlib(self): + client = await self.async_rs_or_single_client() + await client.pymongo_test.drop_collection("test") + await client.pymongo_test.test.insert_one({"foo": "bar"}) + + # The socket used for the previous commands has been returned to the + # pool + self.assertEqual(1, len((await async_get_pool(client)).conns)) + + # contextlib async support was added in Python 3.10 + if _IS_SYNC or sys.version_info >= (3, 10): + async with contextlib.aclosing(client): + self.assertEqual("bar", (await client.pymongo_test.test.find_one())["foo"]) + with self.assertRaises(InvalidOperation): + await 
client.pymongo_test.test.find_one() + client = await self.async_rs_or_single_client() + async with client as client: + self.assertEqual("bar", (await client.pymongo_test.test.find_one())["foo"]) + with self.assertRaises(InvalidOperation): + await client.pymongo_test.test.find_one() + + @async_client_context.require_sync + def test_interrupt_signal(self): + if sys.platform.startswith("java"): + # We can't figure out how to raise an exception on a thread that's + # blocked on a socket, whether that's the main thread or a worker, + # without simply killing the whole thread in Jython. This suggests + # PYTHON-294 can't actually occur in Jython. + raise SkipTest("Can't test interrupts in Jython") + if is_greenthread_patched(): + raise SkipTest("Can't reliably test interrupts with green threads") + + # Test fix for PYTHON-294 -- make sure AsyncMongoClient closes its + # socket if it gets an interrupt while waiting to recv() from it. + db = self.client.pymongo_test + + # A $where clause which takes 1.5 sec to execute + where = delay(1.5) + + # Need exactly 1 document so find() will execute its $where clause once + db.drop_collection("foo") + db.foo.insert_one({"_id": 1}) + + old_signal_handler = None + try: + # Platform-specific hacks for raising a KeyboardInterrupt on the + # main thread while find() is in-progress: On Windows, SIGALRM is + # unavailable so we use a second thread. In our Evergreen setup on + # Linux, the thread technique causes an error in the test at + # conn.recv(): TypeError: 'int' object is not callable + # We don't know what causes this, so we hack around it. + + if sys.platform == "win32": + + def interrupter(): + # Raises KeyboardInterrupt in the main thread + time.sleep(0.25) + thread.interrupt_main() + + thread.start_new_thread(interrupter, ()) + else: + # Convert SIGALRM to SIGINT -- it's hard to schedule a SIGINT + # for one second in the future, but easy to schedule SIGALRM. + def sigalarm(num, frame): + raise KeyboardInterrupt + + old_signal_handler = signal.signal(signal.SIGALRM, sigalarm) + signal.alarm(1) + + raised = False + try: + # Will be interrupted by a KeyboardInterrupt. + next(db.foo.find({"$where": where})) # type: ignore[call-overload] + except KeyboardInterrupt: + raised = True + + # Can't use self.assertRaises() because it doesn't catch system + # exceptions + self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt") + + # Raises AssertionError due to PYTHON-294 -- Mongo's response to + # the previous find() is still waiting to be read on the socket, + # so the request id's don't match. + self.assertEqual({"_id": 1}, next(db.foo.find())) # type: ignore[call-overload] + finally: + if old_signal_handler: + signal.signal(signal.SIGALRM, old_signal_handler) + + async def test_operation_failure(self): + # Ensure AsyncMongoClient doesn't close socket after it gets an error + # response to getLastError. PYTHON-395. We need a new client here + # to avoid race conditions caused by replica set failover or idle + # socket reaping. 
+ client = await self.async_single_client() + await client.pymongo_test.test.find_one() + pool = await async_get_pool(client) + socket_count = len(pool.conns) + self.assertGreaterEqual(socket_count, 1) + old_conn = next(iter(pool.conns)) + await client.pymongo_test.test.drop() + await client.pymongo_test.test.insert_one({"_id": "foo"}) + with self.assertRaises(OperationFailure): + await client.pymongo_test.test.insert_one({"_id": "foo"}) + + self.assertEqual(socket_count, len(pool.conns)) + new_con = next(iter(pool.conns)) + self.assertEqual(old_conn, new_con) + + async def test_lazy_connect_w0(self): + # Ensure that connect-on-demand works when the first operation is + # an unacknowledged write. This exercises _writable_max_wire_version(). + + # Use a separate collection to avoid races where we're still + # completing an operation on a collection while the next test begins. + await async_client_context.client.drop_database("test_lazy_connect_w0") + self.addAsyncCleanup(async_client_context.client.drop_database, "test_lazy_connect_w0") + + client = await self.async_rs_or_single_client(connect=False, w=0) + await client.test_lazy_connect_w0.test.insert_one({}) + + async def predicate(): + return await client.test_lazy_connect_w0.test.count_documents({}) == 1 + + await async_wait_until(predicate, "find one document") + + client = await self.async_rs_or_single_client(connect=False, w=0) + await client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) + + async def predicate(): + return (await client.test_lazy_connect_w0.test.find_one()).get("x") == 1 + + await async_wait_until(predicate, "update one document") + + client = await self.async_rs_or_single_client(connect=False, w=0) + await client.test_lazy_connect_w0.test.delete_one({}) + + async def predicate(): + return await client.test_lazy_connect_w0.test.count_documents({}) == 0 + + await async_wait_until(predicate, "delete one document") + + @async_client_context.require_no_mongos + async def test_exhaust_network_error(self): + # When doing an exhaust query, the socket stays checked out on success + # but must be checked in on error to avoid semaphore leaks. + client = await self.async_rs_or_single_client(maxPoolSize=1, retryReads=False) + collection = client.pymongo_test.test + pool = await async_get_pool(client) + pool._check_interval_seconds = None # Never check. + + # Ensure a socket. + await connected(client) + + # Cause a network error. + conn = one(pool.conns) + await conn.conn.close() + cursor = collection.find(cursor_type=CursorType.EXHAUST) + with self.assertRaises(ConnectionFailure): + await anext(cursor) + + self.assertTrue(conn.closed) + + # The semaphore was decremented despite the error. + self.assertEqual(0, pool.requests) + + @async_client_context.require_auth + async def test_auth_network_error(self): + # Make sure there's no semaphore leak if we get a network error + # when authenticating a new socket with cached credentials. + + # Get a client with one socket so we detect if it's leaked. + c = await connected( + await self.async_rs_or_single_client( + maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False + ) + ) + + # Cause a network error on the actual socket. + pool = await async_get_pool(c) + conn = one(pool.conns) + await conn.conn.close() + + # AsyncConnection.authenticate logs, but gets a socket.error. Should be + # reraised as AutoReconnect. + with self.assertRaises(AutoReconnect): + await c.test.collection.find_one() + + # No semaphore leak, the pool is allowed to make a new socket. 
+        await c.test.collection.find_one()
+
+    @async_client_context.require_no_replica_set
+    async def test_connect_to_standalone_using_replica_set_name(self):
+        client = await self.async_single_client(replicaSet="anything", serverSelectionTimeoutMS=100)
+        with self.assertRaises(AutoReconnect):
+            await client.test.test.find_one()
+
+    @async_client_context.require_replica_set
+    async def test_stale_getmore(self):
+        # A cursor is created, but its member goes down and is removed from
+        # the topology before the getMore message is sent. Test that
+        # AsyncMongoClient._run_operation handles the error.
+        with self.assertRaises(AutoReconnect):
+            client = await self.async_rs_client(connect=False, serverSelectionTimeoutMS=100)
+            await client._run_operation(
+                operation=message._GetMore(
+                    "pymongo_test",
+                    "collection",
+                    101,
+                    1234,
+                    client.codec_options,
+                    ReadPreference.PRIMARY,
+                    None,
+                    client,
+                    None,
+                    None,
+                    False,
+                    None,
+                ),
+                unpack_res=AsyncCursor(client.pymongo_test.collection)._unpack_response,
+                address=("not-a-member", 27017),
+            )
+
+    async def test_heartbeat_frequency_ms(self):
+        class HeartbeatStartedListener(ServerHeartbeatListener):
+            def __init__(self):
+                self.results = []
+
+            def started(self, event):
+                self.results.append(event)
+
+            def succeeded(self, event):
+                pass
+
+            def failed(self, event):
+                pass
+
+        old_init = ServerHeartbeatStartedEvent.__init__
+        heartbeat_times = []
+
+        # Record the wall-clock time of each heartbeat start by patching
+        # the event's __init__.
+        def init(self, *args):
+            old_init(self, *args)
+            heartbeat_times.append(time.time())
+
+        try:
+            ServerHeartbeatStartedEvent.__init__ = init  # type: ignore
+            listener = HeartbeatStartedListener()
+            uri = "mongodb://%s:%d/?heartbeatFrequencyMS=500" % (
+                await async_client_context.host,
+                await async_client_context.port,
+            )
+            await self.async_single_client(uri, event_listeners=[listener])
+            await async_wait_until(
+                lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents"
+            )
+
+            # Default heartbeatFrequencyMS is 10 sec. Check the interval was
+            # closer to 0.5 sec with heartbeatFrequencyMS configured.
+ self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) + + finally: + ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore + + def test_small_heartbeat_frequency_ms(self): + uri = "mongodb://example/?heartbeatFrequencyMS=499" + with self.assertRaises(ConfigurationError) as context: + AsyncMongoClient(uri) + + self.assertIn("heartbeatFrequencyMS", str(context.exception)) + + async def test_compression(self): + def compression_settings(client): + pool_options = client.options.pool_options + return pool_options._compression_settings + + uri = "mongodb://localhost:27017/?compressors=zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, 4) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=foobar" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=foobar,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + + # According to the connection string spec, unsupported values + # just raise a warning and are ignored. 
+ uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zlib"]) + self.assertEqual(opts.zlib_compression_level, -1) + + if not _have_snappy(): + uri = "mongodb://localhost:27017/?compressors=snappy" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + else: + uri = "mongodb://localhost:27017/?compressors=snappy" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["snappy"]) + uri = "mongodb://localhost:27017/?compressors=snappy,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["snappy", "zlib"]) + + if not _have_zstd(): + uri = "mongodb://localhost:27017/?compressors=zstd" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, []) + else: + uri = "mongodb://localhost:27017/?compressors=zstd" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zstd"]) + uri = "mongodb://localhost:27017/?compressors=zstd,zlib" + client = self.simple_client(uri, connect=False) + opts = compression_settings(client) + self.assertEqual(opts.compressors, ["zstd", "zlib"]) + + options = async_client_context.default_client_options + if "compressors" in options and "zlib" in options["compressors"]: + for level in range(-1, 10): + client = await self.async_single_client(zlibcompressionlevel=level) + # No error + await client.pymongo_test.test.find_one() + + @async_client_context.require_sync + async def test_reset_during_update_pool(self): + client = await self.async_rs_or_single_client(minPoolSize=10) + await client.admin.command("ping") + pool = await async_get_pool(client) + generation = pool.gen.get_overall() + + # Continuously reset the pool. + class ResetPoolThread(threading.Thread): + def __init__(self, pool): + super().__init__() + self.running = True + self.pool = pool + + def stop(self): + self.running = False + + async def _run(self): + while self.running: + exc = AutoReconnect("mock pool error") + ctx = _ErrorContext(exc, 0, pool.gen.get_overall(), False, None) + await client._topology.handle_error(pool.address, ctx) + await asyncio.sleep(0.001) + + def run(self): + self._run() + + t = ResetPoolThread(pool) + t.start() + + # Ensure that update_pool completes without error even when the pool + # is reset concurrently. + try: + while True: + for _ in range(10): + await client._topology.update_pool() + if generation != pool.gen.get_overall(): + break + finally: + t.stop() + t.join() + await client.admin.command("ping") + + async def test_background_connections_do_not_hold_locks(self): + min_pool_size = 10 + client = await self.async_rs_or_single_client( + serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False + ) + # Create a single connection in the pool. + await client.admin.command("ping") + + # Cause new connections stall for a few seconds. 
+        pool = await async_get_pool(client)
+        original_connect = pool.connect
+
+        async def stall_connect(*args, **kwargs):
+            await asyncio.sleep(2)
+            return await original_connect(*args, **kwargs)
+
+        pool.connect = stall_connect
+        # Un-patch Pool.connect to break the cyclic reference.
+        self.addCleanup(delattr, pool, "connect")
+
+        # Wait for the background thread to start creating connections.
+        await async_wait_until(lambda: len(pool.conns) > 1, "start creating connections")
+
+        # Assert that application operations do not block.
+        for _ in range(10):
+            start = time.monotonic()
+            await client.admin.command("ping")
+            total = time.monotonic() - start
+            # Each ping command should not take more than 2 seconds.
+            self.assertLess(total, 2)
+
+    async def test_background_connections_log_on_error(self):
+        with self.assertLogs("pymongo.client", level="ERROR") as cm:
+            client = await self.async_rs_or_single_client(minPoolSize=1)
+            # Create a single connection in the pool.
+            await client.admin.command("ping")
+
+            # Cause new connections to fail.
+            pool = await async_get_pool(client)
+
+            async def fail_connect(*args, **kwargs):
+                raise Exception("failed to connect")
+
+            pool.connect = fail_connect
+            # Un-patch Pool.connect to break the cyclic reference.
+            self.addCleanup(delattr, pool, "connect")
+
+            await pool.reset_without_pause()
+
+            await async_wait_until(
+                lambda: "failed to connect" in "".join(cm.output), "start creating connections"
+            )
+            self.assertIn("MongoClient background task encountered an error", "".join(cm.output))
+
+    @async_client_context.require_replica_set
+    async def test_direct_connection(self):
+        # direct_connection=True should result in Single topology.
+        client = await self.async_rs_or_single_client(directConnection=True)
+        await client.admin.command("ping")
+        self.assertEqual(len(client.nodes), 1)
+        self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single)
+
+        # direct_connection=False should result in RS topology.
+        client = await self.async_rs_or_single_client(directConnection=False)
+        await client.admin.command("ping")
+        self.assertGreaterEqual(len(client.nodes), 1)
+        self.assertIn(
+            client._topology_settings.get_topology_type(),
+            [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary],
+        )
+
+        # directConnection=True should error with multiple hosts as a list.
+ with self.assertRaises(ConfigurationError): + AsyncMongoClient(["host1", "host2"], directConnection=True) + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2927 fails often on PyPy") + async def test_continuous_network_errors(self): + def server_description_count(): + i = 0 + for obj in gc.get_objects(): + try: + if isinstance(obj, ServerDescription): + i += 1 + except ReferenceError: + pass + return i + + gc.collect() + with client_knobs(min_heartbeat_interval=0.002): + client = self.simple_client( + "invalid:27017", heartbeatFrequencyMS=2, serverSelectionTimeoutMS=200 + ) + initial_count = server_description_count() + with self.assertRaises(ServerSelectionTimeoutError): + await client.test.test.find_one() + gc.collect() + final_count = server_description_count() + await client.close() + # If a bug like PYTHON-2433 is reintroduced then too many + # ServerDescriptions will be kept alive and this test will fail: + # AssertionError: 11 != 47 within 20 delta (36 difference) + self.assertAlmostEqual(initial_count, final_count, delta=30) + + @async_client_context.require_failCommand_fail_point + async def test_network_error_message(self): + client = await self.async_single_client(retryReads=False) + await client.admin.command("ping") # connect + async with self.fail_point( + {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} + ): + assert await client.address is not None + expected = "{}:{}: ".format(*(await client.address)) + with self.assertRaisesRegex(AutoReconnect, expected): + await client.pymongo_test.test.find_one({}) + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") + async def test_process_periodic_tasks(self): + client = await self.async_rs_or_single_client() + coll = client.db.collection + await coll.insert_many([{} for _ in range(5)]) + cursor = coll.find(batch_size=2) + await cursor.next() + c_id = cursor.cursor_id + self.assertIsNotNone(c_id) + await client.close() + # Add cursor to kill cursors queue + del cursor + await async_wait_until( + lambda: client._kill_cursors_queue, + "waited for cursor to be added to queue", + ) + await client._process_periodic_tasks() # This must not raise or print any exceptions + with self.assertRaises(InvalidOperation): + await coll.insert_many([{} for _ in range(5)]) + + async def test_service_name_from_kwargs(self): + client = AsyncMongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc", + srvServiceName="customname", + connect=False, + ) + await client.aconnect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() + client = AsyncMongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc" + "/?srvServiceName=shouldbeoverriden", + srvServiceName="customname", + connect=False, + ) + await client.aconnect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() + client = AsyncMongoClient( + "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", + connect=False, + ) + await client.aconnect() + self.assertEqual(client._topology_settings.srv_service_name, "customname") + await client.close() + + async def test_srv_max_hosts_kwarg(self): + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/") + await client.aconnect() + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + await client.aconnect() + 
self.assertEqual(len(client.topology_description.server_descriptions()), 1) + client = self.simple_client( + "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 + ) + await client.aconnect() + self.assertEqual(len(client.topology_description.server_descriptions()), 2) + + @unittest.skipIf( + async_client_context.load_balancer, + "loadBalanced clients do not run SDAM", + ) + @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") + @async_client_context.require_sync + def test_sigstop_sigcont(self): + test_dir = os.path.dirname(os.path.realpath(__file__)) + script = os.path.join(test_dir, "sigstop_sigcont.py") + p = subprocess.Popen( + [sys.executable, script, async_client_context.uri], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + self.addCleanup(p.wait, timeout=1) + self.addCleanup(p.kill) + time.sleep(1) + # Stop the child, sleep for twice the streaming timeout + # (heartbeatFrequencyMS + connectTimeoutMS), and restart. + os.kill(p.pid, signal.SIGSTOP) + time.sleep(2) + os.kill(p.pid, signal.SIGCONT) + time.sleep(0.5) + # Tell the script to exit gracefully. + outs, _ = p.communicate(input=b"q\n", timeout=10) + self.assertTrue(outs) + log_output = outs.decode("utf-8") + self.assertIn("TEST STARTED", log_output) + self.assertIn("ServerHeartbeatStartedEvent", log_output) + self.assertIn("ServerHeartbeatSucceededEvent", log_output) + self.assertIn("TEST COMPLETED", log_output) + self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + + async def _test_handshake(self, env_vars, expected_env): + with patch.dict("os.environ", env_vars): + metadata = copy.deepcopy(_METADATA) + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|async" + else: + metadata["driver"]["name"] = "PyMongo|async" + if expected_env is not None: + metadata["env"] = expected_env + + if "AWS_REGION" not in env_vars: + os.environ["AWS_REGION"] = "" + client = await self.async_rs_or_single_client(serverSelectionTimeoutMS=10000) + await client.admin.command("ping") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + + async def test_handshake_01_aws(self): + await self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_REGION": "us-east-2", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", + }, + {"name": "aws.lambda", "region": "us-east-2", "memory_mb": 1024}, + ) + + async def test_handshake_02_azure(self): + await self._test_handshake({"FUNCTIONS_WORKER_RUNTIME": "python"}, {"name": "azure.func"}) + + async def test_handshake_03_gcp(self): + await self._test_handshake( + { + "K_SERVICE": "servicename", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + # Extra case for FUNCTION_NAME. + await self._test_handshake( + { + "FUNCTION_NAME": "funcname", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + + async def test_handshake_04_vercel(self): + await self._test_handshake( + {"VERCEL": "1", "VERCEL_REGION": "cdg1"}, {"name": "vercel", "region": "cdg1"} + ) + + async def test_handshake_05_multiple(self): + await self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "FUNCTIONS_WORKER_RUNTIME": "python"}, + None, + ) + # Extra cases for other combos. 
+ await self._test_handshake( + {"FUNCTIONS_WORKER_RUNTIME": "python", "K_SERVICE": "servicename"}, + None, + ) + await self._test_handshake({"K_SERVICE": "servicename", "VERCEL": "1"}, None) + + async def test_handshake_06_region_too_long(self): + await self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "AWS_REGION": "a" * 512}, + {"name": "aws.lambda"}, + ) + + async def test_handshake_07_memory_invalid_int(self): + await self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big", + }, + {"name": "aws.lambda"}, + ) + + async def test_handshake_08_invalid_aws_ec2(self): + # AWS_EXECUTION_ENV needs to start with "AWS_Lambda_". + await self._test_handshake( + {"AWS_EXECUTION_ENV": "EC2"}, + None, + ) + + async def test_handshake_09_container_with_provider(self): + await self._test_handshake( + { + ENV_VAR_K8S: "1", + "AWS_LAMBDA_RUNTIME_API": "1", + "AWS_REGION": "us-east-1", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "256", + }, + { + "container": {"orchestrator": "kubernetes"}, + "name": "aws.lambda", + "region": "us-east-1", + "memory_mb": 256, + }, + ) + + def test_dict_hints(self): + self.db.t.find(hint={"x": 1}) + + def test_dict_hints_sort(self): + result = self.db.t.find() + result.sort({"x": 1}) + + self.db.t.find(sort={"x": 1}) + + async def test_dict_hints_create_index(self): + await self.db.t.create_index({"x": pymongo.ASCENDING}) + + async def test_legacy_java_uuid_roundtrip(self): + data = BinaryData.java_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) + + await async_client_context.client.pymongo_test.drop_collection("java_uuid") + db = async_client_context.client.pymongo_test + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) + + await coll.insert_many(docs) + self.assertEqual(5, await coll.count_documents({})) + async for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + async for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + await async_client_context.client.pymongo_test.drop_collection("java_uuid") + + async def test_legacy_csharp_uuid_roundtrip(self): + data = BinaryData.csharp_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) + + await async_client_context.client.pymongo_test.drop_collection("csharp_uuid") + db = async_client_context.client.pymongo_test + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) + + await coll.insert_many(docs) + self.assertEqual(5, await coll.count_documents({})) + async for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + async for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + await async_client_context.client.pymongo_test.drop_collection("csharp_uuid") + + async def test_uri_to_uuid(self): + uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" + client = await self.async_single_client(uri, connect=False) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) + + async def test_uuid_queries(self): + db = async_client_context.client.pymongo_test + coll = db.test + await coll.drop() + + uu = uuid.uuid4() + await coll.insert_one({"uuid": Binary(uu.bytes, 3)}) + self.assertEqual(1, 
await coll.count_documents({})) + + # Test regular UUID queries (using subtype 4). + coll = db.get_collection( + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, await coll.count_documents({"uuid": uu})) + await coll.insert_one({"uuid": uu}) + self.assertEqual(2, await coll.count_documents({})) + docs = await coll.find({"uuid": uu}).to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(uu, docs[0]["uuid"]) + + # Test both. + uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + predicate = {"uuid": {"$in": [uu, uu_legacy]}} + self.assertEqual(2, await coll.count_documents(predicate)) + docs = await coll.find(predicate).to_list() + self.assertEqual(2, len(docs)) + await coll.drop() + + +class TestExhaustCursor(AsyncIntegrationTest): + """Test that clients properly handle errors from exhaust cursors.""" + + def setUp(self): + super().setUp() + if async_client_context.is_mongos: + raise SkipTest("mongos doesn't support exhaust, SERVER-2627") + + async def test_exhaust_query_server_error(self): + # When doing an exhaust query, the socket stays checked out on success + # but must be checked in on error to avoid semaphore leaks. + client = await connected(await self.async_rs_or_single_client(maxPoolSize=1)) + + collection = client.pymongo_test.test + pool = await async_get_pool(client) + conn = one(pool.conns) + + # This will cause OperationFailure in all mongo versions since + # the value for $orderby must be a document. + cursor = collection.find( + SON([("$query", {}), ("$orderby", True)]), cursor_type=CursorType.EXHAUST + ) + + with self.assertRaises(OperationFailure): + await cursor.next() + self.assertFalse(conn.closed) + + # The socket was checked in and the semaphore was decremented. + self.assertIn(conn, pool.conns) + self.assertEqual(0, pool.requests) + + async def test_exhaust_getmore_server_error(self): + # When doing a getmore on an exhaust cursor, the socket stays checked + # out on success but it's checked in on error to avoid semaphore leaks. + client = await self.async_rs_or_single_client(maxPoolSize=1) + collection = client.pymongo_test.test + await collection.drop() + + await collection.insert_many([{} for _ in range(200)]) + self.addAsyncCleanup(async_client_context.client.pymongo_test.test.drop) + + pool = await async_get_pool(client) + pool._check_interval_seconds = None # Never check. + conn = one(pool.conns) + + cursor = collection.find(cursor_type=CursorType.EXHAUST) + + # Initial query succeeds. + await cursor.next() + + # Cause a server error on getmore. + async def receive_message(request_id): + # Discard the actual server response. + await AsyncConnection.receive_message(conn, request_id) + + # responseFlags bit 1 is QueryFailure. + msg = struct.pack("= _NAMESPACE_DOC_BYTES: + num_models += 1 + b_repeated = "b" * (remainder_bytes - _OPERATION_DOC_BYTES) + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + return num_models, models + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_11_no_batch_splits_if_new_namespace_is_not_too_large(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + + num_models, models = await self._setup_namespace_test_models() + models.append( + InsertOne( + namespace="db.coll", + document={"a": "b"}, + ) + ) + self.addAsyncCleanup(client.db["coll"].drop) + + # No batch splitting required. 
+ result = await client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 1) + event = bulk_write_events[0] + + self.assertEqual(len(event.command["ops"]), num_models + 1) + self.assertEqual(len(event.command["nsInfo"]), 1) + self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_11_batch_splits_if_new_namespace_is_too_large(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + + num_models, models = await self._setup_namespace_test_models() + c_repeated = "c" * 200 + namespace = f"db.{c_repeated}" + models.append( + InsertOne( + namespace=namespace, + document={"a": "b"}, + ) + ) + self.addAsyncCleanup(client.db["coll"].drop) + self.addAsyncCleanup(client.db[c_repeated].drop) + + # Batch splitting required. + result = await client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 2) + first_event, second_event = bulk_write_events + + self.assertEqual(len(first_event.command["ops"]), num_models) + self.assertEqual(len(first_event.command["nsInfo"]), 1) + self.assertEqual(first_event.command["nsInfo"][0]["ns"], "db.coll") + + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(len(second_event.command["nsInfo"]), 1) + self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_12_returns_error_if_no_writes_can_be_added_to_ops(self): + client = await self.async_rs_or_single_client() + + # Document too large. + b_repeated = "b" * self.max_message_size_bytes + models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] + with self.assertRaises(DocumentTooLarge) as context: + await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + + # Namespace too large. 
+ c_repeated = "c" * self.max_message_size_bytes + namespace = f"db.{c_repeated}" + models = [InsertOne(namespace=namespace, document={"a": "b"})] + with self.assertRaises(DocumentTooLarge) as context: + await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + + @async_client_context.require_version_min(8, 0, 0, -24) + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def test_13_returns_error_if_auto_encryption_configured(self): + opts = AutoEncryptionOpts( + key_vault_namespace="db.coll", + kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, + ) + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + models = [InsertOne(namespace="db.coll", document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + await client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + self.assertIn( + "bulk_write does not currently support automatic encryption", context.exception._message + ) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def test_upserted_result(self): + client = await self.async_rs_or_single_client() + + collection = client.db["coll"] + self.addAsyncCleanup(collection.drop) + await collection.drop() + + models = [] + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": "a"}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + ) + ) + result = await client.bulk_write(models=models, verbose_results=True) + + self.assertEqual(result.upserted_count, 2) + self.assertEqual(result.update_results[0].did_upsert, True) + self.assertEqual(result.update_results[1].did_upsert, True) + self.assertEqual(result.update_results[2].did_upsert, False) + + # Note: test 14 is optional and intentionally not implemented because we provide multiple APIs to specify explain. 
+
+    @async_client_context.require_version_min(8, 0, 0, -24)
+    async def test_15_unacknowledged_write_across_batches(self):
+        listener = OvertCommandListener()
+        client = await self.async_rs_or_single_client(event_listeners=[listener])
+
+        collection = client.db["coll"]
+        self.addAsyncCleanup(collection.drop)
+        await collection.drop()
+        await client.db.command({"create": "db.coll"})
+
+        b_repeated = "b" * (self.max_bson_object_size - 500)
+        models = [
+            InsertOne(namespace="db.coll", document={"a": b_repeated})
+            for _ in range(int(self.max_message_size_bytes / self.max_bson_object_size) + 1)
+        ]
+
+        listener.reset()
+
+        res = await client.bulk_write(models, ordered=False, write_concern=WriteConcern(w=0))
+        self.assertEqual(False, res.acknowledged)
+
+        events = listener.started_events
+        self.assertEqual(2, len(events))
+        self.assertEqual(
+            int(self.max_message_size_bytes / self.max_bson_object_size),
+            len(events[0].command["ops"]),
+        )
+        self.assertEqual(1, len(events[1].command["ops"]))
+        self.assertEqual(events[0].operation_id, events[1].operation_id)
+        self.assertEqual({"w": 0}, events[0].command["writeConcern"])
+        self.assertEqual({"w": 0}, events[1].command["writeConcern"])
+
+        self.assertEqual(
+            int(self.max_message_size_bytes / self.max_bson_object_size) + 1,
+            await collection.count_documents({}),
+        )
+
+
+# https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites
+class TestClientBulkWriteCSOT(AsyncIntegrationTest):
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.max_write_batch_size = await async_client_context.max_write_batch_size
+        self.max_bson_object_size = await async_client_context.max_bson_size
+        self.max_message_size_bytes = await async_client_context.max_message_size_bytes
+
+    @async_client_context.require_version_min(8, 0, 0, -24)
+    @async_client_context.require_failCommand_fail_point
+    @flaky(reason="PYTHON-5290", max_runs=3, affects_cpython_linux=True)
+    async def test_timeout_in_multi_batch_bulk_write(self):
+        if sys.platform != "linux" and "CI" in os.environ:
+            self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows and macOS")
+        _OVERHEAD = 500
+
+        internal_client = await self.async_rs_or_single_client(timeoutMS=None)
+
+        collection = internal_client.db["coll"]
+        self.addAsyncCleanup(collection.drop)
+        await collection.drop()
+
+        fail_command = {
+            "configureFailPoint": "failCommand",
+            "mode": {"times": 2},
+            "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010},
+        }
+        async with self.fail_point(fail_command):
+            models = []
+            num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1)
+            b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD)
+            for _ in range(num_models):
+                models.append(
+                    InsertOne(
+                        namespace="db.coll",
+                        document={"a": b_repeated},
+                    )
+                )
+
+            listener = OvertCommandListener()
+            client = await self.async_rs_or_single_client(
+                event_listeners=[listener],
+                readConcernLevel="majority",
+                readPreference="primary",
+                timeoutMS=2000,
+                w="majority",
+            )
+            # Initialize the client with a larger timeout to help make the test less flaky.
+            with pymongo.timeout(10):
+                await client.admin.command("ping")
+            with self.assertRaises(ClientBulkWriteException) as context:
+                await client.bulk_write(models=models)
+            self.assertIsInstance(context.exception.error, NetworkTimeout)
+
+        bulk_write_events = []
+        for event in listener.started_events:
+            if event.command_name == "bulkWrite":
+                bulk_write_events.append(event)
+        self.assertEqual(len(bulk_write_events), 2)
diff --git a/test/asynchronous/test_client_context.py b/test/asynchronous/test_client_context.py
new file mode 100644
index 0000000000..652b32e798
--- /dev/null
+++ b/test/asynchronous/test_client_context.py
@@ -0,0 +1,57 @@
+# Copyright 2018-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import os
+import sys
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import AsyncUnitTest, SkipTest, async_client_context, unittest
+
+_IS_SYNC = False
+
+
+class TestAsyncClientContext(AsyncUnitTest):
+    def test_must_connect(self):
+        if not os.environ.get("PYMONGO_MUST_CONNECT"):
+            raise SkipTest("PYMONGO_MUST_CONNECT is not set")
+
+        self.assertTrue(
+            async_client_context.connected,
+            "client context must be connected when "
+            "PYMONGO_MUST_CONNECT is set. Failed attempts:\n{}".format(
+                async_client_context.connection_attempt_info()
+            ),
+        )
+
+    def test_enableTestCommands_is_disabled(self):
+        if not os.environ.get("DISABLE_TEST_COMMANDS"):
+            raise SkipTest("DISABLE_TEST_COMMANDS is not set")
+
+        self.assertFalse(
+            async_client_context.test_commands_enabled,
+            "enableTestCommands must be disabled when DISABLE_TEST_COMMANDS is set.",
+        )
+
+    def test_free_threading_is_enabled(self):
+        if "free-threading build" not in sys.version:
+            raise SkipTest("this test requires the Python free-threading build")
+
+        # If the GIL is enabled then pymongo or one of our deps does not support free-threading.
+        self.assertFalse(sys._is_gil_enabled())  # type: ignore[attr-defined]
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_client_metadata.py b/test/asynchronous/test_client_metadata.py
new file mode 100644
index 0000000000..2f175cceed
--- /dev/null
+++ b/test/asynchronous/test_client_metadata.py
@@ -0,0 +1,232 @@
+# Copyright 2013-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations + +import asyncio +import os +import pathlib +import time +import unittest +from test.asynchronous import AsyncIntegrationTest +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import CMAPListener +from typing import Any, Optional + +import pytest + +from pymongo import AsyncMongoClient +from pymongo.driver_info import DriverInfo +from pymongo.monitoring import ConnectionClosedEvent + +try: + from mockupdb import MockupDB, OpMsgReply + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +pytestmark = pytest.mark.mockupdb + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "handshake", "unified") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "handshake", "unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +def _get_handshake_driver_info(request): + assert "client" in request + return request["client"] + + +class TestClientMetadataProse(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.server = MockupDB() + self.handshake_req = None + + def respond(r): + if "ismaster" in r: + # then this is a handshake request + self.handshake_req = r + return r.reply(OpMsgReply(maxWireVersion=13)) + + self.server.autoresponds(respond) + self.server.run() + self.addAsyncCleanup(self.server.stop) + + async def send_ping_and_get_metadata( + self, client: AsyncMongoClient, is_handshake: bool + ) -> tuple[str, Optional[str], Optional[str], dict[str, Any]]: + # reset if handshake request + if is_handshake: + self.handshake_req: Optional[dict] = None + + await client.admin.command("ping") + metadata = _get_handshake_driver_info(self.handshake_req) + driver_metadata = metadata["driver"] + name, version, platform = ( + driver_metadata["name"], + driver_metadata["version"], + metadata["platform"], + ) + return name, version, platform, metadata + + async def check_metadata_added( + self, + client: AsyncMongoClient, + add_name: str, + add_version: Optional[str], + add_platform: Optional[str], + ) -> None: + # send initial metadata + name, version, platform, metadata = await self.send_ping_and_get_metadata(client, True) + # wait for connection to become idle + await asyncio.sleep(0.005) + + # add new metadata + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + new_name, new_version, new_platform, new_metadata = await self.send_ping_and_get_metadata( + client, True + ) + if add_name is not None and add_name.lower() in name.lower().split("|"): + self.assertEqual(name, new_name) + self.assertEqual(version, new_version) + self.assertEqual(platform, new_platform) + else: + self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) + self.assertEqual( + new_version, + f"{version}|{add_version}" if add_version is not None else version, + ) + self.assertEqual( + new_platform, + f"{platform}|{add_platform}" if add_platform is not None else platform, + ) + + metadata.pop("driver") + metadata.pop("platform") + new_metadata.pop("driver") + new_metadata.pop("platform") + self.assertEqual(metadata, new_metadata) + + async def test_append_metadata(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await 
self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + async def test_append_metadata_platform_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", "2.0", None) + + async def test_append_metadata_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", None, "Framework Platform") + + async def test_append_metadata_platform_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + await self.check_metadata_added(client, "framework", None, None) + + async def test_multiple_successive_metadata_updates(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, maxIdleTimeMS=1, connect=False + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + async def test_multiple_successive_metadata_updates_platform_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", "2.0", None) + + async def test_multiple_successive_metadata_updates_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", None, "Framework Platform") + + async def test_multiple_successive_metadata_updates_platform_version_none(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await self.check_metadata_added(client, "framework", None, None) + + async def test_doesnt_update_established_connections(self): + listener = CMAPListener() + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + event_listeners=[listener], + ) + + # send initial metadata + name, version, platform, metadata = await self.send_ping_and_get_metadata(client, True) + self.assertIsNotNone(name) + self.assertIsNotNone(version) + self.assertIsNotNone(platform) + + # add data + add_name, add_version, add_platform = "framework", "2.0", "Framework Platform" + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + # check new data isn't sent + self.handshake_req: Optional[dict] = None + await client.admin.command("ping") + self.assertIsNone(self.handshake_req) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 0) + + async def test_duplicate_driver_name_no_op(self): + client = await self.async_rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + await 
self.check_metadata_added(client, "framework", None, None) + # wait for connection to become idle + await asyncio.sleep(0.005) + # add same metadata again + await self.check_metadata_added(client, "Framework", None, None) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_collation.py b/test/asynchronous/test_collation.py new file mode 100644 index 0000000000..da810a2a9f --- /dev/null +++ b/test/asynchronous/test_collation.py @@ -0,0 +1,282 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collation module.""" +from __future__ import annotations + +import functools +import warnings +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import EventListener, OvertCommandListener +from typing import Any + +from pymongo.collation import ( + Collation, + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, +) +from pymongo.errors import ConfigurationError +from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + ReplaceOne, + UpdateMany, + UpdateOne, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCollationObject(unittest.TestCase): + def test_constructor(self): + self.assertRaises(TypeError, Collation, locale=42) + # Fill in a locale to test the other options. + _Collation = functools.partial(Collation, "en_US") + # No error. + _Collation(caseFirst=CollationCaseFirst.UPPER) + self.assertRaises(TypeError, _Collation, caseLevel="true") + self.assertRaises(ValueError, _Collation, strength="six") + self.assertRaises(TypeError, _Collation, numericOrdering="true") + self.assertRaises(TypeError, _Collation, alternate=5) + self.assertRaises(TypeError, _Collation, maxVariable=2) + self.assertRaises(TypeError, _Collation, normalization="false") + self.assertRaises(TypeError, _Collation, backwards="true") + + # No errors. 
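+ # Unrecognized options are passed through unchanged, for forward compatibility: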
+ Collation("en_US", future_option="bar", another_option=42) + collation = Collation( + "en_US", + caseLevel=True, + caseFirst=CollationCaseFirst.UPPER, + strength=CollationStrength.QUATERNARY, + numericOrdering=True, + alternate=CollationAlternate.SHIFTED, + maxVariable=CollationMaxVariable.SPACE, + normalization=True, + backwards=True, + ) + + self.assertEqual( + { + "locale": "en_US", + "caseLevel": True, + "caseFirst": "upper", + "strength": 4, + "numericOrdering": True, + "alternate": "shifted", + "maxVariable": "space", + "normalization": True, + "backwards": True, + }, + collation.document, + ) + + self.assertEqual( + {"locale": "en_US", "backwards": True}, Collation("en_US", backwards=True).document + ) + + +class TestCollation(AsyncIntegrationTest): + listener: EventListener + warn_context: Any + collation: Collation + + @async_client_context.require_connection + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + self.collation = Collation("en_US") + self.warn_context = warnings.catch_warnings() + self.warn_context.__enter__() + + async def asyncTearDown(self) -> None: + self.warn_context.__exit__() + self.warn_context = None + self.listener.reset() + await super().asyncTearDown() + + def last_command_started(self): + return self.listener.started_events[-1].command + + def assertCollationInLastCommand(self): + self.assertEqual(self.collation.document, self.last_command_started()["collation"]) + + async def test_create_collection(self): + await self.db.test.drop() + await self.db.create_collection("test", collation=self.collation) + self.assertCollationInLastCommand() + + # Test passing collation as a dict as well. + await self.db.test.drop() + self.listener.reset() + await self.db.create_collection("test", collation=self.collation.document) + self.assertCollationInLastCommand() + + def test_index_model(self): + model = IndexModel([("a", 1), ("b", -1)], collation=self.collation) + self.assertEqual(self.collation.document, model.document["collation"]) + + async def test_create_index(self): + await self.db.test.create_index("foo", collation=self.collation) + ci_cmd = self.listener.started_events[0].command + self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) + + async def test_aggregate(self): + await self.db.test.aggregate([{"$group": {"_id": 42}}], collation=self.collation) + self.assertCollationInLastCommand() + + async def test_count_documents(self): + await self.db.test.count_documents({}, collation=self.collation) + self.assertCollationInLastCommand() + + async def test_distinct(self): + await self.db.test.distinct("foo", collation=self.collation) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find(collation=self.collation).distinct("foo") + self.assertCollationInLastCommand() + + async def test_find_command(self): + await self.db.test.insert_one({"is this thing on?": True}) + self.listener.reset() + await anext(self.db.test.find(collation=self.collation)) + self.assertCollationInLastCommand() + + async def test_explain_command(self): + self.listener.reset() + await self.db.test.find(collation=self.collation).explain() + # The collation should be part of the explained command. 
+ self.assertEqual( + self.collation.document, self.last_command_started()["explain"]["collation"] + ) + + async def test_delete(self): + await self.db.test.delete_one({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) + + self.listener.reset() + await self.db.test.delete_many({"foo": 42}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) + + async def test_update(self): + await self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + self.listener.reset() + await self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + self.listener.reset() + await self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) + command = self.listener.started_events[0].command + self.assertEqual(self.collation.document, command["updates"][0]["collation"]) + + async def test_find_and(self): + await self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find_one_and_update( + {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation + ) + self.assertCollationInLastCommand() + + self.listener.reset() + await self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) + self.assertCollationInLastCommand() + + async def test_bulk_write(self): + await self.db.test.collection.bulk_write( + [ + DeleteOne({"noCollation": 42}), + DeleteMany({"noCollation": 42}), + DeleteOne({"foo": 42}, collation=self.collation), + DeleteMany({"foo": 42}, collation=self.collation), + ReplaceOne({"noCollation": 24}, {"bar": 42}), + UpdateOne({"noCollation": 84}, {"$set": {"bar": 10}}, upsert=True), + UpdateMany({"noCollation": 45}, {"$set": {"bar": 42}}), + ReplaceOne({"foo": 24}, {"foo": 42}, collation=self.collation), + UpdateOne( + {"foo": 84}, {"$set": {"foo": 10}}, upsert=True, collation=self.collation + ), + UpdateMany({"foo": 45}, {"$set": {"foo": 42}}, collation=self.collation), + ] + ) + + delete_cmd = self.listener.started_events[0].command + update_cmd = self.listener.started_events[1].command + + def check_ops(ops): + for op in ops: + if "noCollation" in op["q"]: + self.assertNotIn("collation", op) + else: + self.assertEqual(self.collation.document, op["collation"]) + + check_ops(delete_cmd["deletes"]) + check_ops(update_cmd["updates"]) + + async def test_indexes_same_keys_different_collations(self): + await self.db.test.drop() + usa_collation = Collation("en_US") + ja_collation = Collation("ja") + await self.db.test.create_indexes( + [ + IndexModel("fieldname", collation=usa_collation), + IndexModel("fieldname", name="japanese_version", collation=ja_collation), + IndexModel("fieldname", name="simple"), + ] + ) + indexes = await self.db.test.index_information() + self.assertEqual( + usa_collation.document["locale"], indexes["fieldname_1"]["collation"]["locale"] + ) + self.assertEqual( + ja_collation.document["locale"], indexes["japanese_version"]["collation"]["locale"] + ) + self.assertNotIn("collation", indexes["simple"]) + 
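+ # Dropping by name removes only that index; the same keys under other collations survive.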
await self.db.test.drop_index("fieldname_1") + indexes = await self.db.test.index_information() + self.assertIn("japanese_version", indexes) + self.assertIn("simple", indexes) + self.assertNotIn("fieldname", indexes) + + async def test_unacknowledged_write(self): + unacknowledged = WriteConcern(w=0) + collection = self.db.get_collection("test", write_concern=unacknowledged) + with self.assertRaises(ConfigurationError): + await collection.update_one( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + update_one = UpdateOne( + {"hello": "world"}, {"$set": {"hello": "moon"}}, collation=self.collation + ) + with self.assertRaises(ConfigurationError): + await collection.bulk_write([update_one]) + + async def test_cursor_collation(self): + await self.db.test.insert_one({"hello": "world"}) + await anext(self.db.test.find().collation(self.collation)) + self.assertCollationInLastCommand() diff --git a/test/asynchronous/test_collection.py b/test/asynchronous/test_collection.py new file mode 100644 index 0000000000..498563fe83 --- /dev/null +++ b/test/asynchronous/test_collection.py @@ -0,0 +1,2263 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collection module.""" +from __future__ import annotations + +import asyncio +import contextlib +import re +import sys +from codecs import utf_8_decode +from collections import defaultdict +from test.asynchronous.utils import async_get_pool, async_is_mongos +from typing import Any, Iterable, no_type_check + +from pymongo.asynchronous.database import AsyncDatabase + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous import ( # TODO: fix sync imports in PYTHON-4528 + AsyncIntegrationTest, + AsyncUnitTest, + async_client_context, +) +from test.utils_shared import ( + IMPOSSIBLE_WRITE_CONCERN, + EventListener, + OvertCommandListener, + async_wait_until, +) +from test.version import Version + +from bson import encode +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from bson.raw_bson import RawBSONDocument +from bson.regex import Regex +from bson.son import SON +from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT +from pymongo.asynchronous.collection import AsyncCollection, ReturnDocument +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.bulk_shared import BulkWriteError +from pymongo.cursor_shared import CursorType +from pymongo.errors import ( + ConfigurationError, + DocumentTooLarge, + DuplicateKeyError, + ExecutionTimeout, + InvalidDocument, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) +from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command +from pymongo.operations import * +from pymongo.read_concern import DEFAULT_READ_CONCERN +from pymongo.read_preferences import ReadPreference +from pymongo.results import ( + DeleteResult, + InsertManyResult, + 
InsertOneResult, + UpdateResult, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCollectionNoConnect(AsyncUnitTest): + """Test Collection features on a client that does not connect.""" + + db: AsyncDatabase + client: AsyncMongoClient + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.client = self.simple_client(connect=False) + self.db = self.client.pymongo_test + + def test_collection(self): + self.assertRaises(TypeError, AsyncCollection, self.db, 5) + + def make_col(base, name): + return base[name] + + self.assertRaises(InvalidName, make_col, self.db, "") + self.assertRaises(InvalidName, make_col, self.db, "te$t") + self.assertRaises(InvalidName, make_col, self.db, ".test") + self.assertRaises(InvalidName, make_col, self.db, "test.") + self.assertRaises(InvalidName, make_col, self.db, "tes..t") + self.assertRaises(InvalidName, make_col, self.db.test, "") + self.assertRaises(InvalidName, make_col, self.db.test, "te$t") + self.assertRaises(InvalidName, make_col, self.db.test, ".test") + self.assertRaises(InvalidName, make_col, self.db.test, "test.") + self.assertRaises(InvalidName, make_col, self.db.test, "tes..t") + self.assertRaises(InvalidName, make_col, self.db.test, "tes\x00t") + + def test_getattr(self): + coll = self.db.test + self.assertIsInstance(coll["_does_not_exist"], AsyncCollection) + + with self.assertRaises(AttributeError) as context: + coll._does_not_exist + + # Message should be: + # "AttributeError: Collection has no attribute '_does_not_exist'. To + # access the test._does_not_exist collection, use + # database['test._does_not_exist']." + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + coll2 = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertEqual(coll2.write_concern, WriteConcern(w=0)) + self.assertNotEqual(coll.write_concern, coll2.write_concern) + coll3 = coll2.subcoll + self.assertEqual(coll2.write_concern, coll3.write_concern) + coll4 = coll2["subcoll"] + self.assertEqual(coll2.write_concern, coll4.write_concern) + + def test_iteration(self): + coll = self.db.coll + msg = "'AsyncCollection' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] + break + # Non-string indices will start failing in PyMongo 5. + self.assertEqual(coll[0].name, "coll.0") + self.assertEqual(coll[{}].name, "coll.{}") + # next fails + with self.assertRaisesRegex(TypeError, msg): + _ = next(coll) + # .next() fails + with self.assertRaisesRegex(TypeError, msg): + _ = coll.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(coll, Iterable) + + +class AsyncTestCollection(AsyncIntegrationTest): + w: int + + async def asyncSetUp(self): + await super().asyncSetUp() + self.w = async_client_context.w # type: ignore + + async def asyncTearDown(self): + await self.db.test.drop() + await self.db.drop_collection("test_large_limit") + await super().asyncTearDown() + + @contextlib.contextmanager + def write_concern_collection(self): + if async_client_context.is_rs: + with self.assertRaises(WriteConcernError): + # Unsatisfiable write concern. 
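+ # w exceeds the number of replica set members, so the server can never acknowledge the write.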
+ yield AsyncCollection( + self.db, + "test", + write_concern=WriteConcern(w=len(async_client_context.nodes) + 1), + ) + else: + yield self.db.test + + async def test_equality(self): + self.assertIsInstance(self.db.test, AsyncCollection) + self.assertEqual(self.db.test, self.db["test"]) + self.assertEqual(self.db.test, AsyncCollection(self.db, "test")) + self.assertEqual(self.db.test.mike, self.db["test.mike"]) + self.assertEqual(self.db.test["mike"], self.db["test.mike"]) + + async def test_hashable(self): + self.assertIn(self.db.test.mike, {self.db["test.mike"]}) + + async def test_create(self): + # No Exception. + db = async_client_context.client.pymongo_test + await db.create_test_no_wc.drop() + + async def lambda_test(): + return "create_test_no_wc" not in await db.list_collection_names() + + async def lambda_test_2(): + return "create_test_no_wc" in await db.list_collection_names() + + await async_wait_until( + lambda_test, + "drop create_test_no_wc collection", + ) + await db.create_collection("create_test_no_wc") + await async_wait_until( + lambda_test_2, + "create create_test_no_wc collection", + ) + # SERVER-33317 + if not async_client_context.is_mongos or not async_client_context.version.at_least(3, 7, 0): + with self.assertRaises(OperationFailure): + await db.create_collection("create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN) + + async def test_drop_nonexistent_collection(self): + await self.db.drop_collection("test") + self.assertNotIn("test", await self.db.list_collection_names()) + + # No exception + await self.db.drop_collection("test") + + async def test_create_indexes(self): + db = self.db + + with self.assertRaises(TypeError): + await db.test.create_indexes("foo") # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.test.create_indexes(["foo"]) # type: ignore[list-item] + self.assertRaises(TypeError, IndexModel, 5) + self.assertRaises(ValueError, IndexModel, []) + + await db.test.drop_indexes() + await db.test.insert_one({}) + self.assertEqual(len(await db.test.index_information()), 1) + + await db.test.create_indexes([IndexModel("hello")]) + await db.test.create_indexes([IndexModel([("hello", DESCENDING), ("world", ASCENDING)])]) + + # Tuple instead of list. 
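+ # IndexModel accepts any sequence of (key, direction) pairs, not just a list.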
+ await db.test.create_indexes([IndexModel((("world", ASCENDING),))]) + + self.assertEqual(len(await db.test.index_information()), 4) + + await db.test.drop_indexes() + names = await db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world")] + ) + self.assertEqual(names, ["hello_world"]) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_indexes([IndexModel("hello")]) + self.assertIn("hello_1", await db.test.index_information()) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + names = await db.test.create_indexes( + [IndexModel([("hello", DESCENDING), ("world", ASCENDING)]), IndexModel("hello")] + ) + info = await db.test.index_information() + for name in names: + self.assertIn(name, info) + + await db.test.drop() + await db.test.insert_one({"a": 1}) + await db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + await db.test.create_indexes([IndexModel("a", unique=True)]) + + with self.write_concern_collection() as coll: + await coll.create_indexes([IndexModel("hello")]) + + @async_client_context.require_version_max(4, 3, -1) + async def test_create_indexes_commitQuorum_requires_44(self): + db = self.db + with self.assertRaisesRegex( + ConfigurationError, + r"Must be connected to MongoDB 4\.4\+ to use the commitQuorum option for createIndexes", + ): + await db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + @async_client_context.require_no_standalone + @async_client_context.require_version_min(4, 4, -1) + async def test_create_indexes_commitQuorum(self): + await self.db.coll.create_indexes([IndexModel("a")], commitQuorum="majority") + + async def test_create_index(self): + db = self.db + + with self.assertRaises(TypeError): + await db.test.create_index(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + await db.test.create_index([]) + + await db.test.drop_indexes() + await db.test.insert_one({}) + self.assertEqual(len(await db.test.index_information()), 1) + + await db.test.create_index("hello") + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + + # Tuple instead of list. 
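+ # create_index likewise accepts any sequence of (key, direction) pairs.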
+ await db.test.create_index((("world", ASCENDING),)) + + self.assertEqual(len(await db.test.index_information()), 4) + + await db.test.drop_indexes() + ix = await db.test.create_index( + [("hello", DESCENDING), ("world", ASCENDING)], name="hello_world" + ) + self.assertEqual(ix, "hello_world") + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_index("hello") + self.assertIn("hello_1", await db.test.index_information()) + + await db.test.drop_indexes() + self.assertEqual(len(await db.test.index_information()), 1) + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) + self.assertIn("hello_-1_world_1", await db.test.index_information()) + + await db.test.drop_indexes() + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertIn("hello_-1_world_1", await db.test.index_information()) + + await db.test.drop() + await db.test.insert_one({"a": 1}) + await db.test.insert_one({"a": 1}) + with self.assertRaises(DuplicateKeyError): + await db.test.create_index("a", unique=True) + + with self.write_concern_collection() as coll: + await coll.create_index([("hello", DESCENDING)]) + + await db.test.create_index(["hello", "world"]) + await db.test.create_index(["hello", ("world", DESCENDING)]) + await db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + + async def test_drop_index(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index("hello") + name = await db.test.create_index("goodbye") + + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual(name, "goodbye_1") + await db.test.drop_index(name) + + # Drop it again. + if async_client_context.version < Version(8, 3, -1): + with self.assertRaises(OperationFailure): + await db.test.drop_index(name) + else: + await db.test.drop_index(name) + self.assertEqual(len(await db.test.index_information()), 2) + self.assertIn("hello_1", await db.test.index_information()) + + await db.test.drop_indexes() + await db.test.create_index("hello") + name = await db.test.create_index("goodbye") + + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual(name, "goodbye_1") + await db.test.drop_index([("goodbye", ASCENDING)]) + self.assertEqual(len(await db.test.index_information()), 2) + self.assertIn("hello_1", await db.test.index_information()) + + with self.write_concern_collection() as coll: + await coll.drop_index("hello_1") + + @async_client_context.require_no_mongos + @async_client_context.require_test_commands + async def test_index_management_max_time_ms(self): + coll = self.db.test + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn" + ) + try: + with self.assertRaises(ExecutionTimeout): + await coll.create_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.create_indexes([IndexModel("foo")], maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.drop_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + await coll.drop_indexes(maxTimeMS=1) + finally: + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="off" + ) + + async def test_list_indexes(self): + db = self.db + await db.test.drop() + await db.test.insert_one({}) # create collection + + def map_indexes(indexes): + return {index["name"]: index for index in indexes} + + indexes = await (await db.test.list_indexes()).to_list() + 
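+ # A freshly created collection carries only the automatic _id index.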
self.assertEqual(len(indexes), 1) + self.assertIn("_id_", map_indexes(indexes)) + + await db.test.create_index("hello") + indexes = await (await db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 2) + self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) + + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + indexes = await (await db.test.list_indexes()).to_list() + self.assertEqual(len(indexes), 3) + index_map = map_indexes(indexes) + self.assertEqual( + index_map["hello_-1_world_1"]["key"], SON([("hello", DESCENDING), ("world", ASCENDING)]) + ) + self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) + + # List indexes on a collection that does not exist. + indexes = await (await db.does_not_exist.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + # List indexes on a database that does not exist. + indexes = await (await db.does_not_exist.list_indexes()).to_list() + self.assertEqual(len(indexes), 0) + + async def test_index_info(self): + db = self.db + await db.test.drop() + await db.test.insert_one({}) # create collection + self.assertEqual(len(await db.test.index_information()), 1) + self.assertIn("_id_", await db.test.index_information()) + + await db.test.create_index("hello") + self.assertEqual(len(await db.test.index_information()), 2) + self.assertEqual( + (await db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)] + ) + + await db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) + self.assertEqual( + (await db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)] + ) + self.assertEqual(len(await db.test.index_information()), 3) + self.assertEqual( + [("hello", DESCENDING), ("world", ASCENDING)], + (await db.test.index_information())["hello_-1_world_1"]["key"], + ) + self.assertEqual(True, (await db.test.index_information())["hello_-1_world_1"]["unique"]) + + async def test_index_geo2d(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("loc_2d", await db.test.create_index([("loc", GEO2D)])) + index_info = (await db.test.index_information())["loc_2d"] + self.assertEqual([("loc", "2d")], index_info["key"]) + + # geoSearch was deprecated in 4.4 and removed in 5.0 + @async_client_context.require_version_max(4, 5) + @async_client_context.require_no_mongos + async def test_index_haystack(self): + db = self.db + await db.test.drop() + _id = ( + await db.test.insert_one({"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}) + ).inserted_id + await db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"}) + await db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"}) + await db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1) + + results = ( + await db.command( + SON( + [ + ("geoSearch", "test"), + ("near", [33, 33]), + ("maxDistance", 6), + ("search", {"type": "restaurant"}), + ("limit", 30), + ] + ) + ) + )["results"] + + self.assertEqual(2, len(results)) + self.assertEqual( + {"_id": _id, "pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}, results[0] + ) + + @async_client_context.require_no_mongos + async def test_index_text(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("t_text", await db.test.create_index([("t", TEXT)])) + index_info = (await db.test.index_information())["t_text"] + self.assertIn("weights", index_info) + + await db.test.insert_many( + [{"t": "spam eggs and spam"}, {"t": 
"spam"}, {"t": "egg sausage and bacon"}] + ) + + # MongoDB 2.6 text search. Create 'score' field in projection. + cursor = db.test.find({"$text": {"$search": "spam"}}, {"score": {"$meta": "textScore"}}) + + # Sort by 'score' field. + cursor.sort([("score", {"$meta": "textScore"})]) + results = await cursor.to_list() + self.assertGreaterEqual(results[0]["score"], results[1]["score"]) + + await db.test.drop_indexes() + + async def test_index_2dsphere(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("geo_2dsphere", await db.test.create_index([("geo", GEOSPHERE)])) + + for dummy, info in (await db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "geo" and idx_type == "2dsphere": + break + else: + self.fail("2dsphere index not found.") + + poly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]} + query = {"geo": {"$within": {"$geometry": poly}}} + + # This query will error without a 2dsphere index. + db.test.find(query) + await db.test.drop_indexes() + + async def test_index_hashed(self): + db = self.db + await db.test.drop_indexes() + self.assertEqual("a_hashed", await db.test.create_index([("a", HASHED)])) + + for dummy, info in (await db.test.index_information()).items(): + field, idx_type = info["key"][0] + if field == "a" and idx_type == "hashed": + break + else: + self.fail("hashed index not found.") + + await db.test.drop_indexes() + + async def test_index_sparse(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index([("key", ASCENDING)], sparse=True) + self.assertTrue((await db.test.index_information())["key_1"]["sparse"]) + + async def test_index_background(self): + db = self.db + await db.test.drop_indexes() + await db.test.create_index([("keya", ASCENDING)]) + await db.test.create_index([("keyb", ASCENDING)], background=False) + await db.test.create_index([("keyc", ASCENDING)], background=True) + self.assertNotIn("background", (await db.test.index_information())["keya_1"]) + self.assertFalse((await db.test.index_information())["keyb_1"]["background"]) + self.assertTrue((await db.test.index_information())["keyc_1"]["background"]) + + async def _drop_dups_setup(self, db): + await db.drop_collection("test") + await db.test.insert_one({"i": 1}) + await db.test.insert_one({"i": 2}) + await db.test.insert_one({"i": 2}) # duplicate + await db.test.insert_one({"i": 3}) + + async def test_index_dont_drop_dups(self): + # Try *not* dropping duplicates + db = self.db + await self._drop_dups_setup(db) + + # There's a duplicate + async def _test_create(): + await db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) + + with self.assertRaises(DuplicateKeyError): + await _test_create() + + # Duplicate wasn't dropped + self.assertEqual(4, await db.test.count_documents({})) + + # Index wasn't created, only the default index on _id + self.assertEqual(1, len(await db.test.index_information())) + + # Get the plan dynamically because the explain format will change. + def get_plan_stage(self, root, stage): + if root.get("stage") == stage: + return root + elif "inputStage" in root: + return self.get_plan_stage(root["inputStage"], stage) + elif "inputStages" in root: + for i in root["inputStages"]: + stage = self.get_plan_stage(i, stage) + if stage: + return stage + elif "queryPlan" in root: + # queryPlan (and slotBasedPlan) are new in 5.0. 
+ return self.get_plan_stage(root["queryPlan"], stage) + elif "shards" in root: + for i in root["shards"]: + stage = self.get_plan_stage(i["winningPlan"], stage) + if stage: + return stage + return {} + + async def test_index_filter(self): + db = self.db + await db.drop_collection("test") + + # Test bad filter spec on create. + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression=5) + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression={"x": {"$asdasd": 3}}) + with self.assertRaises(OperationFailure): + await db.test.create_index("x", partialFilterExpression={"$and": 5}) + + self.assertEqual( + "x_1", + await db.test.create_index( + [("x", ASCENDING)], partialFilterExpression={"a": {"$lte": 1.5}} + ), + ) + await db.test.insert_one({"x": 5, "a": 2}) + await db.test.insert_one({"x": 6, "a": 1}) + + # Operations that use the partial index. + explain = await db.test.find({"x": 6, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = await db.test.find({"x": {"$gt": 1}, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + explain = await db.test.find({"x": 6, "a": {"$lte": 1}}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "IXSCAN") + self.assertEqual("x_1", stage.get("indexName")) + self.assertTrue(stage.get("isPartial")) + + # Operations that do not use the partial index. + explain = await db.test.find({"x": 6, "a": {"$lte": 1.6}}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + explain = await db.test.find({"x": 6}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + # Test drop_indexes. 
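+ # With the partial index gone, the same query must fall back to a collection scan.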
+ await db.test.drop_index("x_1") + explain = await db.test.find({"x": 6, "a": 1}).explain() + stage = self.get_plan_stage(explain["queryPlanner"]["winningPlan"], "COLLSCAN") + self.assertNotEqual({}, stage) + + async def test_field_selection(self): + db = self.db + await db.drop_collection("test") + + doc = {"a": 1, "b": 5, "c": {"d": 5, "e": 10}} + await db.test.insert_one(doc) + + # Test field inclusion + doc = await anext(db.test.find({}, ["_id"])) + self.assertEqual(list(doc), ["_id"]) + doc = await anext(db.test.find({}, ["a"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "a"]) + doc = await anext(db.test.find({}, ["b"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "b"]) + doc = await anext(db.test.find({}, ["c"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "c"]) + doc = await anext(db.test.find({}, ["a"])) + self.assertEqual(doc["a"], 1) + doc = await anext(db.test.find({}, ["b"])) + self.assertEqual(doc["b"], 5) + doc = await anext(db.test.find({}, ["c"])) + self.assertEqual(doc["c"], {"d": 5, "e": 10}) + + # Test inclusion of fields with dots + doc = await anext(db.test.find({}, ["c.d"])) + self.assertEqual(doc["c"], {"d": 5}) + doc = await anext(db.test.find({}, ["c.e"])) + self.assertEqual(doc["c"], {"e": 10}) + doc = await anext(db.test.find({}, ["b", "c.e"])) + self.assertEqual(doc["c"], {"e": 10}) + + doc = await anext(db.test.find({}, ["b", "c.e"])) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "b", "c"]) + doc = await anext(db.test.find({}, ["b", "c.e"])) + self.assertEqual(doc["b"], 5) + + # Test field exclusion + doc = await anext(db.test.find({}, {"a": False, "b": 0})) + l = list(doc) + l.sort() + self.assertEqual(l, ["_id", "c"]) + + doc = await anext(db.test.find({}, {"_id": False})) + l = list(doc) + self.assertNotIn("_id", l) + + async def test_options(self): + db = self.db + await db.drop_collection("test") + await db.create_collection("test", capped=True, size=4096) + result = await db.test.options() + self.assertEqual(result, {"capped": True, "size": 4096}) + await db.drop_collection("test") + + async def test_insert_one(self): + db = self.db + await db.test.drop() + + document: dict[str, Any] = {"_id": 1000} + result = await db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, int) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(await db.test.find_one({"_id": document["_id"]})) + self.assertEqual(1, await db.test.count_documents({})) + + document = {"foo": "bar"} + result = await db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) + self.assertEqual(document["_id"], result.inserted_id) + self.assertTrue(result.acknowledged) + self.assertIsNotNone(await db.test.find_one({"_id": document["_id"]})) + self.assertEqual(2, await db.test.count_documents({})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) + self.assertEqual(document["_id"], result.inserted_id) + self.assertFalse(result.acknowledged) + # The insert failed duplicate key... 
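+ # With w=0 the error never reaches the client, so poll until the server state shows it.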
+ + async def async_lambda(): + return await db.test.count_documents({}) == 2 + + await async_wait_until(async_lambda, "forcing duplicate key error") + + document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) + result = await db.test.insert_one(document) + self.assertIsInstance(result, InsertOneResult) + self.assertEqual(result.inserted_id, None) + + async def test_insert_many(self): + db = self.db + await db.test.drop() + + docs: list = [{} for _ in range(5)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertIsInstance(_id, ObjectId) + self.assertIn(_id, result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [{"_id": i} for i in range(5)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual(5, len(result.inserted_ids)) + for doc in docs: + _id = doc["_id"] + self.assertIsInstance(_id, int) + self.assertIn(_id, result.inserted_ids) + self.assertEqual(1, await db.test.count_documents({"_id": _id})) + self.assertTrue(result.acknowledged) + + docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) + self.assertEqual([], result.inserted_ids) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + docs: list = [{} for _ in range(5)] + result = await db.test.insert_many(docs) + self.assertIsInstance(result, InsertManyResult) + self.assertFalse(result.acknowledged) + self.assertEqual(20, await db.test.count_documents({})) + + async def test_insert_many_generator(self): + coll = self.db.test + await coll.delete_many({}) + + def gen(): + yield {"a": 1, "b": 1} + yield {"a": 1, "b": 2} + yield {"a": 2, "b": 3} + yield {"a": 3, "b": 5} + yield {"a": 5, "b": 8} + + result = await coll.insert_many(gen()) + self.assertEqual(5, len(result.inserted_ids)) + + async def test_insert_many_invalid(self): + db = self.db + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many({}) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many([]) + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many(1) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): + await db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) + + async def test_delete_one(self): + await self.db.test.drop() + + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"y": 1}) + await self.db.test.insert_one({"z": 1}) + + result = await self.db.test.delete_one({"x": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(2, await self.db.test.count_documents({})) + + result = await self.db.test.delete_one({"y": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(1, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await self.db.test.count_documents({})) + + db = self.db.client.get_database(self.db.name, 
write_concern=WriteConcern(w=0)) + result = await db.test.delete_one({"z": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + async def lambda_async(): + return await db.test.count_documents({}) == 0 + + await async_wait_until(lambda_async, "delete 1 documents") + + async def test_delete_many(self): + await self.db.test.drop() + + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"x": 1}) + await self.db.test.insert_one({"y": 1}) + await self.db.test.insert_one({"y": 1}) + + result = await self.db.test.delete_many({"x": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertEqual(2, result.deleted_count) + self.assertTrue(result.acknowledged) + self.assertEqual(0, await self.db.test.count_documents({"x": 1})) + + db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + result = await db.test.delete_many({"y": 1}) + self.assertIsInstance(result, DeleteResult) + self.assertRaises(InvalidOperation, lambda: result.deleted_count) + self.assertFalse(result.acknowledged) + + async def lambda_async(): + return await db.test.count_documents({}) == 0 + + await async_wait_until(lambda_async, "delete 2 documents") + + async def test_command_document_too_large(self): + large = "*" * (await async_client_context.max_bson_size + _COMMAND_OVERHEAD) + coll = self.db.test + with self.assertRaises(DocumentTooLarge): + await coll.insert_one({"data": large}) + # update_one and update_many are the same + with self.assertRaises(DocumentTooLarge): + await coll.replace_one({}, {"data": large}) + with self.assertRaises(DocumentTooLarge): + await coll.delete_one({"data": large}) + + async def test_write_large_document(self): + max_size = await async_client_context.max_bson_size + half_size = int(max_size / 2) + max_str = "x" * max_size + half_str = "x" * half_size + self.assertEqual(max_size, 16777216) + + with self.assertRaises(OperationFailure): + await self.db.test.insert_one({"foo": max_str}) + with self.assertRaises(OperationFailure): + await self.db.test.replace_one({}, {"foo": max_str}, upsert=True) + with self.assertRaises(OperationFailure): + await self.db.test.insert_many([{"x": 1}, {"foo": max_str}]) + await self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) + + await self.db.test.insert_one({"bar": "x"}) + # Use w=0 here to test legacy doc size checking in all server versions + unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(DocumentTooLarge): + await unack_coll.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 14)}) + await self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) + + async def test_insert_bypass_document_validation(self): + db = self.db + await db.test.drop() + await db.create_collection("test", validator={"a": {"$exists": True}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + # Test insert_one + with self.assertRaises(OperationFailure): + await db.test.insert_one({"_id": 1, "x": 100}) + result = await db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) + self.assertIsInstance(result, InsertOneResult) + self.assertEqual(1, result.inserted_id) + result = await db.test.insert_one({"_id": 2, "a": 0}) + self.assertIsInstance(result, InsertOneResult) + self.assertEqual(2, result.inserted_id) + + await db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) + + async def 
async_lambda():
+            return await db_w0.test.find_one({"y": 1})
+
+        await async_wait_until(async_lambda, "find w:0 inserted document")
+
+        # Test insert_many
+        docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)]
+        with self.assertRaises(OperationFailure):
+            await db.test.insert_many(docs)
+        result = await db.test.insert_many(docs, bypass_document_validation=True)
+        self.assertIsInstance(result, InsertManyResult)
+        self.assertEqual(97, len(result.inserted_ids))
+        for doc in docs:
+            _id = doc["_id"]
+            self.assertIsInstance(_id, int)
+            self.assertIn(_id, result.inserted_ids)
+            self.assertEqual(1, await db.test.count_documents({"x": doc["x"]}))
+        self.assertTrue(result.acknowledged)
+        docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)]
+        result = await db.test.insert_many(docs)
+        self.assertIsInstance(result, InsertManyResult)
+        self.assertEqual(100, len(result.inserted_ids))
+        for doc in docs:
+            _id = doc["_id"]
+            self.assertIsInstance(_id, int)
+            self.assertIn(_id, result.inserted_ids)
+            self.assertEqual(1, await db.test.count_documents({"a": doc["a"]}))
+        self.assertTrue(result.acknowledged)
+
+        with self.assertRaises(OperationFailure):
+            await db_w0.test.insert_many(
+                [{"x": 1}, {"x": 2}],
+                bypass_document_validation=True,
+            )
+
+    async def test_replace_bypass_document_validation(self):
+        db = self.db
+        await db.test.drop()
+        await db.create_collection("test", validator={"a": {"$exists": True}})
+        db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0))
+
+        # Test replace_one
+        await db.test.insert_one({"a": 101})
+        with self.assertRaises(OperationFailure):
+            await db.test.replace_one({"a": 101}, {"y": 1})
+        self.assertEqual(0, await db.test.count_documents({"y": 1}))
+        self.assertEqual(1, await db.test.count_documents({"a": 101}))
+        await db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True)
+        self.assertEqual(0, await db.test.count_documents({"a": 101}))
+        self.assertEqual(1, await db.test.count_documents({"y": 1}))
+        await db.test.replace_one({"y": 1}, {"a": 102})
+        self.assertEqual(0, await db.test.count_documents({"y": 1}))
+        self.assertEqual(0, await db.test.count_documents({"a": 101}))
+        self.assertEqual(1, await db.test.count_documents({"a": 102}))
+
+        await db.test.insert_one({"y": 1}, bypass_document_validation=True)
+        with self.assertRaises(OperationFailure):
+            await db.test.replace_one({"y": 1}, {"x": 101})
+        self.assertEqual(0, await db.test.count_documents({"x": 101}))
+        self.assertEqual(1, await db.test.count_documents({"y": 1}))
+        await db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True)
+        self.assertEqual(0, await db.test.count_documents({"y": 1}))
+        self.assertEqual(1, await db.test.count_documents({"x": 101}))
+        await db.test.replace_one({"x": 101}, {"a": 103}, bypass_document_validation=False)
+        self.assertEqual(0, await db.test.count_documents({"x": 101}))
+        self.assertEqual(1, await db.test.count_documents({"a": 103}))
+
+        await db.test.insert_one({"y": 1}, bypass_document_validation=True)
+        await db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True)
+
+        async def predicate():
+            return await db_w0.test.find_one({"x": 1})
+
+        await async_wait_until(predicate, "find w:0 replaced document")
+
+    async def test_update_bypass_document_validation(self):
+        db = self.db
+        await db.test.drop()
+        await db.test.insert_one({"z": 5})
+        await db.command(SON([("collMod", "test"), ("validator", {"z": {"$gte": 0}})]))
+        db_w0 = self.db.client.get_database(self.db.name,
write_concern=WriteConcern(w=0)) + + # Test update_one + with self.assertRaises(OperationFailure): + await db.test.update_one({"z": 5}, {"$inc": {"z": -10}}) + self.assertEqual(0, await db.test.count_documents({"z": -5})) + self.assertEqual(1, await db.test.count_documents({"z": 5})) + await db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) + self.assertEqual(0, await db.test.count_documents({"z": 5})) + self.assertEqual(1, await db.test.count_documents({"z": -5})) + await db.test.update_one({"z": -5}, {"$inc": {"z": 6}}, bypass_document_validation=False) + self.assertEqual(1, await db.test.count_documents({"z": 1})) + self.assertEqual(0, await db.test.count_documents({"z": -5})) + + await db.test.insert_one({"z": -10}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_one({"z": -10}, {"$inc": {"z": 1}}) + self.assertEqual(0, await db.test.count_documents({"z": -9})) + self.assertEqual(1, await db.test.count_documents({"z": -10})) + await db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) + self.assertEqual(1, await db.test.count_documents({"z": -9})) + self.assertEqual(0, await db.test.count_documents({"z": -10})) + await db.test.update_one({"z": -9}, {"$inc": {"z": 9}}, bypass_document_validation=False) + self.assertEqual(0, await db.test.count_documents({"z": -9})) + self.assertEqual(1, await db.test.count_documents({"z": 0})) + + await db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) + await db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + async def async_lambda(): + return await db_w0.test.find_one({"y": 1, "x": 1}) + + await async_wait_until(async_lambda, "find w:0 updated document") + + # Test update_many + await db.test.insert_many([{"z": i} for i in range(3, 101)]) + await db.test.insert_one({"y": 0}, bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_many({}, {"$inc": {"z": -100}}) + self.assertEqual(100, await db.test.count_documents({"z": {"$gte": 0}})) + self.assertEqual(0, await db.test.count_documents({"z": {"$lt": 0}})) + self.assertEqual(0, await db.test.count_documents({"y": 0, "z": -100})) + await db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) + self.assertEqual(0, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(100, await db.test.count_documents({"z": {"$lte": 0}})) + await db.test.update_many( + {"z": {"$gt": -50}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) + self.assertEqual(50, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(50, await db.test.count_documents({"z": {"$lt": 0}})) + + await db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) + with self.assertRaises(OperationFailure): + await db.test.update_many({}, {"$inc": {"z": 1}}) + self.assertEqual(100, await db.test.count_documents({"z": {"$lte": 0}})) + self.assertEqual(50, await db.test.count_documents({"z": {"$gt": 1}})) + await db.test.update_many( + {"z": {"$gte": 0}}, {"$inc": {"z": -100}}, bypass_document_validation=True + ) + self.assertEqual(0, await db.test.count_documents({"z": {"$gt": 0}})) + self.assertEqual(150, await db.test.count_documents({"z": {"$lte": 0}})) + await db.test.update_many( + {"z": {"$lte": 0}}, {"$inc": {"z": 100}}, bypass_document_validation=False + ) + self.assertEqual(150, await db.test.count_documents({"z": 
{"$gte": 0}})) + self.assertEqual(0, await db.test.count_documents({"z": {"$lt": 0}})) + + await db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + await db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) + await db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) + + async def async_lambda(): + return await db_w0.test.count_documents({"m": 1, "x": 1}) == 2 + + await async_wait_until(async_lambda, "find w:0 updated documents") + + async def test_bypass_document_validation_bulk_write(self): + db = self.db + await db.test.drop() + await db.create_collection("test", validator={"a": {"$gte": 0}}) + db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) + + ops: list = [ + InsertOne({"a": -10}), + InsertOne({"a": -11}), + InsertOne({"a": -12}), + UpdateOne({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + UpdateMany({"a": {"$lte": -10}}, {"$inc": {"a": 1}}), + ReplaceOne({"a": {"$lte": -10}}, {"a": -1}), + ] + await db.test.bulk_write(ops, bypass_document_validation=True) + + self.assertEqual(3, await db.test.count_documents({})) + self.assertEqual(1, await db.test.count_documents({"a": -11})) + self.assertEqual(1, await db.test.count_documents({"a": -1})) + self.assertEqual(1, await db.test.count_documents({"a": -9})) + + # Assert that the operations would fail without bypass_doc_val + for op in ops: + with self.assertRaises(BulkWriteError): + await db.test.bulk_write([op]) + + with self.assertRaises(OperationFailure): + await db_w0.test.bulk_write(ops, bypass_document_validation=True) + + async def test_find_by_default_dct(self): + db = self.db + await db.test.insert_one({"foo": "bar"}) + dct = defaultdict(dict, [("foo", "bar")]) # type: ignore[arg-type] + self.assertIsNotNone(await db.test.find_one(dct)) + self.assertEqual(dct, defaultdict(dict, [("foo", "bar")])) + + async def test_find_w_fields(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one( + {"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"} + ) + self.assertEqual(1, await db.test.count_documents({})) + doc = await anext(db.test.find({})) + self.assertIn("x", doc) + doc = await anext(db.test.find({})) + self.assertIn("mike", doc) + doc = await anext(db.test.find({})) + self.assertIn("extra thing", doc) + doc = await anext(db.test.find({}, ["x", "mike"])) + self.assertIn("x", doc) + doc = await anext(db.test.find({}, ["x", "mike"])) + self.assertIn("mike", doc) + doc = await anext(db.test.find({}, ["x", "mike"])) + self.assertNotIn("extra thing", doc) + doc = await anext(db.test.find({}, ["mike"])) + self.assertNotIn("x", doc) + doc = await anext(db.test.find({}, ["mike"])) + self.assertIn("mike", doc) + doc = await anext(db.test.find({}, ["mike"])) + self.assertNotIn("extra thing", doc) + + @no_type_check + async def test_fields_specifier_as_dict(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) + + self.assertEqual([1, 2, 3], (await db.test.find_one())["x"]) + self.assertEqual([2, 3], (await db.test.find_one(projection={"x": {"$slice": -2}}))["x"]) + self.assertNotIn("x", await db.test.find_one(projection={"x": 0})) + self.assertIn("mike", await db.test.find_one(projection={"x": 0})) + + async def test_find_w_regex(self): + db = self.db + await db.test.delete_many({}) + + await db.test.insert_one({"x": "hello_world"}) + await db.test.insert_one({"x": "hello_mike"}) + await db.test.insert_one({"x": "hello_mikey"}) + 
await db.test.insert_one({"x": "hello_test"}) + + self.assertEqual(len(await db.test.find().to_list()), 4) + self.assertEqual(len(await db.test.find({"x": re.compile("^hello.*")}).to_list()), 4) + self.assertEqual(len(await db.test.find({"x": re.compile("ello")}).to_list()), 4) + self.assertEqual(len(await db.test.find({"x": re.compile("^hello$")}).to_list()), 0) + self.assertEqual(len(await db.test.find({"x": re.compile("^hello_mi.*$")}).to_list()), 2) + + async def test_id_can_be_anything(self): + db = self.db + + await db.test.delete_many({}) + auto_id = {"hello": "world"} + await db.test.insert_one(auto_id) + self.assertIsInstance(auto_id["_id"], ObjectId) + + numeric = {"_id": 240, "hello": "world"} + await db.test.insert_one(numeric) + self.assertEqual(numeric["_id"], 240) + + obj = {"_id": numeric, "hello": "world"} + await db.test.insert_one(obj) + self.assertEqual(obj["_id"], numeric) + + async for x in db.test.find(): + self.assertEqual(x["hello"], "world") + self.assertIn("_id", x) + + async def test_unique_index(self): + db = self.db + await db.drop_collection("test") + await db.test.create_index("hello") + + # No error. + await db.test.insert_one({"hello": "world"}) + await db.test.insert_one({"hello": "world"}) + + await db.drop_collection("test") + await db.test.create_index("hello", unique=True) + + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"hello": "world"}) + await db.test.insert_one({"hello": "world"}) + + async def test_duplicate_key_error(self): + db = self.db + await db.drop_collection("test") + + await db.test.create_index("x", unique=True) + + await db.test.insert_one({"_id": 1, "x": 1}) + + with self.assertRaises(DuplicateKeyError) as context: + await db.test.insert_one({"x": 1}) + + self.assertIsNotNone(context.exception.details) + + with self.assertRaises(DuplicateKeyError) as context: + await db.test.insert_one({"x": 1}) + + self.assertIsNotNone(context.exception.details) + self.assertEqual(1, await db.test.count_documents({})) + + async def test_write_error_text_handling(self): + db = self.db + await db.drop_collection("test") + + await db.test.create_index("text", unique=True) + + # Test workaround for SERVER-24007 + data = ( + b"a\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + 
b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + b"\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83\xe2\x98\x83" + ) + + text = utf_8_decode(data, None, True) + await db.test.insert_one({"text": text}) + + # Should raise DuplicateKeyError, not InvalidBSON + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"text": text}) + + with self.assertRaises(DuplicateKeyError): + await db.test.replace_one({"_id": ObjectId()}, {"text": text}, upsert=True) + + # Should raise BulkWriteError, not InvalidBSON + with self.assertRaises(BulkWriteError): + await db.test.insert_many([{"text": text}]) + + async def test_write_error_unicode(self): + coll = self.db.test + self.addAsyncCleanup(coll.drop) + + await coll.create_index("a", unique=True) + await coll.insert_one({"a": "unicode \U0001f40d"}) + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error") as ctx: + await coll.insert_one({"a": "unicode \U0001f40d"}) + + # Once more for good measure. + self.assertIn("E11000 duplicate key error", str(ctx.exception)) + + async def test_wtimeout(self): + # Ensure setting wtimeout doesn't disable write concern altogether. + # See SERVER-12596. + collection = self.db.test + await collection.drop() + await collection.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + await coll.insert_one({"_id": 1}) + + coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) + with self.assertRaises(DuplicateKeyError): + await coll.insert_one({"_id": 1}) + + async def test_error_code(self): + try: + await self.db.test.update_many({}, {"$thismodifierdoesntexist": 1}) + except OperationFailure as exc: + self.assertIn(exc.code, (9, 10147, 16840, 17009)) + # Just check that we set the error document. Fields + # vary by MongoDB version. 
+ self.assertIsNotNone(exc.details) + else: + self.fail("OperationFailure was not raised") + + async def test_index_on_subfield(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_one({"hello": {"a": 4, "b": 5}}) + await db.test.insert_one({"hello": {"a": 7, "b": 2}}) + await db.test.insert_one({"hello": {"a": 4, "b": 10}}) + + await db.drop_collection("test") + await db.test.create_index("hello.a", unique=True) + + await db.test.insert_one({"hello": {"a": 4, "b": 5}}) + await db.test.insert_one({"hello": {"a": 7, "b": 2}}) + with self.assertRaises(DuplicateKeyError): + await db.test.insert_one({"hello": {"a": 4, "b": 10}}) + + async def test_replace_one(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.replace_one({}, {"$set": {"x": 1}}) + + id1 = (await db.test.insert_one({"x": 1})).inserted_id + result = await db.test.replace_one({"x": 1}, {"y": 1}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 1})) + self.assertEqual(0, await db.test.count_documents({"x": 1})) + self.assertEqual((await db.test.find_one(id1))["y"], 1) # type: ignore + + replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) + result = await db.test.replace_one({"y": 1}, replacement, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"z": 1})) + self.assertEqual(0, await db.test.count_documents({"y": 1})) + self.assertEqual((await db.test.find_one(id1))["z"], 1) # type: ignore + + result = await db.test.replace_one({"x": 2}, {"y": 2}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 2})) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.replace_one({"x": 0}, {"y": 0}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_one(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.update_one({}, {"x": 1}) + + id1 = (await db.test.insert_one({"x": 5})).inserted_id + result = await db.test.update_one({}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((await db.test.find_one(id1))["x"], 6) # type: ignore + + id2 = (await db.test.insert_one({"x": 1})).inserted_id + result = await db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + 
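# A short sketch of the UpdateResult contract these assertions exercise,
# assuming an acknowledged collection handle `coll` inside a coroutine:
# matched_count counts documents matched by the filter, modified_count may be
# None only on very old wire protocols, and upserted_id is set only when an
# upsert actually inserts a new document.
result = await coll.replace_one({"no-such-field": 1}, {"fresh": 1}, upsert=True)
assert result.matched_count == 0
assert result.upserted_id is not None  # the _id generated by the upsert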
self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual((await db.test.find_one(id1))["x"], 7) # type: ignore + self.assertEqual((await db.test.find_one(id2))["x"], 1) # type: ignore + + result = await db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_result(self): + db = self.db + await db.drop_collection("test") + + result = await db.test.update_one({"x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = await db.test.update_one({"_id": None, "x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = await db.test.update_one({"_id": None}, {"$inc": {"x": 1}}) + self.assertEqual(result.did_upsert, False) + + async def test_update_many(self): + db = self.db + await db.drop_collection("test") + + with self.assertRaises(ValueError): + await db.test.update_many({}, {"x": 1}) + + await db.test.insert_one({"x": 4, "y": 3}) + await db.test.insert_one({"x": 5, "y": 5}) + await db.test.insert_one({"x": 4, "y": 4}) + + result = await db.test.update_many({"x": 4}, {"$set": {"y": 5}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(2, result.matched_count) + self.assertIn(result.modified_count, (None, 2)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(3, await db.test.count_documents({"y": 5})) + + result = await db.test.update_many({"x": 5}, {"$set": {"y": 6}}) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(1, result.matched_count) + self.assertIn(result.modified_count, (None, 1)) + self.assertIsNone(result.upserted_id) + self.assertTrue(result.acknowledged) + self.assertEqual(1, await db.test.count_documents({"y": 6})) + + result = await db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) + self.assertIsInstance(result, UpdateResult) + self.assertEqual(0, result.matched_count) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) + self.assertTrue(result.acknowledged) + + db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) + result = await db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) + self.assertIsInstance(result, UpdateResult) + self.assertRaises(InvalidOperation, lambda: result.matched_count) + self.assertRaises(InvalidOperation, lambda: result.modified_count) + self.assertRaises(InvalidOperation, lambda: result.upserted_id) + self.assertFalse(result.acknowledged) + + async def test_update_check_keys(self): + await self.db.drop_collection("test") + self.assertTrue(await self.db.test.insert_one({"hello": "world"})) + + # Modify shouldn't check keys... 
+ self.assertTrue( + await self.db.test.update_one( + {"hello": "world"}, {"$set": {"foo.bar": "baz"}}, upsert=True + ) + ) + + # I know this seems like testing the server but I'd like to be notified + # by CI if the server's behavior changes here. + doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) + with self.assertRaises(OperationFailure): + await self.db.test.update_one({"hello": "world"}, doc, upsert=True) + + # This is going to cause keys to be checked and raise InvalidDocument. + # That's OK assuming the server's behavior in the previous assert + # doesn't change. If the behavior changes checking the first key for + # '$' in update won't be good enough anymore. + doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) + with self.assertRaises(OperationFailure): + await self.db.test.replace_one({"hello": "world"}, doc, upsert=True) + + # Replace with empty document + self.assertNotEqual( + 0, (await self.db.test.replace_one({"hello": "world"}, {})).matched_count + ) + + async def test_acknowledged_delete(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many([{"x": 1}, {"x": 1}]) + self.assertEqual(2, (await db.test.delete_many({})).deleted_count) + self.assertEqual(0, (await db.test.delete_many({})).deleted_count) + + @async_client_context.require_version_max(4, 9) + async def test_manual_last_error(self): + coll = self.db.get_collection("test", write_concern=WriteConcern(w=0)) + await coll.insert_one({"x": 1}) + await self.db.command("getlasterror", w=1, wtimeout=1) + + async def test_count_documents(self): + db = self.db + await db.drop_collection("test") + self.addAsyncCleanup(db.drop_collection, "test") + + self.assertEqual(await db.test.count_documents({}), 0) + await db.wrong.insert_many([{}, {}]) + self.assertEqual(await db.test.count_documents({}), 0) + await db.test.insert_many([{}, {}]) + self.assertEqual(await db.test.count_documents({}), 2) + await db.test.insert_many([{"foo": "bar"}, {"foo": "baz"}]) + self.assertEqual(await db.test.count_documents({"foo": "bar"}), 1) + self.assertEqual(await db.test.count_documents({"foo": re.compile(r"ba.*")}), 2) + + async def test_estimated_document_count(self): + db = self.db + await db.drop_collection("test") + self.addAsyncCleanup(db.drop_collection, "test") + + self.assertEqual(await db.test.estimated_document_count(), 0) + await db.wrong.insert_many([{}, {}]) + self.assertEqual(await db.test.estimated_document_count(), 0) + await db.test.insert_many([{}, {}]) + self.assertEqual(await db.test.estimated_document_count(), 2) + + async def test_aggregate(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_one({"foo": [1, 2]}) + + with self.assertRaises(TypeError): + await db.test.aggregate("wow") # type: ignore[arg-type] + + pipeline = {"$project": {"_id": False, "foo": True}} + result = await db.test.aggregate([pipeline]) + self.assertIsInstance(result, AsyncCommandCursor) + self.assertEqual([{"foo": [1, 2]}], await result.to_list()) + + # Test write concern. 
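# Roughly what the helper below exercises, as a sketch assuming a `db` handle:
# when a pipeline writes results (e.g. $out), the collection's non-default
# write concern is attached to the aggregate command itself.
from pymongo.write_concern import WriteConcern

wc_coll = db.get_collection("test", write_concern=WriteConcern(w=1))
await wc_coll.aggregate([{"$out": "output-collection"}])  # carries {"w": 1}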
+ with self.write_concern_collection() as coll: + await coll.aggregate([{"$out": "output-collection"}]) + + async def test_aggregate_raw_bson(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_one({"foo": [1, 2]}) + + with self.assertRaises(TypeError): + await db.test.aggregate("wow") # type: ignore[arg-type] + + pipeline = {"$project": {"_id": False, "foo": True}} + coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) + result = await coll.aggregate([pipeline]) + self.assertIsInstance(result, AsyncCommandCursor) + first_result = await anext(result) + self.assertIsInstance(first_result, RawBSONDocument) + self.assertEqual([1, 2], list(first_result["foo"])) + + async def test_aggregation_cursor_validation(self): + db = self.db + projection = {"$project": {"_id": "$_id"}} + cursor = await db.test.aggregate([projection], cursor={}) + self.assertIsInstance(cursor, AsyncCommandCursor) + + async def test_aggregation_cursor(self): + db = self.db + if await async_client_context.has_secondaries: + # Test that getMore messages are sent to the right server. + db = self.client.get_database( + db.name, + read_preference=ReadPreference.SECONDARY, + write_concern=WriteConcern(w=self.w), + ) + + for collection_size in (10, 1000): + await db.drop_collection("test") + await db.test.insert_many([{"_id": i} for i in range(collection_size)]) + expected_sum = sum(range(collection_size)) + # Use batchSize to ensure multiple getMore messages + cursor = await db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) + + self.assertEqual(expected_sum, sum(doc["_id"] for doc in await cursor.to_list())) + + # Test that batchSize is handled properly. + cursor = await db.test.aggregate([], batchSize=5) + self.assertEqual(5, len(cursor._data)) + # Force a getMore + cursor._data.clear() + await anext(cursor) + # batchSize - 1 + self.assertEqual(4, len(cursor._data)) + # Exhaust the cursor. There shouldn't be any errors. 
+ async for _doc in cursor: + pass + + async def test_aggregation_cursor_alive(self): + await self.db.test.delete_many({}) + await self.db.test.insert_many([{} for _ in range(3)]) + self.addAsyncCleanup(self.db.test.delete_many, {}) + cursor = await self.db.test.aggregate(pipeline=[], cursor={"batchSize": 2}) + n = 0 + while True: + await cursor.next() + n += 1 + if n == 3: + self.assertFalse(cursor.alive) + break + + self.assertTrue(cursor.alive) + + async def test_invalid_session_parameter(self): + async def try_invalid_session(): + with await self.db.test.aggregate([], {}): # type:ignore + pass + + with self.assertRaisesRegex(ValueError, "must be an AsyncClientSession"): + await try_invalid_session() + + async def test_large_limit(self): + db = self.db + await db.drop_collection("test_large_limit") + await db.test_large_limit.create_index([("x", 1)]) + my_str = "mongomongo" * 1000 + + await db.test_large_limit.insert_many({"x": i, "y": my_str} for i in range(2000)) + + i = 0 + y = 0 + async for doc in db.test_large_limit.find(limit=1900).sort([("x", 1)]): + i += 1 + y += doc["x"] + + self.assertEqual(1900, i) + self.assertEqual((1900 * 1899) / 2, y) + + async def test_find_kwargs(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many({"x": i} for i in range(10)) + + self.assertEqual(10, await db.test.count_documents({})) + + total = 0 + async for x in db.test.find({}, skip=4, limit=2): + total += x["x"] + + self.assertEqual(9, total) + + async def test_rename(self): + db = self.db + await db.drop_collection("test") + await db.drop_collection("foo") + + with self.assertRaises(TypeError): + await db.test.rename(5) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + await db.test.rename("") + with self.assertRaises(InvalidName): + await db.test.rename("te$t") + with self.assertRaises(InvalidName): + await db.test.rename(".test") + with self.assertRaises(InvalidName): + await db.test.rename("test.") + with self.assertRaises(InvalidName): + await db.test.rename("tes..t") + + self.assertEqual(0, await db.test.count_documents({})) + self.assertEqual(0, await db.foo.count_documents({})) + + await db.test.insert_many({"x": i} for i in range(10)) + + self.assertEqual(10, await db.test.count_documents({})) + + await db.test.rename("foo") + + self.assertEqual(0, await db.test.count_documents({})) + self.assertEqual(10, await db.foo.count_documents({})) + + x = 0 + async for doc in db.foo.find(): + self.assertEqual(x, doc["x"]) + x += 1 + + await db.test.insert_one({}) + with self.assertRaises(OperationFailure): + await db.foo.rename("test") + await db.foo.rename("test", dropTarget=True) + + with self.write_concern_collection() as coll: + await coll.rename("foo") + + @no_type_check + async def test_find_one(self): + db = self.db + await db.drop_collection("test") + + _id = (await db.test.insert_one({"hello": "world", "foo": "bar"})).inserted_id + + self.assertEqual("world", (await db.test.find_one())["hello"]) + self.assertEqual(await db.test.find_one(_id), await db.test.find_one()) + self.assertEqual(await db.test.find_one(None), await db.test.find_one()) + self.assertEqual(await db.test.find_one({}), await db.test.find_one()) + self.assertEqual(await db.test.find_one({"hello": "world"}), await db.test.find_one()) + + self.assertIn("hello", await db.test.find_one(projection=["hello"])) + self.assertNotIn("hello", await db.test.find_one(projection=["foo"])) + + self.assertIn("hello", await db.test.find_one(projection=("hello",))) + 
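# The projection argument is deliberately permissive: any iterable of field
# names is normalized to the same inclusion document. A sketch, assuming the
# single document inserted above:
for projection in (["hello"], ("hello",), {"hello"}, frozenset(["hello"])):
    doc = await db.test.find_one(projection=projection)
    assert "hello" in doc and "foo" not in doc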
self.assertNotIn("hello", await db.test.find_one(projection=("foo",))) + + self.assertIn("hello", await db.test.find_one(projection={"hello"})) + self.assertNotIn("hello", await db.test.find_one(projection={"foo"})) + + self.assertIn("hello", await db.test.find_one(projection=frozenset(["hello"]))) + self.assertNotIn("hello", await db.test.find_one(projection=frozenset(["foo"]))) + + self.assertEqual(["_id"], list(await db.test.find_one(projection={"_id": True}))) + self.assertIn("hello", list(await db.test.find_one(projection={}))) + self.assertIn("hello", list(await db.test.find_one(projection=[]))) + + self.assertEqual(None, await db.test.find_one({"hello": "foo"})) + self.assertEqual(None, await db.test.find_one(ObjectId())) + + async def test_find_one_non_objectid(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_one({"_id": 5}) + + self.assertTrue(await db.test.find_one(5)) + self.assertFalse(await db.test.find_one(6)) + + async def test_find_one_with_find_args(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_many([{"x": i} for i in range(1, 4)]) + + self.assertEqual(1, (await db.test.find_one())["x"]) + self.assertEqual(2, (await db.test.find_one(skip=1, limit=2))["x"]) + + async def test_find_with_sort(self): + db = self.db + await db.drop_collection("test") + + await db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}]) + + self.assertEqual(2, (await db.test.find_one())["x"]) + self.assertEqual(1, (await db.test.find_one(sort=[("x", 1)]))["x"]) + self.assertEqual(3, (await db.test.find_one(sort=[("x", -1)]))["x"]) + + async def to_list(things): + return [thing["x"] async for thing in things] + + self.assertEqual([2, 1, 3], await to_list(db.test.find())) + self.assertEqual([1, 2, 3], await to_list(db.test.find(sort=[("x", 1)]))) + self.assertEqual([3, 2, 1], await to_list(db.test.find(sort=[("x", -1)]))) + + with self.assertRaises(TypeError): + await db.test.find(sort=5) + with self.assertRaises(TypeError): + await db.test.find(sort="hello") + with self.assertRaises(TypeError): + await db.test.find(sort=["hello", 1]) + + # TODO doesn't actually test functionality, just that it doesn't blow up + async def test_cursor_timeout(self): + await self.db.test.find(no_cursor_timeout=True).to_list() + await self.db.test.find(no_cursor_timeout=False).to_list() + + async def test_exhaust(self): + if await async_is_mongos(self.db.client): + with self.assertRaises(InvalidOperation): + await anext(self.db.test.find(cursor_type=CursorType.EXHAUST)) + return + + # Limit is incompatible with exhaust. + with self.assertRaises(InvalidOperation): + await anext(self.db.test.find(cursor_type=CursorType.EXHAUST, limit=5)) + cur = self.db.test.find(cursor_type=CursorType.EXHAUST) + with self.assertRaises(InvalidOperation): + cur.limit(5) + await cur.next() + cur = self.db.test.find(limit=5) + with self.assertRaises(InvalidOperation): + await cur.add_option(64) + cur = self.db.test.find() + await cur.add_option(64) + with self.assertRaises(InvalidOperation): + cur.limit(5) + + await self.db.drop_collection("test") + # Insert enough documents to require more than one batch + await self.db.test.insert_many([{"i": i} for i in range(150)]) + + client = await self.async_rs_or_single_client(maxPoolSize=1) + pool = await async_get_pool(client) + + # Make sure the socket is returned after exhaustion. 
+ cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) + await anext(cur) + self.assertEqual(0, len(pool.conns)) + async for _ in cur: + pass + self.assertEqual(1, len(pool.conns)) + + # Same as previous but don't call next() + async for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST): + pass + self.assertEqual(1, len(pool.conns)) + + # If the Cursor instance is discarded before being completely iterated + # and the socket has pending data (more_to_come=True) we have to close + # and discard the socket. + cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST, batch_size=2) + if async_client_context.version.at_least(4, 2): + # On 4.2+ we use OP_MSG which only sets more_to_come=True after the + # first getMore. + for _ in range(3): + await anext(cur) + else: + await anext(cur) + self.assertEqual(0, len(pool.conns)) + # if sys.platform.startswith("java") or "PyPy" in sys.version: + # # Don't wait for GC or use gc.collect(), it's unreliable. + await cur.close() + cur = None + # Wait until the background thread returns the socket. + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") + # The socket should be discarded. + self.assertEqual(0, len(pool.conns)) + + async def test_distinct(self): + await self.db.drop_collection("test") + + test = self.db.test + await test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + + distinct = await test.distinct("a") + distinct.sort() + + self.assertEqual([1, 2, 3], distinct) + + distinct = await test.find({"a": {"$gt": 1}}).distinct("a") + distinct.sort() + self.assertEqual([2, 3], distinct) + + distinct = await test.distinct("a", {"a": {"$gt": 1}}) + distinct.sort() + self.assertEqual([2, 3], distinct) + + await self.db.drop_collection("test") + + await test.insert_one({"a": {"b": "a"}, "c": 12}) + await test.insert_one({"a": {"b": "b"}, "c": 12}) + await test.insert_one({"a": {"b": "c"}, "c": 12}) + await test.insert_one({"a": {"b": "c"}, "c": 12}) + + distinct = await test.distinct("a.b") + distinct.sort() + + self.assertEqual(["a", "b", "c"], distinct) + + async def test_query_on_query_field(self): + await self.db.drop_collection("test") + await self.db.test.insert_one({"query": "foo"}) + await self.db.test.insert_one({"bar": "foo"}) + + self.assertEqual(1, await self.db.test.count_documents({"query": {"$ne": None}})) + self.assertEqual(1, len(await self.db.test.find({"query": {"$ne": None}}).to_list())) + + async def test_min_query(self): + await self.db.drop_collection("test") + await self.db.test.insert_many([{"x": 1}, {"x": 2}]) + await self.db.test.create_index("x") + + cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") + + docs = await cursor.to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(2, docs[0]["x"]) + + async def test_numerous_inserts(self): + # Ensure we don't exceed server's maxWriteBatchSize size limit. + await self.db.test.drop() + n_docs = await async_client_context.max_write_batch_size + 100 + await self.db.test.insert_many([{} for _ in range(n_docs)]) + self.assertEqual(n_docs, await self.db.test.count_documents({})) + await self.db.test.drop() + + async def test_insert_many_large_batch(self): + # Tests legacy insert. 
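# Back-of-the-envelope for the batching this test depends on, assuming the
# usual 16MB maxBsonObjectSize: four documents of roughly 8MB each cannot fit
# in one insert command, so the driver must split the insert_many into at
# least two automatic batches.
max_bson_size = 16 * 1024 * 1024
doc_size = max_bson_size // 2                                # "x" * (max_bson_size / 2)
min_batches = -(-4 * doc_size // (max_bson_size + 16 * 1024))  # ceiling division
assert min_batches >= 2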
+ db = self.client.test_insert_large_batch + self.addAsyncCleanup(self.client.drop_database, "test_insert_large_batch") + max_bson_size = await async_client_context.max_bson_size + # Write commands are limited to 16MB + 16k per batch + big_string = "x" * int(max_bson_size / 2) + + # Batch insert that requires 2 batches. + successful_insert = [ + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + {"x": big_string}, + ] + await db.collection_0.insert_many(successful_insert) + self.assertEqual(4, await db.collection_0.count_documents({})) + + await db.collection_0.drop() + + # Test that inserts fail after first error. + insert_second_fails = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id2", "x": big_string}, + ] + + with self.assertRaises(BulkWriteError): + await db.collection_1.insert_many(insert_second_fails) + + self.assertEqual(1, await db.collection_1.count_documents({})) + + await db.collection_1.drop() + + # 2 batches, 2nd insert fails, unacknowledged, ordered. + unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) + await unack_coll.insert_many(insert_second_fails) + + async def async_lambda(): + return await db.collection_2.count_documents({}) == 1 + + await async_wait_until(async_lambda, "insert 1 document", timeout=60) + + await db.collection_2.drop() + + # 2 batches, ids of docs 0 and 1 are dupes, ids of docs 2 and 3 are + # dupes. Acknowledged, unordered. + insert_two_failures = [ + {"_id": "id0", "x": big_string}, + {"_id": "id0", "x": big_string}, + {"_id": "id1", "x": big_string}, + {"_id": "id1", "x": big_string}, + ] + + with self.assertRaises(OperationFailure) as context: + await db.collection_3.insert_many(insert_two_failures, ordered=False) + + self.assertIn("id1", str(context.exception)) + + # Only the first and third documents should be inserted. + self.assertEqual(2, await db.collection_3.count_documents({})) + + await db.collection_3.drop() + + # 2 batches, 2 errors, unacknowledged, unordered. + unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) + await unack_coll.insert_many(insert_two_failures, ordered=False) + + async def async_lambda(): + return await db.collection_4.count_documents({}) == 2 + + # Only the first and third documents are inserted. + await async_wait_until(async_lambda, "insert 2 documents", timeout=60) + + await db.collection_4.drop() + + async def test_messages_with_unicode_collection_names(self): + db = self.db + + await db["Employés"].insert_one({"x": 1}) + await db["Employés"].replace_one({"x": 1}, {"x": 2}) + await db["Employés"].delete_many({}) + await db["Employés"].find_one() + await db["Employés"].find().to_list() + + async def test_drop_indexes_non_existent(self): + await self.db.drop_collection("test") + await self.db.test.drop_indexes() + + # This is really a bson test but easier to just reproduce it here... + # (Shame on me) + async def test_bad_encode(self): + c = self.db.test + await c.drop() + with self.assertRaises(InvalidDocument): + await c.insert_one({"x": c}) + + class BadGetAttr(dict): + def __getattr__(self, name): + pass + + bad = BadGetAttr([("foo", "bar")]) + await c.insert_one({"bad": bad}) + self.assertEqual("bar", (await c.find_one())["bad"]["foo"]) # type: ignore + + async def test_array_filters_validation(self): + # array_filters must be a list. 
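# For contrast with the type-validation below, a well-formed call passes a
# list of filter documents (a sketch, assuming docs shaped like
# {"y": [{"b": 1}, ...]} and a collection handle `coll` inside a coroutine):
await coll.update_one(
    {},
    {"$set": {"y.$[i].b": 5}},
    array_filters=[{"i.b": 1}],  # a list of filter documents, not a dict
)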
+ c = self.db.test + with self.assertRaises(TypeError): + await c.update_one({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await c.update_many({}, {"$set": {"a": 1}}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + update = {"$set": {"a": 1}} + await c.find_one_and_update({}, update, array_filters={}) # type: ignore[arg-type] + + async def test_array_filters_unacknowledged(self): + c_w0 = self.db.test.with_options(write_concern=WriteConcern(w=0)) + with self.assertRaises(ConfigurationError): + await c_w0.update_one({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + await c_w0.update_many({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) + with self.assertRaises(ConfigurationError): + await c_w0.find_one_and_update( + {}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}] + ) + + async def test_find_one_and(self): + c = self.db.test + await c.drop() + await c.insert_one({"_id": 1, "i": 1}) + + self.assertEqual( + {"_id": 1, "i": 1}, await c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}}) + ) + self.assertEqual( + {"_id": 1, "i": 3}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual({"_id": 1, "i": 3}, await c.find_one_and_delete({"_id": 1})) + self.assertEqual(None, await c.find_one({"_id": 1})) + + self.assertEqual(None, await c.find_one_and_update({"_id": 1}, {"$inc": {"i": 1}})) + self.assertEqual( + {"_id": 1, "i": 1}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER, upsert=True + ), + ) + self.assertEqual( + {"_id": 1, "i": 2}, + await c.find_one_and_update( + {"_id": 1}, {"$inc": {"i": 1}}, return_document=ReturnDocument.AFTER + ), + ) + + self.assertEqual( + {"_id": 1, "i": 3}, + await c.find_one_and_replace( + {"_id": 1}, {"i": 3, "j": 1}, projection=["i"], return_document=ReturnDocument.AFTER + ), + ) + self.assertEqual( + {"i": 4}, + await c.find_one_and_update( + {"_id": 1}, + {"$inc": {"i": 1}}, + projection={"i": 1, "_id": 0}, + return_document=ReturnDocument.AFTER, + ), + ) + + await c.drop() + for j in range(5): + await c.insert_one({"j": j, "i": 0}) + + sort = [("j", DESCENDING)] + self.assertEqual(4, (await c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort))["j"]) + + async def test_find_one_and_write_concern(self): + listener = OvertCommandListener() + db = (await self.async_single_client(event_listeners=[listener]))[self.db.name] + # non-default WriteConcern. + c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) + # default WriteConcern. + c_default = db.get_collection("test", write_concern=WriteConcern()) + # Authenticate the client and throw out auth commands from the listener. + await db.command("ping") + listener.reset() + await c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + await c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + await c_w0.find_one_and_delete({"_id": 1}) + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() + + # Test write concern errors. 
+ if async_client_context.is_rs: + c_wc_error = db.get_collection( + "test", write_concern=WriteConcern(w=len(async_client_context.nodes) + 1) + ) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_replace( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + with self.assertRaises(WriteConcernError): + await c_wc_error.find_one_and_delete( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + listener.reset() + + await c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + await c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + await c_default.find_one_and_delete({"_id": 1}) + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() + + async def test_find_with_nested(self): + c = self.db.test + await c.drop() + await c.insert_many([{"i": i} for i in range(5)]) # [0, 1, 2, 3, 4] + self.assertEqual( + [2], + [ + i["i"] + async for i in c.find( + { + "$and": [ + { + # This clause gives us [1,2,4] + "$or": [ + {"i": {"$lte": 2}}, + {"i": {"$gt": 3}}, + ], + }, + { + # This clause gives us [2,3] + "$or": [ + {"i": 2}, + {"i": 3}, + ] + }, + ] + } + ) + ], + ) + + self.assertEqual( + [0, 1, 2], + [ + i["i"] + async for i in c.find( + { + "$or": [ + { + # This clause gives us [2] + "$and": [ + {"i": {"$gte": 2}}, + {"i": {"$lt": 3}}, + ], + }, + { + # This clause gives us [0,1] + "$and": [ + {"i": {"$gt": -100}}, + {"i": {"$lt": 2}}, + ] + }, + ] + } + ) + ], + ) + + async def test_find_regex(self): + c = self.db.test + await c.drop() + await c.insert_one({"r": re.compile(".*")}) + + self.assertIsInstance((await c.find_one())["r"], Regex) # type: ignore + async for doc in c.find(): + self.assertIsInstance(doc["r"], Regex) + + def test_find_command_generation(self): + cmd = _gen_find_command( + "coll", + {"$query": {"foo": 1}, "$dumb": 2}, + None, + 0, + 0, + 0, + None, + DEFAULT_READ_CONCERN, + None, + None, + ) + self.assertEqual(cmd, {"find": "coll", "$dumb": 2, "filter": {"foo": 1}}) + + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(AsyncCollection(self.db, "test")) + + @async_client_context.require_version_min(5, 0, 0) + async def test_helpers_with_let(self): + c = self.db.test + + async def afind(*args, **kwargs): + return c.find(*args, **kwargs) + + helpers = [ + (c.delete_many, ({}, {})), + (c.delete_one, ({}, {})), + (afind, ({})), + (c.update_many, ({}, {"$inc": {"x": 3}})), + (c.update_one, ({}, {"$inc": {"x": 3}})), + (c.find_one_and_delete, ({}, {})), + (c.find_one_and_replace, ({}, {})), + (c.aggregate, ([],)), + ] + for let in [10, "str", [], False]: + for helper, args in helpers: + with self.assertRaisesRegex(TypeError, "let must be an instance of dict"): + await helper(*args, let=let) # type: ignore + for helper, args in helpers: + await helper(*args, let={}) # type: ignore + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_collection_management.py b/test/asynchronous/test_collection_management.py new file mode 100644 index 0000000000..c0edf91581 --- /dev/null +++ b/test/asynchronous/test_collection_management.py @@ -0,0 +1,41 @@ +# Copyright 2021-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the collection management unified spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "collection_management") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "collection_management" + ) + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_command_logging.py b/test/asynchronous/test_command_logging.py new file mode 100644 index 0000000000..f9b459c152 --- /dev/null +++ b/test/asynchronous/test_command_logging.py @@ -0,0 +1,44 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_command_monitoring.py b/test/asynchronous/test_command_monitoring.py new file mode 100644 index 0000000000..311fd1fdc1 --- /dev/null +++ b/test/asynchronous/test_command_monitoring.py @@ -0,0 +1,45 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_monitoring") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_monitoring") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_comment.py b/test/asynchronous/test_comment.py new file mode 100644 index 0000000000..2d6d0f5f1e --- /dev/null +++ b/test/asynchronous/test_comment.py @@ -0,0 +1,159 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the keyword argument 'comment' in various helpers.""" + +from __future__ import annotations + +import inspect +import sys + +sys.path[0:0] = [""] +from inspect import iscoroutinefunction +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import OvertCommandListener + +from bson.dbref import DBRef +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.operations import IndexModel + +_IS_SYNC = False + + +class AsyncTestComment(AsyncIntegrationTest): + async def _test_ops( + self, + helpers, + already_supported, + listener, + ): + for h, args in helpers: + c = "testing comment with " + h.__name__ + with self.subTest("collection-" + h.__name__ + "-comment"): + for cc in [c, {"key": c}, ["any", 1]]: + listener.reset() + kwargs = {"comment": cc} + try: + maybe_cursor = await h(*args, **kwargs) + except Exception: + maybe_cursor = None + self.assertIn( + "comment", + inspect.signature(h).parameters, + msg="Could not find 'comment' in the " + "signature of function %s" % (h.__name__), + ) + self.assertEqual( + inspect.signature(h).parameters["comment"].annotation, "Optional[Any]" + ) + if isinstance(maybe_cursor, AsyncCommandCursor): + await maybe_cursor.close() + + cmd = listener.started_events[0] + self.assertEqual(cc, cmd.command.get("comment"), msg=cmd) + + if h.__name__ != "aggregate_raw_batches": + self.assertIn( + ":param comment:", + h.__doc__, + ) + if h not in already_supported: + self.assertIn( + "Added ``comment`` parameter", + h.__doc__, + ) + else: + self.assertNotIn( + "Added ``comment`` parameter", + h.__doc__, + ) + + listener.reset() + + @async_client_context.require_version_min(4, 7, -1) + @async_client_context.require_replica_set + async def test_database_helpers(self): + listener = OvertCommandListener() + db = (await self.async_rs_or_single_client(event_listeners=[listener])).db + helpers = [ + (db.watch, []), + (db.command, ["hello"]), 
+ (db.list_collections, []), + (db.list_collection_names, []), + (db.drop_collection, ["hello"]), + (db.validate_collection, ["test"]), + (db.dereference, [DBRef("collection", 1)]), + ] + already_supported = [db.command, db.list_collections, db.list_collection_names] + await self._test_ops(helpers, already_supported, listener) + + @async_client_context.require_version_min(4, 7, -1) + @async_client_context.require_replica_set + async def test_client_helpers(self): + listener = OvertCommandListener() + cli = await self.async_rs_or_single_client(event_listeners=[listener]) + helpers = [ + (cli.watch, []), + (cli.list_databases, []), + (cli.list_database_names, []), + (cli.drop_database, ["test"]), + ] + already_supported = [ + cli.list_databases, + ] + await self._test_ops(helpers, already_supported, listener) + + @async_client_context.require_version_min(4, 7, -1) + async def test_collection_helpers(self): + listener = OvertCommandListener() + db = (await self.async_rs_or_single_client(event_listeners=[listener]))[self.db.name] + coll = db.get_collection("test") + + helpers = [ + (coll.list_indexes, []), + (coll.drop, []), + (coll.index_information, []), + (coll.options, []), + (coll.aggregate, [[{"$set": {"x": 1}}]]), + (coll.aggregate_raw_batches, [[{"$set": {"x": 1}}]]), + (coll.rename, ["temp_temp_temp"]), + (coll.distinct, ["_id"]), + (coll.find_one_and_delete, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.estimated_document_count, []), + (coll.count_documents, [{}]), + (coll.create_indexes, [[IndexModel("a")]]), + (coll.create_index, ["a"]), + (coll.drop_index, [[("a", 1)]]), + (coll.drop_indexes, []), + ] + already_supported = [ + coll.estimated_document_count, + coll.count_documents, + coll.create_indexes, + coll.drop_indexes, + coll.options, + coll.find_one_and_replace, + coll.drop_index, + coll.rename, + coll.distinct, + coll.find_one_and_delete, + coll.find_one_and_update, + ] + await self._test_ops(helpers, already_supported, listener) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_common.py b/test/asynchronous/test_common.py new file mode 100644 index 0000000000..00495e7c30 --- /dev/null +++ b/test/asynchronous/test_common.py @@ -0,0 +1,185 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the pymongo common module.""" +from __future__ import annotations + +import sys +import uuid + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected, unittest + +from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.objectid import ObjectId +from pymongo.errors import OperationFailure +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCommon(AsyncIntegrationTest): + async def test_uuid_representation(self): + coll = self.db.uuid + await coll.drop() + + # Test property + self.assertEqual(UuidRepresentation.UNSPECIFIED, coll.codec_options.uuid_representation) + + # Test basic query + uu = uuid.uuid4() + # Insert as binary subtype 3 + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + legacy_opts = coll.codec_options + await coll.insert_one({"uu": uu}) + self.assertEqual(uu, (await coll.find_one({"uu": uu}))["uu"]) # type: ignore + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual(STANDARD, coll.codec_options.uuid_representation) + self.assertEqual(None, await coll.find_one({"uu": uu})) + uul = Binary.from_uuid(uu, PYTHON_LEGACY) + self.assertEqual(uul, (await coll.find_one({"uu": uul}))["uu"]) # type: ignore + + # Test count_documents + self.assertEqual(0, await coll.count_documents({"uu": uu})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, await coll.count_documents({"uu": uu})) + + # Test delete + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + await coll.delete_one({"uu": uu}) + self.assertEqual(1, await coll.count_documents({})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + await coll.delete_one({"uu": uu}) + self.assertEqual(0, await coll.count_documents({})) + + # Test update_one + await coll.insert_one({"_id": uu, "i": 1}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + await coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(1, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + await coll.update_one({"_id": uu}, {"$set": {"i": 2}}) + self.assertEqual(2, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + + # Test Cursor.distinct + self.assertEqual([2], await coll.find({"_id": uu}).distinct("i")) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) + self.assertEqual([], await coll.find({"_id": uu}).distinct("i")) + + # Test findAndModify + self.assertEqual(None, await coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) + coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + self.assertEqual(2, (await coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}}))["i"]) + self.assertEqual(5, (await coll.find_one({"_id": uu}))["i"]) # type: ignore + + # Test command + self.assertEqual( + 5, + ( + await self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + ) + )["value"]["i"], + ) + self.assertEqual( + 6, + ( + await self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + ) + )["value"]["i"], + ) + + async def 
test_write_concern(self): + c = await self.async_rs_or_single_client(connect=False) + self.assertEqual(WriteConcern(), c.write_concern) + + c = await self.async_rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) + wc = WriteConcern(w=2, wtimeout=1000) + self.assertEqual(wc, c.write_concern) + + # Can we override back to the server default? + db = c.get_database("pymongo_test", write_concern=WriteConcern()) + self.assertEqual(db.write_concern, WriteConcern()) + + db = c.pymongo_test + self.assertEqual(wc, db.write_concern) + coll = db.test + self.assertEqual(wc, coll.write_concern) + + cwc = WriteConcern(j=True) + coll = db.get_collection("test", write_concern=cwc) + self.assertEqual(cwc, coll.write_concern) + self.assertEqual(wc, db.write_concern) + + async def test_mongo_client(self): + pair = await async_client_context.pair + m = await self.async_rs_or_single_client(w=0) + coll = m.pymongo_test.write_concern_test + await coll.drop() + doc = {"_id": ObjectId()} + await coll.insert_one(doc) + self.assertTrue(await coll.insert_one(doc)) + coll = coll.with_options(write_concern=WriteConcern(w=1)) + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + + m = await self.async_rs_or_single_client() + coll = m.pymongo_test.write_concern_test + new_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.assertTrue(await new_coll.insert_one(doc)) + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + + m = await self.async_rs_or_single_client( + f"mongodb://{pair}/", replicaSet=async_client_context.replica_set_name + ) + + coll = m.pymongo_test.write_concern_test + with self.assertRaises(OperationFailure): + await coll.insert_one(doc) + m = await self.async_rs_or_single_client( + f"mongodb://{pair}/?w=0", replicaSet=async_client_context.replica_set_name + ) + + coll = m.pymongo_test.write_concern_test + await coll.insert_one(doc) + + # Equality tests + direct = await connected(await self.async_single_client(w=0)) + direct2 = await connected( + await self.async_single_client(f"mongodb://{pair}/?w=0", **self.credentials) + ) + self.assertEqual(direct, direct2) + self.assertFalse(direct != direct2) + + async def test_validate_boolean(self): + await self.db.test.update_one({}, {"$set": {"total": 1}}, upsert=True) + with self.assertRaisesRegex( + TypeError, "upsert must be True or False, was: upsert={'upsert': True}" + ): + await self.db.test.update_one({}, {"$set": {"total": 1}}, {"upsert": True}) # type: ignore + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_concurrency.py b/test/asynchronous/test_concurrency.py new file mode 100644 index 0000000000..65ea90c03f --- /dev/null +++ b/test/asynchronous/test_concurrency.py @@ -0,0 +1,54 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests to ensure that the async API is properly concurrent with asyncio.""" +from __future__ import annotations + +import asyncio +import time +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.utils_shared import delay + +_IS_SYNC = False + + +class TestAsyncConcurrency(AsyncIntegrationTest): + async def _task(self, client): + await client.db.test.find_one({"$where": delay(0.20)}) + + async def test_concurrency(self): + tasks = [] + iterations = 5 + + client = await self.async_single_client() + await client.db.test.drop() + await client.db.test.insert_one({"x": 1}) + + start = time.time() + + for _ in range(iterations): + await self._task(client) + + sequential_time = time.time() - start + start = time.time() + + for i in range(iterations): + tasks.append(self._task(client)) + + await asyncio.gather(*tasks) + concurrent_time = time.time() - start + + percent_faster = (sequential_time - concurrent_time) / concurrent_time * 100 + # We expect the concurrent tasks to be at least 50% faster on all platforms as a conservative benchmark + self.assertGreaterEqual(percent_faster, 50) diff --git a/test/asynchronous/test_connection_logging.py b/test/asynchronous/test_connection_logging.py new file mode 100644 index 0000000000..945c6c59b5 --- /dev/null +++ b/test/asynchronous/test_connection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the connection logging unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "connection_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "connection_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_connection_monitoring.py b/test/asynchronous/test_connection_monitoring.py new file mode 100644 index 0000000000..c6dc6f0a69 --- /dev/null +++ b/test/asynchronous/test_connection_monitoring.py @@ -0,0 +1,472 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Execute Transactions Spec tests.""" +from __future__ import annotations + +import asyncio +import os +import sys +import time +from pathlib import Path +from test.asynchronous.utils import async_get_pool, async_get_pools + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs, unittest +from test.asynchronous.pymongo_mocks import DummyMonitor +from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator, SpecRunnerTask +from test.utils_shared import ( + CMAPListener, + async_wait_until, + camel_to_snake, +) + +from bson.objectid import ObjectId +from bson.son import SON +from pymongo.asynchronous.pool import PoolState, _PoolClosedError +from pymongo.errors import ( + ConnectionFailure, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) +from pymongo.monitoring import ( + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionClosedReason, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) +from pymongo.read_preferences import ReadPreference +from pymongo.topology_description import updated_topology_description + +_IS_SYNC = False + +OBJECT_TYPES = { + # Event types. + "ConnectionCheckedIn": ConnectionCheckedInEvent, + "ConnectionCheckedOut": ConnectionCheckedOutEvent, + "ConnectionCheckOutFailed": ConnectionCheckOutFailedEvent, + "ConnectionClosed": ConnectionClosedEvent, + "ConnectionCreated": ConnectionCreatedEvent, + "ConnectionReady": ConnectionReadyEvent, + "ConnectionCheckOutStarted": ConnectionCheckOutStartedEvent, + "ConnectionPoolCreated": PoolCreatedEvent, + "ConnectionPoolReady": PoolReadyEvent, + "ConnectionPoolCleared": PoolClearedEvent, + "ConnectionPoolClosed": PoolClosedEvent, + # Error types. + "PoolClosedError": _PoolClosedError, + "WaitQueueTimeoutError": WaitQueueTimeoutError, +} + + +class AsyncTestCMAP(AsyncIntegrationTest): + # Location of JSON test specifications. + if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "connection_monitoring") + else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "connection_monitoring") + + # Test operations: + + async def start(self, op): + """Run the 'start' thread operation.""" + target = op["target"] + thread = SpecRunnerTask(target) + await thread.start() + self.targets[target] = thread + + async def wait(self, op): + """Run the 'wait' operation.""" + await asyncio.sleep(op["ms"] / 1000.0) + + async def wait_for_thread(self, op): + """Run the 'waitForThread' operation.""" + target = op["target"] + thread = self.targets[target] + await thread.stop() + await thread.join() + if thread.exc: + raise thread.exc + self.assertFalse(thread.ops) + + async def wait_for_event(self, op): + """Run the 'waitForEvent' operation.""" + event = OBJECT_TYPES[op["event"]] + count = op["count"] + timeout = op.get("timeout", 10000) / 1000.0 + await async_wait_until( + lambda: self.listener.event_count(event) >= count, + f"find {count} {event} event(s)", + timeout=timeout, + ) + + async def check_out(self, op): + """Run the 'checkOut' operation.""" + label = op["label"] + async with self.pool.checkout() as conn: + # Call 'pin_cursor' so we can hold the socket. 
+ conn.pin_cursor() + if label: + self.labels[label] = conn + else: + self.addAsyncCleanup(conn.close_conn, None) + + async def check_in(self, op): + """Run the 'checkIn' operation.""" + label = op["connection"] + conn = self.labels[label] + await self.pool.checkin(conn) + + async def ready(self, op): + """Run the 'ready' operation.""" + await self.pool.ready() + + async def clear(self, op): + """Run the 'clear' operation.""" + if "interruptInUseConnections" in op: + await self.pool.reset(interrupt_connections=op["interruptInUseConnections"]) + else: + await self.pool.reset() + + async def close(self, op): + """Run the 'close' operation.""" + await self.pool.close() + + async def run_operation(self, op): + """Run a single operation in a test.""" + op_name = camel_to_snake(op["name"]) + thread = op["thread"] + meth = getattr(self, op_name) + if thread: + await self.targets[thread].schedule(lambda: meth(op)) + else: + await meth(op) + + async def run_operations(self, ops): + """Run a test's operations.""" + for op in ops: + self._ops.append(op) + await self.run_operation(op) + + def check_object(self, actual, expected): + """Assert that the actual object matches the expected object.""" + self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) + for attr, expected_val in expected.items(): + if attr == "type": + continue + c2s = camel_to_snake(attr) + if c2s == "interrupt_in_use_connections": + c2s = "interrupt_connections" + actual_val = getattr(actual, c2s) + if expected_val == 42: + self.assertIsNotNone(actual_val) + else: + self.assertEqual(actual_val, expected_val) + + def check_event(self, actual, expected): + """Assert that the actual event matches the expected event.""" + self.check_object(actual, expected) + + def actual_events(self, ignore): + """Return all the non-ignored events.""" + ignore = tuple(OBJECT_TYPES[name] for name in ignore) + return [event for event in self.listener.events if not isinstance(event, ignore)] + + def check_events(self, events, ignore): + """Check the events of a test.""" + actual_events = self.actual_events(ignore) + for actual, expected in zip(actual_events, events): + self.logs.append(f"Checking event actual: {actual!r} vs expected: {expected!r}") + self.check_event(actual, expected) + + if len(events) > len(actual_events): + self.fail(f"missing events: {events[len(actual_events) :]!r}") + + def check_error(self, actual, expected): + message = expected.pop("message") + self.check_object(actual, expected) + self.assertIn(message, str(actual)) + + async def set_fail_point(self, command_args): + if not async_client_context.supports_failCommand_fail_point: + self.skipTest("failCommand fail point must be supported") + await self.configure_fail_point(self.client, command_args) + + async def run_scenario(self, scenario_def, test): + """Run a CMAP spec test.""" + self.logs: list = [] + self.assertEqual(scenario_def["version"], 1) + self.assertIn(scenario_def["style"], ["unit", "integration"]) + self.listener = CMAPListener() + self._ops: list = [] + + # Configure the fail point before creating the client. + if "failPoint" in test: + fp = test["failPoint"] + await self.set_fail_point(fp) + self.addAsyncCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + opts = test["poolOptions"].copy() + opts["event_listeners"] = [self.listener] + opts["_monitor_class"] = DummyMonitor + opts["connect"] = False + # Support backgroundThreadIntervalMS, default to 50ms. 
+ interval = opts.pop("backgroundThreadIntervalMS", 50) + if interval < 0: + kill_cursor_frequency = 99999999 + else: + kill_cursor_frequency = interval / 1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): + client = await self.async_single_client(**opts) + # Update the SD to a known type because the DummyMonitor will not. + # Note we cannot simply call topology.on_change because that would + # internally call pool.ready() which introduces unexpected + # PoolReadyEvents. Instead, update the initial state before + # opening the Topology. + td = async_client_context.client._topology.description + sd = td.server_descriptions()[ + (await async_client_context.host, await async_client_context.port) + ] + client._topology._description = updated_topology_description( + client._topology._description, sd + ) + # When backgroundThreadIntervalMS is negative we do not start the + # background thread to ensure it never runs. + if interval < 0: + await client._topology.open() + else: + await client._get_topology() + self.pool = list(client._topology._servers.values())[0].pool + + # Map of target names to Thread objects. + self.targets: dict = {} + # Map of label names to AsyncConnection objects + self.labels: dict = {} + + async def cleanup(): + for t in self.targets.values(): + await t.stop() + for t in self.targets.values(): + await t.join(5) + for conn in self.labels.values(): + await conn.close_conn(None) + + self.addAsyncCleanup(cleanup) + + try: + if test["error"]: + with self.assertRaises(PyMongoError) as ctx: + await self.run_operations(test["operations"]) + self.check_error(ctx.exception, test["error"]) + else: + await self.run_operations(test["operations"]) + + self.check_events(test["events"], test["ignore"]) + except Exception: + # Print the events after a test failure. + print("\nFailed test: {!r}".format(test["description"])) + print("Operations:") + for op in self._ops: + print(op) + print("Threads:") + print(self.targets) + print("AsyncConnections:") + print(self.labels) + print("Events:") + for event in self.listener.events: + print(event) + print("Log:") + for log in self.logs: + print(log) + raise + + POOL_OPTIONS = { + "maxPoolSize": 50, + "minPoolSize": 1, + "maxIdleTimeMS": 10000, + "waitQueueTimeoutMS": 10000, + } + + # + # Prose tests. Numbers correspond to the prose test number in the spec. + # + async def test_1_client_connection_pool_options(self): + client = await self.async_rs_or_single_client(**self.POOL_OPTIONS) + pool_opts = (await async_get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + async def test_2_all_client_pools_have_same_options(self): + client = await self.async_rs_or_single_client(**self.POOL_OPTIONS) + await client.admin.command("ping") + # Discover at least one secondary. 
+ if await async_client_context.has_secondaries: + await client.admin.command("ping", read_preference=ReadPreference.SECONDARY) + pools = await async_get_pools(client) + pool_opts = pools[0].opts + + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + for pool in pools[1:]: + self.assertEqual(pool.opts, pool_opts) + + async def test_3_uri_connection_pool_options(self): + opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) + uri = f"mongodb://{await async_client_context.pair}/?{opts}" + client = await self.async_rs_or_single_client(uri) + pool_opts = (await async_get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + async def test_4_subscribe_to_events(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + self.assertEqual(listener.event_count(PoolCreatedEvent), 1) + + # Creates a new connection. + await client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) + self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) + self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) + + # Uses the existing connection. + await client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) + + await client.close() + self.assertEqual(listener.event_count(PoolClosedEvent), 1) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 1) + + async def test_5_check_out_fails_connection_error(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + pool = await async_get_pool(client) + + def mock_connect(*args, **kwargs): + raise ConnectionFailure("connect failed") + + pool.connect = mock_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + # Attempt to create a new connection. + with self.assertRaisesRegex(ConnectionFailure, "connect failed"): + await client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) + self.assertIsInstance(listener.events[4], PoolClearedEvent) + + failed_event = listener.events[3] + self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + @async_client_context.require_no_fips + async def test_5_check_out_fails_auth_error(self): + listener = CMAPListener() + client = await self.async_single_client_noauth( + username="notauser", password="fail", event_listeners=[listener] + ) + + # Attempt to create a new connection. + with self.assertRaisesRegex(OperationFailure, "failed"): + await client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) + # Error happens here. 
+ self.assertIsInstance(listener.events[4], ConnectionClosedEvent) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) + self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + # + # Extra non-spec tests + # + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + async def test_events_repr(self): + host = ("localhost", 27017) + self.assertRepr(ConnectionCheckedInEvent(host, 1)) + self.assertRepr(ConnectionCheckedOutEvent(host, 1, time.monotonic())) + self.assertRepr( + ConnectionCheckOutFailedEvent( + host, ConnectionCheckOutFailedReason.POOL_CLOSED, time.monotonic() + ) + ) + self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) + self.assertRepr(ConnectionCreatedEvent(host, 1)) + self.assertRepr(ConnectionReadyEvent(host, 1, time.monotonic())) + self.assertRepr(ConnectionCheckOutStartedEvent(host)) + self.assertRepr(PoolCreatedEvent(host, {})) + self.assertRepr(PoolClearedEvent(host)) + self.assertRepr(PoolClearedEvent(host, service_id=ObjectId())) + self.assertRepr(PoolClosedEvent(host)) + + async def test_close_leaves_pool_unpaused(self): + listener = CMAPListener() + client = await self.async_single_client(event_listeners=[listener]) + await client.admin.command("ping") + pool = await async_get_pool(client) + await client.close() + self.assertEqual(1, listener.event_count(PoolClosedEvent)) + self.assertEqual(PoolState.CLOSED, pool.state) + # Checking out a connection should fail + with self.assertRaises(_PoolClosedError): + async with pool.checkout(): + pass + + +def create_test(scenario_def, test, name): + async def run_scenario(self): + await self.run_scenario(scenario_def, test) + + return run_scenario + + +class CMAPSpecTestCreator(AsyncSpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + CMAP tests do not have a 'tests' field. The whole file represents + a single test case. + """ + return [scenario_def] + + +test_creator = CMAPSpecTestCreator(create_test, AsyncTestCMAP, AsyncTestCMAP.TEST_PATH) +test_creator.create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_connections_survive_primary_stepdown_spec.py b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py new file mode 100644 index 0000000000..aed3c1ce7b --- /dev/null +++ b/test/asynchronous/test_connections_survive_primary_stepdown_spec.py @@ -0,0 +1,137 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
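The CMAP tests above, and the step-down tests below, both drive their assertions through a CMAPListener that records every pool and connection event. The public hook for this is pymongo.monitoring.ConnectionPoolListener, passed to the client via event_listeners. A minimal counting listener in that spirit (a sketch only; EventCounter is an invented name, and it assumes the standard hook names on the base class):

from collections import Counter

from pymongo import monitoring


class EventCounter(monitoring.ConnectionPoolListener):
    """Count pool/connection events by type, like the test suite's CMAPListener."""

    def __init__(self) -> None:
        self.counts: Counter[type] = Counter()

    def _record(self, event: object) -> None:
        self.counts[type(event)] += 1

    # Every hook on the base class simply records the event.
    pool_created = pool_ready = pool_cleared = pool_closed = _record
    connection_created = connection_ready = connection_closed = _record
    connection_check_out_started = connection_check_out_failed = _record
    connection_checked_out = connection_checked_in = _record


# Usage: MongoClient(..., event_listeners=[EventCounter()])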
+ +"""Test compliance with the connections survive primary step down spec.""" +from __future__ import annotations + +import sys +from test.asynchronous.utils import async_ensure_all_connected + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + unittest, +) +from test.asynchronous.helpers import async_repl_set_step_down +from test.utils_shared import ( + CMAPListener, +) + +from bson import SON +from pymongo import monitoring +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.errors import NotPrimaryError +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestAsyncConnectionsSurvivePrimaryStepDown(AsyncIntegrationTest): + listener: CMAPListener + coll: AsyncCollection + + @async_client_context.require_replica_set + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = CMAPListener() + self.client = await self.async_rs_or_single_client( + event_listeners=[self.listener], retryWrites=False, heartbeatFrequencyMS=500 + ) + + # Ensure connections to all servers in replica set. This is to test + # that the is_writable flag is properly updated for connections that + # survive a replica set election. + await async_ensure_all_connected(self.client) + self.db = self.client.get_database("step-down", write_concern=WriteConcern("majority")) + self.coll = self.db.get_collection("step-down", write_concern=WriteConcern("majority")) + # Note that all ops use same write-concern as self.db (majority). + await self.db.drop_collection("step-down") + await self.db.create_collection("step-down") + self.listener.reset() + + async def set_fail_point(self, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + await self.client.admin.command(cmd) + + def verify_pool_cleared(self): + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 1) + + def verify_pool_not_cleared(self): + self.assertEqual(self.listener.event_count(monitoring.PoolClearedEvent), 0) + + @async_client_context.require_version_min(4, 2, -1) + async def test_get_more_iteration(self): + # Insert 5 documents with WC majority. + await self.coll.insert_many([{"data": k} for k in range(5)]) + # Start a find operation and retrieve first batch of results. + batch_size = 2 + cursor = self.coll.find(batch_size=batch_size) + for _ in range(batch_size): + await cursor.next() + # Force step-down the primary. + await async_repl_set_step_down(self.client, replSetStepDown=5, force=True) + # Get await anext batch of results. + for _ in range(batch_size): + await cursor.next() + # Verify pool not cleared. + self.verify_pool_not_cleared() + # Attempt insertion to mark server description as stale and prevent a + # NotPrimaryError on the subsequent operation. + try: + await self.coll.insert_one({}) + except NotPrimaryError: + pass + # Next insert should succeed on the new primary without clearing pool. + await self.coll.insert_one({}) + self.verify_pool_not_cleared() + + async def run_scenario(self, error_code, retry, pool_status_checker): + # Set fail point. + await self.set_fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["insert"], "errorCode": error_code}} + ) + self.addAsyncCleanup(self.set_fail_point, {"mode": "off"}) + # Insert record and verify failure. 
+
+        with self.assertRaises(NotPrimaryError) as exc:
+            await self.coll.insert_one({"test": 1})
+        self.assertEqual(exc.exception.details["code"], error_code)  # type: ignore[call-overload]
+        # Retry before the CMAPListener assertion if retry=True.
+        if retry:
+            await self.coll.insert_one({"test": 1})
+        # Verify pool cleared/not cleared.
+        pool_status_checker()
+        # Always retry here to ensure discovery of the new primary.
+        await self.coll.insert_one({"test": 1})
+
+    @async_client_context.require_version_min(4, 2, -1)
+    @async_client_context.require_test_commands
+    async def test_not_primary_keep_connection_pool(self):
+        await self.run_scenario(10107, True, self.verify_pool_not_cleared)
+
+    @async_client_context.require_version_min(4, 2, 0)
+    @async_client_context.require_test_commands
+    async def test_shutdown_in_progress(self):
+        await self.run_scenario(91, False, self.verify_pool_cleared)
+
+    @async_client_context.require_version_min(4, 2, 0)
+    @async_client_context.require_test_commands
+    async def test_interrupted_at_shutdown(self):
+        await self.run_scenario(11600, False, self.verify_pool_cleared)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_create_entities.py b/test/asynchronous/test_create_entities.py
new file mode 100644
index 0000000000..1f68cf6ddc
--- /dev/null
+++ b/test/asynchronous/test_create_entities.py
@@ -0,0 +1,134 @@
+# Copyright 2021-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
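run_scenario above injects server errors with the failCommand fail point: code 10107 (NotPrimary) must leave the pool intact, while 91 (ShutdownInProgress) and 11600 (InterruptedAtShutdown) must clear it. A sketch of arming and disarming that fail point directly, assuming a server started with test commands enabled and an already-connected async client (trigger_shutdown_error is an invented helper name):

from pymongo.errors import NotPrimaryError


async def trigger_shutdown_error(client) -> None:
    # Arm failCommand: the next insert fails with code 91 (ShutdownInProgress).
    await client.admin.command(
        "configureFailPoint",
        "failCommand",
        mode={"times": 1},
        data={"failCommands": ["insert"], "errorCode": 91},
    )
    try:
        await client.db.coll.insert_one({"x": 1})
    except NotPrimaryError:
        pass  # Expected: the server reports a "not primary or shutdown" error.
    finally:
        # Always disarm the fail point when finished.
        await client.admin.command("configureFailPoint", "failCommand", mode="off")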
+from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest +from test.asynchronous.unified_format import UnifiedSpecTestMixinV1 + +_IS_SYNC = False + + +class TestCreateEntities(AsyncIntegrationTest): + async def test_store_events_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "blank", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "storeEventsAsEntities": [ + { + "id": "events1", + "events": [ + "PoolCreatedEvent", + ], + } + ], + } + }, + ], + "tests": [{"description": "foo", "operations": []}], + } + self.scenario_runner.TEST_SPEC = spec + await self.scenario_runner.asyncSetUp() + await self.scenario_runner.run_scenario(spec["tests"][0]) + await self.scenario_runner.entity_map["client0"].close() + final_entity_map = self.scenario_runner.entity_map + self.assertIn("events1", final_entity_map) + self.assertGreater(len(final_entity_map["events1"]), 0) + for event in final_entity_map["events1"]: + self.assertIn("PoolCreatedEvent", event["name"]) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + await client.close() + + async def test_store_all_others_as_entities(self): + self.scenario_runner = UnifiedSpecTestMixinV1() + spec = { + "description": "Find", + "schemaVersion": "1.2", + "createEntities": [ + { + "client": { + "id": "client0", + "uriOptions": {"retryReads": True}, + } + }, + {"database": {"id": "database0", "client": "client0", "databaseName": "dat"}}, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "dat", + } + }, + ], + "tests": [ + { + "description": "test loops", + "operations": [ + { + "name": "loop", + "object": "testRunner", + "arguments": { + "storeIterationsAsEntity": "iterations", + "storeSuccessesAsEntity": "successes", + "storeFailuresAsEntity": "failures", + "storeErrorsAsEntity": "errors", + "numIterations": 5, + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 1, "x": 44}}, + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": {"document": {"_id": 2, "x": 44}}, + }, + ], + }, + } + ], + } + ], + } + + await self.client.dat.dat.delete_many({}) + self.scenario_runner.TEST_SPEC = spec + await self.scenario_runner.asyncSetUp() + await self.scenario_runner.run_scenario(spec["tests"][0]) + await self.scenario_runner.entity_map["client0"].close() + entity_map = self.scenario_runner.entity_map + self.assertEqual(len(entity_map["errors"]), 4) + for error in entity_map["errors"]: + self.assertEqual(error["type"], "DuplicateKeyError") + self.assertEqual(entity_map["failures"], []) + self.assertEqual(entity_map["successes"], 2) + self.assertEqual(entity_map["iterations"], 5) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + await client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_crud_unified.py b/test/asynchronous/test_crud_unified.py new file mode 100644 index 0000000000..8b1f9b8e38 --- /dev/null +++ b/test/asynchronous/test_crud_unified.py @@ -0,0 +1,39 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CRUD unified spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "crud", "unified") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "crud", "unified") + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_csot.py b/test/asynchronous/test_csot.py new file mode 100644 index 0000000000..a978d1ccc0 --- /dev/null +++ b/test/asynchronous/test_csot.py @@ -0,0 +1,116 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the CSOT unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.asynchronous.utils import flaky + +import pymongo +from pymongo import _csot +from pymongo.errors import PyMongoError + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "csot") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "csot") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestCSOT(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + + @flaky(reason="PYTHON-3522") + async def test_timeout_nested(self): + coll = self.db.coll + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + with pymongo.timeout(10): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 10) + deadline_10 = _csot.get_deadline() + + # Capped at the original 10 deadline. 
+ with pymongo.timeout(15): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 15) + self.assertEqual(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + await coll.find_one() + + with pymongo.timeout(5): + await coll.find_one() + self.assertEqual(_csot.get_timeout(), 5) + self.assertLess(_csot.get_deadline(), deadline_10) + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), 10) + self.assertEqual(_csot.get_deadline(), deadline_10) + await coll.find_one() + + # Should be reset to previous values + self.assertEqual(_csot.get_timeout(), None) + self.assertEqual(_csot.get_deadline(), float("inf")) + self.assertEqual(_csot.get_rtt(), 0.0) + + @async_client_context.require_change_streams + @flaky(reason="PYTHON-3522") + async def test_change_stream_can_resume_after_timeouts(self): + coll = self.db.test + await coll.insert_one({}) + async with await coll.watch() as stream: + with pymongo.timeout(0.1): + with self.assertRaises(PyMongoError) as ctx: + await stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + with self.assertRaises(PyMongoError) as ctx: + await stream.try_next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + # Resume before the insert on 3.6 because 4.0 is required to avoid skipping documents + if async_client_context.version < (4, 0): + await stream.try_next() + await coll.insert_one({}) + with pymongo.timeout(10): + self.assertTrue(await stream.next()) + self.assertTrue(stream.alive) + # Timeout applies to entire next() call, not only individual commands. + with pymongo.timeout(0.5): + with self.assertRaises(PyMongoError) as ctx: + await stream.next() + self.assertTrue(ctx.exception.timeout) + self.assertTrue(stream.alive) + self.assertFalse(stream.alive) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_cursor.py b/test/asynchronous/test_cursor.py new file mode 100644 index 0000000000..906f78cc97 --- /dev/null +++ b/test/asynchronous/test_cursor.py @@ -0,0 +1,1867 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
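test_csot.py above exercises client-side operation timeouts (CSOT): pymongo.timeout(seconds) is a context manager that puts a deadline on every operation in its block, timed-out operations raise a PyMongoError whose .timeout attribute is true, and, as test_timeout_nested asserts, a nested timeout can shorten but never extend the enclosing deadline. A compact usage sketch (the coll argument is assumed to be an async collection):

import pymongo
from pymongo.errors import PyMongoError


async def fetch_with_deadline(coll) -> None:
    try:
        with pymongo.timeout(10):
            await coll.find_one()  # must complete within 10 seconds
            with pymongo.timeout(15):
                # Nested block: still capped by the outer 10-second deadline.
                await coll.find_one()
    except PyMongoError as exc:
        if exc.timeout:
            print("operation timed out under CSOT")
        else:
            raise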
+ +"""Test the cursor module.""" +from __future__ import annotations + +import copy +import gc +import itertools +import os +import random +import re +import sys +import threading +import time +from typing import Any + +import pymongo + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import flaky +from test.utils_shared import ( + AllowListEventListener, + EventListener, + OvertCommandListener, + async_wait_until, + delay, + ignore_deprecations, +) + +from bson import decode_all +from bson.code import Code +from bson.raw_bson import RawBSONDocument +from pymongo import ASCENDING, DESCENDING +from pymongo.asynchronous.cursor import AsyncCursor, CursorType +from pymongo.collation import Collation +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure, PyMongoError +from pymongo.operations import _IndexList +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestCursor(AsyncIntegrationTest): + async def test_deepcopy_cursor_littered_with_regexes(self): + cursor = self.db.test.find( + { + "x": re.compile("^hmmm.*"), + "y": [re.compile("^hmm.*")], + "z": {"a": [re.compile("^hm.*")]}, + re.compile("^key.*"): {"a": [re.compile("^hm.*")]}, + } + ) + + cursor2 = copy.deepcopy(cursor) + self.assertEqual(cursor._spec, cursor2._spec) + + async def test_add_remove_option(self): + cursor = self.db.test.find() + self.assertEqual(0, cursor._query_flags) + await cursor.add_option(2) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + await cursor.add_option(32) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + await cursor.add_option(128) + cursor2 = await self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) + self.assertEqual(162, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + + self.assertEqual(162, cursor._query_flags) + await cursor.add_option(128) + self.assertEqual(162, cursor._query_flags) + + cursor.remove_option(128) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(32) + cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + + self.assertEqual(2, cursor._query_flags) + cursor.remove_option(32) + self.assertEqual(2, cursor._query_flags) + + # Timeout + cursor = self.db.test.find(no_cursor_timeout=True) + self.assertEqual(16, cursor._query_flags) + cursor2 = await self.db.test.find().add_option(16) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(16) + self.assertEqual(0, cursor._query_flags) + + # Tailable / Await data + cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(34, cursor._query_flags) + cursor2 = await self.db.test.find().add_option(34) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(32) + self.assertEqual(2, cursor._query_flags) + + # Partial + cursor = 
self.db.test.find(allow_partial_results=True) + self.assertEqual(128, cursor._query_flags) + cursor2 = await self.db.test.find().add_option(128) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + cursor.remove_option(128) + self.assertEqual(0, cursor._query_flags) + + async def test_add_remove_option_exhaust(self): + # Exhaust - which mongos doesn't support + if async_client_context.is_mongos: + with self.assertRaises(InvalidOperation): + await anext(self.db.test.find(cursor_type=CursorType.EXHAUST)) + else: + cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) + self.assertEqual(64, cursor._query_flags) + cursor2 = await self.db.test.find().add_option(64) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + self.assertTrue(cursor._exhaust) + cursor.remove_option(64) + self.assertEqual(0, cursor._query_flags) + self.assertFalse(cursor._exhaust) + + async def test_allow_disk_use(self): + db = self.db + await db.pymongo_test.drop() + coll = db.pymongo_test + + with self.assertRaises(TypeError): + coll.find().allow_disk_use("baz") # type: ignore[arg-type] + + cursor = coll.find().allow_disk_use(True) + self.assertEqual(True, cursor._allow_disk_use) + cursor = coll.find().allow_disk_use(False) + self.assertEqual(False, cursor._allow_disk_use) + + async def test_max_time_ms(self): + db = self.db + await db.pymongo_test.drop() + coll = db.pymongo_test + with self.assertRaises(TypeError): + coll.find().max_time_ms("foo") # type: ignore[arg-type] + await coll.insert_one({"amalia": 1}) + await coll.insert_one({"amalia": 2}) + + coll.find().max_time_ms(None) + coll.find().max_time_ms(1) + + cursor = coll.find().max_time_ms(999) + self.assertEqual(999, cursor._max_time_ms) + cursor = coll.find().max_time_ms(10).max_time_ms(1000) + self.assertEqual(1000, cursor._max_time_ms) + + cursor = coll.find().max_time_ms(999) + c2 = cursor.clone() + self.assertEqual(999, c2._max_time_ms) + self.assertIn("$maxTimeMS", cursor._query_spec()) + self.assertIn("$maxTimeMS", c2._query_spec()) + + self.assertTrue(await coll.find_one(max_time_ms=1000)) + + client = self.client + if not async_client_context.is_mongos and async_client_context.test_commands_enabled: + # Cursor parses server timeout error in response to initial query. 
+ await client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn" + ) + try: + cursor = coll.find().max_time_ms(1) + try: + await anext(cursor) + except ExecutionTimeout: + pass + else: + self.fail("ExecutionTimeout not raised") + with self.assertRaises(ExecutionTimeout): + await coll.find_one(max_time_ms=1) + finally: + await client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") + + async def test_maxtime_ms_message(self): + db = self.db + await db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + await db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + + client = await self.async_rs_client(document_class=RawBSONDocument) + await client.db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + await client.db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + + async def test_max_await_time_ms(self): + db = self.db + await db.pymongo_test.drop() + coll = await db.create_collection("pymongo_test", capped=True, size=4096) + + with self.assertRaises(TypeError): + coll.find().max_await_time_ms("foo") # type: ignore[arg-type] + await coll.insert_one({"amalia": 1}) + await coll.insert_one({"amalia": 2}) + + coll.find().max_await_time_ms(None) + coll.find().max_await_time_ms(1) + + # When cursor is not tailable_await + cursor = coll.find() + self.assertEqual(None, cursor._max_await_time_ms) + cursor = coll.find().max_await_time_ms(99) + self.assertEqual(None, cursor._max_await_time_ms) + + # If cursor is tailable_await and timeout is unset + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + self.assertEqual(None, cursor._max_await_time_ms) + + # If cursor is tailable_await and timeout is set + cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) + self.assertEqual(99, cursor._max_await_time_ms) + + cursor = ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_await_time_ms(10) + .max_await_time_ms(90) + ) + self.assertEqual(90, cursor._max_await_time_ms) + + listener = AllowListEventListener("find", "getMore") + coll = (await self.async_rs_or_single_client(event_listeners=[listener]))[ + self.db.name + ].pymongo_test + + # Tailable_await defaults. + await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).to_list() + # find + self.assertNotIn("maxTimeMS", listener.started_events[0].command) + # getMore + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + listener.reset() + + # Tailable_await with max_await_time_ms set. 
+ await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertIn("maxTimeMS", listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() + + # Tailable_await with max_time_ms and make sure list() works on synchronous cursors + if _IS_SYNC: + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # type: ignore[call-overload] + else: + await coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + listener.reset() + + # Tailable_await with both max_time_ms and max_await_time_ms + await ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_time_ms(99) + .max_await_time_ms(99) + .to_list() + ) + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertIn("maxTimeMS", listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() + + # Non tailable_await with max_await_time_ms + await coll.find(batch_size=1).max_await_time_ms(99).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + listener.reset() + + # Non tailable_await with max_time_ms + await coll.find(batch_size=1).max_time_ms(99).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + + # Non tailable_await with both max_time_ms and max_await_time_ms + await coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88).to_list() + # find + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) + # getMore + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + + @async_client_context.require_test_commands + @async_client_context.require_no_mongos + async def test_max_time_ms_getmore(self): + # Test that Cursor handles server timeout error in response to getmore. 
+        coll = self.db.pymongo_test
+        await coll.insert_many([{} for _ in range(200)])
+        cursor = coll.find().max_time_ms(100)
+
+        # Send the initial query before turning on the failpoint.
+        await anext(cursor)
+        await self.client.admin.command(
+            "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn"
+        )
+        try:
+            try:
+                # Iterate up to the first getmore.
+                await cursor.to_list()
+            except ExecutionTimeout:
+                pass
+            else:
+                self.fail("ExecutionTimeout not raised")
+        finally:
+            await self.client.admin.command(
+                "configureFailPoint", "maxTimeAlwaysTimeOut", mode="off"
+            )
+
+    async def test_explain(self):
+        a = self.db.test.find()
+        await a.explain()
+        async for _ in a:
+            break
+        b = await a.explain()
+        self.assertIn("executionStats", b)
+
+    async def test_explain_with_read_concern(self):
+        # Do not add a readConcern level to explain.
+        listener = AllowListEventListener("explain")
+        client = await self.async_rs_or_single_client(event_listeners=[listener])
+        coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local"))
+        self.assertTrue(await coll.find().explain())
+        started = listener.started_events
+        self.assertEqual(len(started), 1)
+        self.assertNotIn("readConcern", started[0].command)
+
+    # https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.md#14-explain-helpers-allow-users-to-specify-maxtimems
+    async def test_explain_csot(self):
+        # Create a MongoClient with command monitoring enabled (referred to as client).
+        listener = AllowListEventListener("explain")
+        client = await self.async_rs_or_single_client(event_listeners=[listener])
+
+        # Create a collection, referred to as collection, with the namespace
+        # explain-test.collection. (Workaround for SERVER-108463.)
+        names = await client["explain-test"].list_collection_names()
+        if "collection" not in names:
+            collection = await client["explain-test"].create_collection("collection")
+        else:
+            collection = client["explain-test"]["collection"]
+
+        # Run an explained find on collection. The find will have the query predicate
+        # { name: 'john doe' }. Specify a maxTimeMS value of 2000ms for the explain.
+        with pymongo.timeout(2.0):
+            self.assertTrue(await collection.find({"name": "john doe"}).explain())
+
+        # Obtain the command started event for the explain. Confirm that the
+        # top-level explain command has a maxTimeMS value of 2000.
+ started = listener.started_events + self.assertEqual(len(started), 1) + assert 1500 < started[0].command["maxTimeMS"] <= 2000 + + async def test_hint(self): + db = self.db + with self.assertRaises(TypeError): + db.test.find().hint(5.5) # type: ignore[arg-type] + await db.test.drop() + + await db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + + with self.assertRaises(OperationFailure): + await db.test.find({"num": 17, "foo": 17}).hint([("num", ASCENDING)]).explain() + with self.assertRaises(OperationFailure): + await db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain() + + spec: list[Any] = [("num", DESCENDING)] + _ = await db.test.create_index(spec) + + first = await anext(db.test.find()) + self.assertEqual(0, first.get("num")) + first = await anext(db.test.find().hint(spec)) + self.assertEqual(99, first.get("num")) + with self.assertRaises(OperationFailure): + await db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain() + + a = db.test.find({"num": 17}) + a.hint(spec) + async for _ in a: + break + self.assertRaises(InvalidOperation, a.hint, spec) + + await db.test.drop() + await db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec: _IndexList = ["num", ("foo", DESCENDING)] + await db.test.create_index(spec) + first = await anext(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + self.assertEqual(0, first.get("foo")) + + await db.test.drop() + await db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec = ["num"] + await db.test.create_index(spec) + first = await anext(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + + async def test_hint_by_name(self): + db = self.db + await db.test.drop() + + await db.test.insert_many([{"i": i} for i in range(100)]) + + await db.test.create_index([("i", DESCENDING)], name="fooindex") + first = await anext(db.test.find()) + self.assertEqual(0, first.get("i")) + first = await anext(db.test.find().hint("fooindex")) + self.assertEqual(99, first.get("i")) + + async def test_limit(self): + db = self.db + + with self.assertRaises(TypeError): + db.test.find().limit(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().limit("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().limit(5.5) # type: ignore[arg-type] + self.assertTrue((db.test.find()).limit(5)) + + await db.test.drop() + await db.test.insert_many([{"x": i} for i in range(100)]) + + count = 0 + async for _ in db.test.find(): + count += 1 + self.assertEqual(count, 100) + + count = 0 + async for _ in db.test.find().limit(20): + count += 1 + self.assertEqual(count, 20) + + count = 0 + async for _ in db.test.find().limit(99): + count += 1 + self.assertEqual(count, 99) + + count = 0 + async for _ in db.test.find().limit(1): + count += 1 + self.assertEqual(count, 1) + + count = 0 + async for _ in db.test.find().limit(0): + count += 1 + self.assertEqual(count, 100) + + count = 0 + async for _ in db.test.find().limit(0).limit(50).limit(10): + count += 1 + self.assertEqual(count, 10) + + a = db.test.find() + a.limit(10) + async for _ in a: + break + with self.assertRaises(InvalidOperation): + a.limit(5) + + async def test_max(self): + db = self.db + await db.test.drop() + j_index = [("j", ASCENDING)] + await db.test.create_index(j_index) + + await db.test.insert_many([{"j": j, "k": j} for j in range(10)]) + + def find(max_spec, expected_index): + return db.test.find().max(max_spec).hint(expected_index) + + cursor = 
find([("j", 3)], j_index) + self.assertEqual(len(await cursor.to_list()), 3) + + # Tuple. + cursor = find((("j", 3),), j_index) + self.assertEqual(len(await cursor.to_list()), 3) + + # Compound index. + index_keys = [("j", ASCENDING), ("k", ASCENDING)] + await db.test.create_index(index_keys) + cursor = find([("j", 3), ("k", 3)], index_keys) + self.assertEqual(len(await cursor.to_list()), 3) + + # Wrong order. + cursor = find([("k", 3), ("j", 3)], index_keys) + with self.assertRaises(OperationFailure): + await cursor.to_list() + + # No such index. + cursor = find([("k", 3)], "k") + with self.assertRaises(OperationFailure): + await cursor.to_list() + with self.assertRaises(TypeError): + db.test.find().max(10) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().max({"j": 10}) # type: ignore[arg-type] + + async def test_min(self): + db = self.db + await db.test.drop() + j_index = [("j", ASCENDING)] + await db.test.create_index(j_index) + + await db.test.insert_many([{"j": j, "k": j} for j in range(10)]) + + def find(min_spec, expected_index): + return db.test.find().min(min_spec).hint(expected_index) + + cursor = find([("j", 3)], j_index) + self.assertEqual(len(await cursor.to_list()), 7) + + # Tuple. + cursor = find((("j", 3),), j_index) + self.assertEqual(len(await cursor.to_list()), 7) + + # Compound index. + index_keys = [("j", ASCENDING), ("k", ASCENDING)] + await db.test.create_index(index_keys) + cursor = find([("j", 3), ("k", 3)], index_keys) + self.assertEqual(len(await cursor.to_list()), 7) + + # Wrong order. + cursor = find([("k", 3), ("j", 3)], index_keys) + with self.assertRaises(OperationFailure): + await cursor.to_list() + + # No such index. + cursor = find([("k", 3)], "k") + with self.assertRaises(OperationFailure): + await cursor.to_list() + + with self.assertRaises(TypeError): + db.test.find().min(10) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().min({"j": 10}) # type: ignore[arg-type] + + async def test_min_max_without_hint(self): + coll = self.db.test + j_index = [("j", ASCENDING)] + await coll.create_index(j_index) + + with self.assertRaises(InvalidOperation): + await coll.find().min([("j", 3)]).to_list() + with self.assertRaises(InvalidOperation): + await coll.find().max([("j", 3)]).to_list() + + async def test_batch_size(self): + db = self.db + await db.test.drop() + await db.test.insert_many([{"x": x} for x in range(200)]) + + with self.assertRaises(TypeError): + db.test.find().batch_size(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().batch_size("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().batch_size(5.5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().batch_size(-1) + self.assertTrue((db.test.find()).batch_size(5)) + a = db.test.find() + async for _ in a: + break + self.assertRaises(InvalidOperation, a.batch_size, 5) + + async def cursor_count(cursor, expected_count): + count = 0 + async for _ in cursor: + count += 1 + self.assertEqual(expected_count, count) + + await cursor_count((db.test.find()).batch_size(0), 200) + await cursor_count((db.test.find()).batch_size(1), 200) + await cursor_count((db.test.find()).batch_size(2), 200) + await cursor_count((db.test.find()).batch_size(5), 200) + await cursor_count((db.test.find()).batch_size(100), 200) + await cursor_count((db.test.find()).batch_size(500), 200) + + await cursor_count((db.test.find()).batch_size(0).limit(1), 1) + await 
cursor_count((db.test.find()).batch_size(1).limit(1), 1) + await cursor_count((db.test.find()).batch_size(2).limit(1), 1) + await cursor_count((db.test.find()).batch_size(5).limit(1), 1) + await cursor_count((db.test.find()).batch_size(100).limit(1), 1) + await cursor_count((db.test.find()).batch_size(500).limit(1), 1) + + await cursor_count((db.test.find()).batch_size(0).limit(10), 10) + await cursor_count((db.test.find()).batch_size(1).limit(10), 10) + await cursor_count((db.test.find()).batch_size(2).limit(10), 10) + await cursor_count((db.test.find()).batch_size(5).limit(10), 10) + await cursor_count((db.test.find()).batch_size(100).limit(10), 10) + await cursor_count((db.test.find()).batch_size(500).limit(10), 10) + + cur = db.test.find().batch_size(1) + await anext(cur) + # find command batchSize should be 1 + self.assertEqual(0, len(cur._data)) + await anext(cur) + self.assertEqual(0, len(cur._data)) + await anext(cur) + self.assertEqual(0, len(cur._data)) + await anext(cur) + self.assertEqual(0, len(cur._data)) + + async def test_limit_and_batch_size(self): + db = self.db + await db.test.drop() + await db.test.insert_many([{"x": x} for x in range(500)]) + + curs = db.test.find().limit(0).batch_size(10) + await anext(curs) + self.assertEqual(10, curs._retrieved) + + curs = db.test.find(limit=0, batch_size=10) + await anext(curs) + self.assertEqual(10, curs._retrieved) + + curs = db.test.find().limit(-2).batch_size(0) + await anext(curs) + self.assertEqual(2, curs._retrieved) + + curs = db.test.find(limit=-2, batch_size=0) + await anext(curs) + self.assertEqual(2, curs._retrieved) + + curs = db.test.find().limit(-4).batch_size(5) + await anext(curs) + self.assertEqual(4, curs._retrieved) + + curs = db.test.find(limit=-4, batch_size=5) + await anext(curs) + self.assertEqual(4, curs._retrieved) + + curs = db.test.find().limit(50).batch_size(500) + await anext(curs) + self.assertEqual(50, curs._retrieved) + + curs = db.test.find(limit=50, batch_size=500) + await anext(curs) + self.assertEqual(50, curs._retrieved) + + curs = db.test.find().batch_size(500) + await anext(curs) + self.assertEqual(500, curs._retrieved) + + curs = db.test.find(batch_size=500) + await anext(curs) + self.assertEqual(500, curs._retrieved) + + curs = db.test.find().limit(50) + await anext(curs) + self.assertEqual(50, curs._retrieved) + + curs = db.test.find(limit=50) + await anext(curs) + self.assertEqual(50, curs._retrieved) + + # these two might be shaky, as the default + # is set by the server. 
as of 2.0.0-rc0, 101 + # or 1MB (whichever is smaller) is default + # for queries without ntoreturn + curs = db.test.find() + await anext(curs) + self.assertEqual(101, curs._retrieved) + + curs = db.test.find().limit(0).batch_size(0) + await anext(curs) + self.assertEqual(101, curs._retrieved) + + curs = db.test.find(limit=0, batch_size=0) + await anext(curs) + self.assertEqual(101, curs._retrieved) + + async def test_skip(self): + db = self.db + + with self.assertRaises(TypeError): + db.test.find().skip(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().skip("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().skip(5.5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().skip(-5) + self.assertTrue((db.test.find()).skip(5)) + + await db.drop_collection("test") + + await db.test.insert_many([{"x": i} for i in range(100)]) + + async for i in db.test.find(): + self.assertEqual(i["x"], 0) + break + + async for i in db.test.find().skip(20): + self.assertEqual(i["x"], 20) + break + + async for i in db.test.find().skip(99): + self.assertEqual(i["x"], 99) + break + + async for i in db.test.find().skip(1): + self.assertEqual(i["x"], 1) + break + + async for i in db.test.find().skip(0): + self.assertEqual(i["x"], 0) + break + + async for i in db.test.find().skip(0).skip(50).skip(10): + self.assertEqual(i["x"], 10) + break + + async for _ in db.test.find().skip(1000): + self.fail() + + a = db.test.find() + a.skip(10) + async for _ in a: + break + self.assertRaises(InvalidOperation, a.skip, 5) + + async def test_sort(self): + db = self.db + + with self.assertRaises(TypeError): + db.test.find().sort(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().sort([]) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().sort([], ASCENDING) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().sort([("hello", DESCENDING)], DESCENDING) # type: ignore[arg-type] + + await db.test.drop() + + unsort = list(range(10)) + random.shuffle(unsort) + + await db.test.insert_many([{"x": i} for i in unsort]) + + asc = [i["x"] async for i in db.test.find().sort("x", ASCENDING)] + self.assertEqual(asc, list(range(10))) + asc = [i["x"] async for i in db.test.find().sort("x")] + self.assertEqual(asc, list(range(10))) + asc = [i["x"] async for i in db.test.find().sort([("x", ASCENDING)])] + self.assertEqual(asc, list(range(10))) + + expect = list(reversed(range(10))) + desc = [i["x"] async for i in db.test.find().sort("x", DESCENDING)] + self.assertEqual(desc, expect) + desc = [i["x"] async for i in db.test.find().sort([("x", DESCENDING)])] + self.assertEqual(desc, expect) + desc = [i["x"] async for i in db.test.find().sort("x", ASCENDING).sort("x", DESCENDING)] + self.assertEqual(desc, expect) + + expected = [(1, 5), (2, 5), (0, 3), (7, 3), (9, 2), (2, 1), (3, 1)] + shuffled = list(expected) + random.shuffle(shuffled) + + await db.test.drop() + for a, b in shuffled: + await db.test.insert_one({"a": a, "b": b}) + + result = [ + (i["a"], i["b"]) + async for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) + ] + self.assertEqual(result, expected) + result = [(i["a"], i["b"]) async for i in db.test.find().sort([("b", DESCENDING), "a"])] + self.assertEqual(result, expected) + + a = db.test.find() + a.sort("x", ASCENDING) + async for _ in a: + break + self.assertRaises(InvalidOperation, a.sort, "x", ASCENDING) + + async def test_where(self): + db 
= self.db
+        await db.test.drop()
+
+        a = db.test.find()
+        with self.assertRaises(TypeError):
+            a.where(5)  # type: ignore[arg-type]
+        with self.assertRaises(TypeError):
+            a.where(None)  # type: ignore[arg-type]
+        with self.assertRaises(TypeError):
+            a.where({})  # type: ignore[arg-type]
+
+        await db.test.insert_many([{"x": i} for i in range(10)])
+
+        self.assertEqual(3, len(await db.test.find().where("this.x < 3").to_list()))
+        self.assertEqual(3, len(await db.test.find().where(Code("this.x < 3")).to_list()))
+
+        code_with_scope = Code("this.x < i", {"i": 3})
+        if async_client_context.version.at_least(4, 3, 3):
+            # MongoDB 4.4 removed support for Code with scope.
+            with self.assertRaises(OperationFailure):
+                await db.test.find().where(code_with_scope).to_list()
+
+            code_with_empty_scope = Code("this.x < 3", {})
+            with self.assertRaises(OperationFailure):
+                await db.test.find().where(code_with_empty_scope).to_list()
+        else:
+            self.assertEqual(3, len(await db.test.find().where(code_with_scope).to_list()))
+
+        self.assertEqual(10, len(await db.test.find().to_list()))
+        self.assertEqual([0, 1, 2], [a["x"] async for a in db.test.find().where("this.x < 3")])
+        self.assertEqual([], [a["x"] async for a in db.test.find({"x": 5}).where("this.x < 3")])
+        self.assertEqual([5], [a["x"] async for a in db.test.find({"x": 5}).where("this.x > 3")])
+
+        cursor = db.test.find().where("this.x < 3").where("this.x > 7")
+        self.assertEqual([8, 9], [a["x"] async for a in cursor])
+
+        a = db.test.find()
+        _ = a.where("this.x > 3")
+        async for _ in a:
+            break
+        self.assertRaises(InvalidOperation, a.where, "this.x < 3")
+
+    async def test_rewind(self):
+        await self.db.test.insert_many([{"x": i} for i in range(1, 4)])
+
+        cursor = self.db.test.find().limit(2)
+
+        count = 0
+        async for _ in cursor:
+            count += 1
+        self.assertEqual(2, count)
+
+        count = 0
+        async for _ in cursor:
+            count += 1
+        self.assertEqual(0, count)
+
+        await cursor.rewind()
+        count = 0
+        async for _ in cursor:
+            count += 1
+        self.assertEqual(2, count)
+
+        await cursor.rewind()
+        count = 0
+        async for _ in cursor:
+            break
+        await cursor.rewind()
+        async for _ in cursor:
+            count += 1
+        self.assertEqual(2, count)
+
+        self.assertEqual(cursor, await cursor.rewind())
+
+    # max_scan, oplog_replay, and snapshot are all deprecated.
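+    # (ignore_deprecations is a test-suite helper that, presumably, suppresses the
+    # DeprecationWarning these options raise so the clone test can still set them.)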
+    @ignore_deprecations
+    async def test_clone(self):
+        await self.db.test.insert_many([{"x": i} for i in range(1, 4)])
+
+        cursor = self.db.test.find().limit(2)
+
+        count = 0
+        async for _ in cursor:
+            count += 1
+        self.assertEqual(2, count)
+
+        count = 0
+        async for _ in cursor:
+            count += 1
+        self.assertEqual(0, count)
+
+        cursor = cursor.clone()
+        cursor2 = cursor.clone()
+        count = 0
+        async for _ in cursor:
+            count += 1
+        self.assertEqual(2, count)
+        async for _ in cursor2:
+            count += 1
+        self.assertEqual(4, count)
+
+        await cursor.rewind()
+        count = 0
+        async for _ in cursor:
+            break
+        cursor = cursor.clone()
+        async for _ in cursor:
+            count += 1
+        self.assertEqual(2, count)
+
+        self.assertNotEqual(cursor, cursor.clone())
+
+        # Just test attributes
+        cursor = (
+            self.db.test.find(
+                {"x": re.compile("^hello.*")},
+                projection={"_id": False},
+                skip=1,
+                no_cursor_timeout=True,
+                cursor_type=CursorType.TAILABLE_AWAIT,
+                sort=[("x", 1)],
+                allow_partial_results=True,
+                oplog_replay=True,
+                batch_size=123,
+                collation={"locale": "en_US"},
+                hint=[("_id", 1)],
+                max_scan=100,
+                max_time_ms=1000,
+                return_key=True,
+                show_record_id=True,
+                snapshot=True,
+                allow_disk_use=True,
+            )
+        ).limit(2)
+        cursor.min([("a", 1)]).max([("b", 3)])
+        await cursor.add_option(128)
+        cursor.comment("hi!")
+
+        # Every attribute should be the same.
+        cursor2 = cursor.clone()
+        self.assertEqual(cursor.__dict__, cursor2.__dict__)
+
+        # Shallow copies share state, so mutating the copy mutates the original.
+        cursor2 = copy.copy(cursor)
+        cursor2._projection["cursor2"] = False
+        self.assertIsNotNone(cursor._projection)
+        self.assertIn("cursor2", cursor._projection.keys())
+
+        # Deep copies don't share state, so mutating the copy shouldn't mutate the original.
+        cursor3 = copy.deepcopy(cursor)
+        cursor3._projection["cursor3"] = False
+        self.assertIsNotNone(cursor._projection)
+        self.assertNotIn("cursor3", cursor._projection.keys())
+
+        cursor4 = cursor.clone()
+        cursor4._projection["cursor4"] = False
+        self.assertIsNotNone(cursor._projection)
+        self.assertNotIn("cursor4", cursor._projection.keys())
+
+        # Test memo when deepcopying queries
+        query = {"hello": "world"}
+        query["reflexive"] = query
+        cursor = self.db.test.find(query)
+
+        cursor2 = copy.deepcopy(cursor)
+
+        self.assertNotEqual(id(cursor._spec), id(cursor2._spec))
+        self.assertEqual(id(cursor2._spec["reflexive"]), id(cursor2._spec))
+        self.assertEqual(len(cursor2._spec), 2)
+
+        # Ensure hints are cloned as the correct type
+        cursor = self.db.test.find().hint([("z", 1), ("a", 1)])
+        cursor2 = copy.deepcopy(cursor)
+        # Internal types are now dict rather than SON by default
+        self.assertIsInstance(cursor2._hint, dict)
+        self.assertEqual(cursor._hint, cursor2._hint)
+
+    @async_client_context.require_sync
+    def test_clone_empty(self):
+        self.db.test.delete_many({})
+        self.db.test.insert_many([{"x": i} for i in range(1, 4)])
+        cursor = self.db.test.find()[2:2]
+        cursor2 = cursor.clone()
+        self.assertRaises(StopIteration, cursor.next)
+        self.assertRaises(StopIteration, cursor2.next)
+
+    # AsyncCursors don't support slicing
+    @async_client_context.require_sync
+    def test_bad_getitem(self):
+        self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello")
+        self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5)
+        self.assertRaises(TypeError, lambda x: self.db.test.find()[x], None)
+
+    # AsyncCursors don't support slicing
+    @async_client_context.require_sync
+    def test_getitem_slice_index(self):
+        self.db.drop_collection("test")
+        self.db.test.insert_many([{"i": i} for i in range(100)])
+
+        count = itertools.count
+
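+        # Neither negative indices nor slice steps are supported, so both raise IndexError client-side.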
self.assertRaises(IndexError, lambda: self.db.test.find()[-1:]) + self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2]) + + for a, b in zip(count(0), self.db.test.find()): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(100, len(list(self.db.test.find()[0:]))) # type: ignore[call-overload] + for a, b in zip(count(0), self.db.test.find()[0:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + for a, b in zip(count(99), self.db.test.find()[99:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + for _i in self.db.test.find()[1000:]: + self.fail() + + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) # type: ignore[call-overload] + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[20:25]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[40:45][20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): # type: ignore[call-overload] + self.assertEqual(a, b["i"]) + + self.assertEqual(1, len(list(self.db.test.find()[:1]))) # type: ignore[call-overload] + self.assertEqual(5, len(list(self.db.test.find()[:5]))) # type: ignore[call-overload] + + self.assertEqual(1, len(list(self.db.test.find()[99:100]))) # type: ignore[call-overload] + self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) # type: ignore[call-overload] + self.assertEqual(0, len(list(self.db.test.find()[10:10]))) # type: ignore[call-overload] + self.assertEqual(0, len(list(self.db.test.find()[:0]))) # type: ignore[call-overload] + self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)))) # type: ignore[call-overload] + + self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) + + # AsyncCursors don't support slicing + @async_client_context.require_sync + def test_getitem_numeric_index(self): + self.db.drop_collection("test") + self.db.test.insert_many([{"i": i} for i in range(100)]) + + self.assertEqual(0, self.db.test.find()[0]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(50, self.db.test.find().skip(50)[0]["i"]) + self.assertEqual(50, self.db.test.find().skip(49)[1]["i"]) + self.assertEqual(50, self.db.test.find()[50]["i"]) + self.assertEqual(99, self.db.test.find()[99]["i"]) + + self.assertRaises(IndexError, lambda x: self.db.test.find()[x], -1) + self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) + self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) + + @async_client_context.require_sync + def test_iteration_with_list(self): + self.db.drop_collection("test") + self.db.test.insert_many([{"i": i} for i in range(100)]) + + cur = self.db.test.find().batch_size(10) 
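+        # list() drains the cursor: one find plus repeated getMores of ten documents each.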
+ + self.assertEqual(100, len(list(cur))) # type: ignore[call-overload] + + def test_len(self): + with self.assertRaises(TypeError): + len(self.db.test.find()) # type: ignore[arg-type] + + def test_properties(self): + self.assertEqual(self.db.test, self.db.test.find().collection) + + with self.assertRaises(AttributeError): + self.db.test.find().collection = "hello" # type: ignore + + async def test_get_more(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many([{"i": i} for i in range(10)]) + self.assertEqual(10, len(await db.test.find().batch_size(5).to_list())) + + async def test_tailable(self): + db = self.db + await db.drop_collection("test") + await db.create_collection("test", capped=True, size=1000, max=3) + self.addAsyncCleanup(db.drop_collection, "test") + cursor = db.test.find(cursor_type=CursorType.TAILABLE) + + await db.test.insert_one({"x": 1}) + count = 0 + async for doc in cursor: + count += 1 + self.assertEqual(1, doc["x"]) + self.assertEqual(1, count) + + await db.test.insert_one({"x": 2}) + count = 0 + async for doc in cursor: + count += 1 + self.assertEqual(2, doc["x"]) + self.assertEqual(1, count) + + await db.test.insert_one({"x": 3}) + count = 0 + async for doc in cursor: + count += 1 + self.assertEqual(3, doc["x"]) + self.assertEqual(1, count) + + # Capped rollover - the collection can never + # have more than 3 documents. Just make sure + # this doesn't raise... + await db.test.insert_many([{"x": i} for i in range(4, 7)]) + self.assertEqual(0, len(await cursor.to_list())) + + # and that the cursor doesn't think it's still alive. + self.assertFalse(cursor.alive) + + self.assertEqual(3, await db.test.count_documents({})) + + # __getitem__(index) + if _IS_SYNC: + for cursor in ( + db.test.find(cursor_type=CursorType.TAILABLE), + db.test.find(cursor_type=CursorType.TAILABLE_AWAIT), + ): + self.assertEqual(4, cursor[0]["x"]) + self.assertEqual(5, cursor[1]["x"]) + self.assertEqual(6, cursor[2]["x"]) + + cursor.rewind() + self.assertEqual([4], [doc["x"] for doc in cursor[0:1]]) + cursor.rewind() + self.assertEqual([5], [doc["x"] for doc in cursor[1:2]]) + cursor.rewind() + self.assertEqual([6], [doc["x"] for doc in cursor[2:3]]) + cursor.rewind() + self.assertEqual([4, 5], [doc["x"] for doc in cursor[0:2]]) + cursor.rewind() + self.assertEqual([5, 6], [doc["x"] for doc in cursor[1:3]]) + cursor.rewind() + self.assertEqual([4, 5, 6], [doc["x"] for doc in cursor[0:3]]) + + # The Async API does not support threading + @async_client_context.require_sync + def test_concurrent_close(self): + """Ensure a tailable can be closed from another thread.""" + db = self.db + db.drop_collection("test") + db.create_collection("test", capped=True, size=1000, max=3) + self.addCleanup(db.drop_collection, "test") + cursor = db.test.find(cursor_type=CursorType.TAILABLE) + + def iterate_cursor(): + while cursor.alive: + try: + for _doc in cursor: + pass + except OperationFailure as e: + if e.code != 237: # CursorKilled error code + raise + + t = threading.Thread(target=iterate_cursor) + t.start() + time.sleep(1) + cursor.close() + self.assertFalse(cursor.alive) + t.join(3) + self.assertFalse(t.is_alive()) + + async def test_distinct(self): + await self.db.drop_collection("test") + + await self.db.test.insert_many([{"a": 1}, {"a": 2}, {"a": 2}, {"a": 2}, {"a": 3}]) + + distinct = await self.db.test.find({"a": {"$lt": 3}}).distinct("a") + distinct.sort() + + self.assertEqual([1, 2], distinct) + + await self.db.drop_collection("test") + + await 
self.db.test.insert_one({"a": {"b": "a"}, "c": 12}) + await self.db.test.insert_one({"a": {"b": "b"}, "c": 8}) + await self.db.test.insert_one({"a": {"b": "c"}, "c": 12}) + await self.db.test.insert_one({"a": {"b": "c"}, "c": 8}) + + distinct = await self.db.test.find({"c": 8}).distinct("a.b") + distinct.sort() + + self.assertEqual(["b", "c"], distinct) + + async def test_with_statement(self): + await self.db.drop_collection("test") + await self.db.test.insert_many([{} for _ in range(100)]) + + c1 = self.db.test.find() + async with self.db.test.find() as c2: + self.assertTrue(c2.alive) + self.assertFalse(c2.alive) + + async with self.db.test.find() as c2: + self.assertEqual(100, len(await c2.to_list())) + self.assertFalse(c2.alive) + self.assertTrue(c1.alive) + + @async_client_context.require_no_mongos + async def test_comment(self): + await self.client.drop_database(self.db) + await self.db.command("profile", 2) # Profile ALL commands. + try: + await self.db.test.find().comment("foo").to_list() + count = await self.db.system.profile.count_documents( + {"ns": "pymongo_test.test", "op": "query", "command.comment": "foo"} + ) + self.assertEqual(count, 1) + + await self.db.test.find().comment("foo").distinct("type") + count = await self.db.system.profile.count_documents( + { + "ns": "pymongo_test.test", + "op": "command", + "command.distinct": "test", + "command.comment": "foo", + } + ) + self.assertEqual(count, 1) + finally: + await self.db.command("profile", 0) # Turn off profiling. + await self.db.system.profile.drop() + + await self.db.test.insert_many([{}, {}]) + cursor = self.db.test.find() + await anext(cursor) + self.assertRaises(InvalidOperation, cursor.comment, "hello") + + async def test_alive(self): + await self.db.test.delete_many({}) + await self.db.test.insert_many([{} for _ in range(3)]) + self.addAsyncCleanup(self.db.test.delete_many, {}) + cursor = self.db.test.find().batch_size(2) + n = 0 + while True: + await cursor.next() + n += 1 + if n == 3: + self.assertFalse(cursor.alive) + break + + self.assertTrue(cursor.alive) + + async def test_close_kills_cursor_synchronously(self): + # Kill any cursors possibly queued up by previous tests. + gc.collect() + await self.client._process_periodic_tasks() + + listener = AllowListEventListener("killCursors") + client = await self.async_rs_or_single_client(event_listeners=[listener]) + coll = client[self.db.name].test_close_kills_cursors + + # Add some test data. + docs_inserted = 1000 + await coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + listener.reset() + + # Close a cursor while it's still open on the server. + cursor = coll.find().batch_size(10) + self.assertTrue(bool(await anext(cursor))) + self.assertLess(cursor.retrieved, docs_inserted) + await cursor.close() + + def assertCursorKilled(): + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) + + assertCursorKilled() + listener.reset() + + # Close a command cursor while it's still open on the server. + cursor = await coll.aggregate([], batchSize=10) + self.assertTrue(bool(await anext(cursor))) + await cursor.close() + + # The cursor should be killed if it had a non-zero id. 
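+        # (An aggregation whose results fit in the first batch comes back with cursor id 0, leaving nothing to kill.)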
+ if cursor.cursor_id: + assertCursorKilled() + else: + self.assertEqual(0, len(listener.started_events)) + + @async_client_context.require_failCommand_appName + async def test_timeout_kills_cursor_asynchronously(self): + listener = AllowListEventListener("killCursors") + client = await self.async_rs_or_single_client(event_listeners=[listener]) + coll = client[self.db.name].test_timeout_kills_cursor + + # Add some test data. + docs_inserted = 10 + await coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + listener.reset() + + cursor = coll.find({}, batch_size=1) + await cursor.next() + + # Mock getMore commands timing out. + mock_timeout_errors = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "errorCode": 50, + "failCommands": ["getMore"], + }, + } + + async with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + await cursor.next() + + async def assertCursorKilled(): + await async_wait_until( + lambda: len(listener.succeeded_events), + "find successful killCursors command", + ) + + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) + + await assertCursorKilled() + listener.reset() + + cursor = await coll.aggregate([], batchSize=1) + await cursor.next() + + async with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + await cursor.next() + + await assertCursorKilled() + + def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__, eg test.find(invalidKwarg=1). + cursor = AsyncCursor.__new__(AsyncCursor) # Skip calling __init__ + cursor.__del__() # no error + + async def test_getMore_does_not_send_readPreference(self): + listener = AllowListEventListener("find", "getMore") + client = await self.async_rs_or_single_client(event_listeners=[listener]) + # We never send primary read preference so override the default. + coll = client[self.db.name].get_collection( + "test", read_preference=ReadPreference.PRIMARY_PREFERRED + ) + + await coll.delete_many({}) + await coll.insert_many([{} for _ in range(5)]) + self.addAsyncCleanup(coll.drop) + + await coll.find(batch_size=3).to_list() + started = listener.started_events + self.assertEqual(2, len(started)) + self.assertEqual("find", started[0].command_name) + if async_client_context.is_rs or async_client_context.is_mongos: + self.assertIn("$readPreference", started[0].command) + else: + self.assertNotIn("$readPreference", started[0].command) + self.assertEqual("getMore", started[1].command_name) + self.assertNotIn("$readPreference", started[1].command) + + @async_client_context.require_replica_set + async def test_to_list_tailable(self): + oplog = self.client.local.oplog.rs + last = await oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1).next() + ts = last["ts"] + # Set maxAwaitTimeMS=1 to speed up the test and avoid blocking on the noop writer. + c = oplog.find( + {"ts": {"$gte": ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True + ).max_await_time_ms(1) + self.addAsyncCleanup(c.close) + # Wait for the change to be read. 
+ docs = [] + while not docs: + docs = await c.to_list() + self.assertGreaterEqual(len(docs), 1) + + async def test_to_list_empty(self): + c = self.db.does_not_exist.find() + docs = await c.to_list() + self.assertEqual([], docs) + + async def test_to_list_length(self): + coll = self.db.test + await coll.insert_many([{} for _ in range(5)]) + self.addAsyncCleanup(coll.drop) + c = coll.find() + docs = await c.to_list(3) + self.assertEqual(len(docs), 3) + + c = coll.find(batch_size=2) + docs = await c.to_list(3) + self.assertEqual(len(docs), 3) + docs = await c.to_list(3) + self.assertEqual(len(docs), 2) + + @flaky(reason="PYTHON-3522") + async def test_to_list_csot_applied(self): + client = await self.async_single_client(timeoutMS=500, w=1) + coll = client.pymongo.test + # Initialize the client with a larger timeout to help make test less flaky + with pymongo.timeout(10): + await coll.insert_many([{} for _ in range(5)]) + cursor = coll.find({"$where": delay(1)}) + with self.assertRaises(PyMongoError) as ctx: + await cursor.to_list() + self.assertTrue(ctx.exception.timeout) + + @async_client_context.require_change_streams + async def test_command_cursor_to_list(self): + # Set maxAwaitTimeMS=1 to speed up the test. + c = await self.db.test.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addAsyncCleanup(c.close) + docs = await c.to_list() + self.assertGreaterEqual(len(docs), 0) + + @async_client_context.require_change_streams + async def test_command_cursor_to_list_empty(self): + # Set maxAwaitTimeMS=1 to speed up the test. + c = await self.db.does_not_exist.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addAsyncCleanup(c.close) + docs = await c.to_list() + self.assertEqual([], docs) + + @async_client_context.require_change_streams + async def test_command_cursor_to_list_length(self): + db = self.db + await db.drop_collection("test") + await db.test.insert_many([{"foo": 1}, {"foo": 2}]) + + pipeline = {"$project": {"_id": False, "foo": True}} + result = await db.test.aggregate([pipeline]) + self.assertEqual(len(await result.to_list()), 2) + + result = await db.test.aggregate([pipeline]) + self.assertEqual(len(await result.to_list(1)), 1) + + @async_client_context.require_failCommand_blockConnection + @flaky(reason="PYTHON-3522") + async def test_command_cursor_to_list_csot_applied(self): + client = await self.async_single_client(timeoutMS=500, w=1) + coll = client.pymongo.test + # Initialize the client with a larger timeout to help make test less flaky + with pymongo.timeout(10): + await coll.insert_many([{} for _ in range(5)]) + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 5}, + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 1000}, + } + cursor = await coll.aggregate([], batchSize=1) + async with self.fail_point(fail_command): + with self.assertRaises(PyMongoError) as ctx: + await cursor.to_list() + self.assertTrue(ctx.exception.timeout) + + +class TestRawBatchCursor(AsyncIntegrationTest): + async def test_find_raw(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + batches = await c.find_raw_batches().sort("_id").to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @async_client_context.require_transactions + async def test_find_raw_transaction(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = 
OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + async with client.start_session() as session: + async with await session.start_transaction(): + batches = await ( + client[self.db.name].test.find_raw_batches(session=session).sort("_id") + ).to_list() + cmd = listener.started_events[0] + self.assertEqual(cmd.command_name, "find") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.succeeded_events[-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @async_client_context.require_sessions + @async_client_context.require_failCommand_fail_point + async def test_find_raw_retryable_reads(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) + async with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} + ): + batches = await client[self.db.name].test.find_raw_batches().sort("_id").to_list() + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.started_events), 2) + for cmd in listener.started_events: + self.assertEqual(cmd.command_name, "find") + + @async_client_context.require_version_min(5, 0, 0) + @async_client_context.require_no_standalone + async def test_find_raw_snapshot_reads(self): + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) + db = client[self.db.name] + async with client.start_session(snapshot=True) as session: + await db.test.distinct("x", {}, session=session) + batches = await db.test.find_raw_batches(session=session).sort("_id").to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.started_events[1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) + + async def test_explain(self): + c = self.db.test + await c.insert_one({}) + explanation = await c.find_raw_batches().explain() + self.assertIsInstance(explanation, dict) + + async def test_empty(self): + await self.db.test.drop() + cursor = self.db.test.find_raw_batches() + with self.assertRaises(StopAsyncIteration): + await anext(cursor) + + async def test_clone(self): + await self.db.test.insert_one({}) + cursor = self.db.test.find_raw_batches() + # Copy of a RawBatchCursor is also a RawBatchCursor, not a Cursor. 
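+        # Raw batch cursors yield undecoded BSON, so receiving bytes (not dicts) shows the copies kept the raw-batch type.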
+ self.assertIsInstance(await anext(cursor.clone()), bytes) + self.assertIsInstance(await anext(copy.copy(cursor)), bytes) + + @async_client_context.require_no_mongos + async def test_exhaust(self): + c = self.db.test + await c.drop() + await c.insert_many({"_id": i} for i in range(200)) + result = b"".join(await c.find_raw_batches(cursor_type=CursorType.EXHAUST).to_list()) + self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) + + async def test_server_error(self): + with self.assertRaises(OperationFailure) as exc: + await anext(self.db.test.find_raw_batches({"x": {"$bad": 1}})) + + # The server response was decoded, not left raw. + self.assertIsInstance(exc.exception.details, dict) + + async def test_get_item(self): + with self.assertRaises(InvalidOperation): + self.db.test.find_raw_batches()[0] + + async def test_collation(self): + await anext(self.db.test.find_raw_batches(collation=Collation("en_US"))) + + async def test_read_concern(self): + await self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one( + {} + ) + c = self.db.get_collection("test", read_concern=ReadConcern("majority")) + await anext(c.find_raw_batches()) + + async def test_monitoring(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + await c.drop() + await c.insert_many([{"_id": i} for i in range(10)]) + + listener.reset() + cursor = c.find_raw_batches(batch_size=4) + + # First raw batch of 4 documents. + await anext(cursor) + + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("find", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("find", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + + # The batch is a list of one raw bytes object. + self.assertEqual(len(csr["firstBatch"]), 1) + self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(4)]) + + listener.reset() + + # Next raw batch of 4 documents. + await anext(cursor) + try: + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(len(csr["nextBatch"]), 1) + self.assertEqual(decode_all(csr["nextBatch"][0]), [{"_id": i} for i in range(4, 8)]) + finally: + # Finish the cursor. 
+ await cursor.close() + + +class TestRawBatchCommandCursor(AsyncIntegrationTest): + async def test_aggregate_raw(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + batches = await (await c.aggregate_raw_batches([{"$sort": {"_id": 1}}])).to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @async_client_context.require_transactions + async def test_aggregate_raw_transaction(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + async with client.start_session() as session: + async with await session.start_transaction(): + batches = await ( + await client[self.db.name].test.aggregate_raw_batches( + [{"$sort": {"_id": 1}}], session=session + ) + ).to_list() + cmd = listener.started_events[0] + self.assertEqual(cmd.command_name, "aggregate") + self.assertIn("$clusterTime", cmd.command) + self.assertEqual(cmd.command["startTransaction"], True) + self.assertEqual(cmd.command["txnNumber"], 1) + # Ensure we update $clusterTime from the command response. + last_cmd = listener.succeeded_events[-1] + self.assertEqual( + last_cmd.reply["$clusterTime"]["clusterTime"], + session.cluster_time["clusterTime"], + ) + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + @async_client_context.require_sessions + @async_client_context.require_failCommand_fail_point + async def test_aggregate_raw_retryable_reads(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) + async with self.fail_point( + {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} + ): + batches = await ( + await client[self.db.name].test.aggregate_raw_batches([{"$sort": {"_id": 1}}]) + ).to_list() + + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + self.assertEqual(len(listener.started_events), 3) + cmds = listener.started_events + self.assertEqual(cmds[0].command_name, "aggregate") + self.assertEqual(cmds[1].command_name, "aggregate") + + @async_client_context.require_version_min(5, 0, -1) + @async_client_context.require_no_standalone + async def test_aggregate_raw_snapshot_reads(self): + c = self.db.get_collection("test", write_concern=WriteConcern(w="majority")) + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + await c.insert_many(docs) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], retryReads=True) + db = client[self.db.name] + async with client.start_session(snapshot=True) as session: + await db.test.distinct("x", {}, session=session) + batches = await ( + await db.test.aggregate_raw_batches([{"$sort": {"_id": 1}}], session=session) + ).to_list() + self.assertEqual(1, len(batches)) + self.assertEqual(docs, decode_all(batches[0])) + + find_cmd = listener.started_events[1].command + self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") + self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) + + async def test_server_error(self): + c = self.db.test + await c.drop() + docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] + 
await c.insert_many(docs) + await c.insert_one({"_id": 10, "x": "not a number"}) + + with self.assertRaises(OperationFailure) as exc: + await ( + await self.db.test.aggregate_raw_batches( + [ + { + "$sort": {"_id": 1}, + }, + {"$project": {"x": {"$multiply": [2, "$x"]}}}, + ], + batchSize=4, + ) + ).to_list() + + # The server response was decoded, not left raw. + self.assertIsInstance(exc.exception.details, dict) + + async def test_get_item(self): + with self.assertRaises(InvalidOperation): + (await self.db.test.aggregate_raw_batches([]))[0] + + async def test_collation(self): + await anext(await self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) + + async def test_monitoring(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + await c.drop() + await c.insert_many([{"_id": i} for i in range(10)]) + + listener.reset() + cursor = await c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) + + # Start cursor, no initial batch. + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("aggregate", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("aggregate", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + + # First batch is empty. + self.assertEqual(len(csr["firstBatch"]), 0) + listener.reset() + + # Batches of 4 documents. + n = 0 + async for batch in cursor: + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertEqual("getMore", started.command_name) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getMore", succeeded.command_name) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(len(csr["nextBatch"]), 1) + self.assertEqual(csr["nextBatch"][0], batch) + self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) + + n += 4 + listener.reset() + + @async_client_context.require_version_min(5, 0, -1) + @async_client_context.require_no_mongos + @async_client_context.require_sync + async def test_exhaust_cursor_db_set(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + await c.delete_many({}) + await c.insert_many([{"_id": i} for i in range(3)]) + + listener.reset() + + result = list(await c.find({}, cursor_type=pymongo.CursorType.EXHAUST, batch_size=1)) + + self.assertEqual(len(result), 3) + + self.assertEqual( + listener.started_command_names(), ["find", "getMore", "getMore", "getMore"] + ) + for cmd in listener.started_events: + self.assertEqual(cmd.command["$db"], "pymongo_test") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_custom_types.py b/test/asynchronous/test_custom_types.py new file mode 100644 index 0000000000..f8fa51ba76 --- /dev/null +++ b/test/asynchronous/test_custom_types.py @@ -0,0 +1,976 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test support for callbacks to encode/decode custom types.""" +from __future__ import annotations + +import datetime +import sys +import tempfile +from collections import OrderedDict +from decimal import Decimal +from random import random +from typing import Any, Tuple, Type, no_type_check + +from bson.decimal128 import DecimalDecoder, DecimalEncoder +from gridfs.asynchronous.grid_file import AsyncGridIn, AsyncGridOut + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest + +from bson import ( + _BUILT_IN_TYPES, + RE_TYPE, + Decimal128, + _bson_to_dict, + _dict_to_bson, + decode, + decode_all, + decode_file_iter, + decode_iter, + encode, +) +from bson.codec_options import ( + CodecOptions, + TypeCodec, + TypeDecoder, + TypeEncoder, + TypeRegistry, +) +from bson.errors import InvalidDocument +from bson.int64 import Int64 +from bson.raw_bson import RawBSONDocument +from pymongo.asynchronous.collection import ReturnDocument +from pymongo.errors import DuplicateKeyError +from pymongo.message import _CursorAddress + +_IS_SYNC = False + + +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalEncoder(), DecimalDecoder()])) + + +class UndecipherableInt64Type: + def __init__(self, value): + self.value = value + + def __eq__(self, other): + if isinstance(other, type(self)): + return self.value == other.value + # Does not compare equal to integers. 
+ return False + + +class UndecipherableIntDecoder(TypeDecoder): + bson_type = Int64 + + def transform_bson(self, value): + return UndecipherableInt64Type(value) + + +class UndecipherableIntEncoder(TypeEncoder): + python_type = UndecipherableInt64Type + + def transform_python(self, value): + return Int64(value.value) + + +UNINT_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UndecipherableIntDecoder(), + ] + ) +) + + +UNINT_CODECOPTS = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder(), UndecipherableIntEncoder()]) +) + + +class UppercaseTextDecoder(TypeDecoder): + bson_type = str + + def transform_bson(self, value): + return value.upper() + + +UPPERSTR_DECODER_CODECOPTS = CodecOptions( + type_registry=TypeRegistry( + [ + UppercaseTextDecoder(), + ] + ) +) + + +def type_obfuscating_decoder_factory(rt_type): + class ResumeTokenToNanDecoder(TypeDecoder): + bson_type = rt_type + + def transform_bson(self, value): + return "NaN" + + return ResumeTokenToNanDecoder + + +class CustomBSONTypeTests: + @no_type_check + def roundtrip(self, doc): + bsonbytes = encode(doc, codec_options=self.codecopts) + rt_document = decode(bsonbytes, codec_options=self.codecopts) + self.assertEqual(doc, rt_document) + + def test_encode_decode_roundtrip(self): + self.roundtrip({"average": Decimal("56.47")}) + self.roundtrip({"average": {"b": Decimal("56.47")}}) + self.roundtrip({"average": [Decimal("56.47")]}) + self.roundtrip({"average": [[Decimal("56.47")]]}) + self.roundtrip({"average": [{"b": Decimal("56.47")}]}) + + @no_type_check + def test_decode_all(self): + documents = [] + for dec in range(3): + documents.append({"average": Decimal(f"56.4{dec}")}) + + bsonstream = b"" + for doc in documents: + bsonstream += encode(doc, codec_options=self.codecopts) + + self.assertEqual(decode_all(bsonstream, self.codecopts), documents) + + @no_type_check + def test__bson_to_dict(self): + document = {"average": Decimal("56.47")} + rawbytes = encode(document, codec_options=self.codecopts) + decoded_document = _bson_to_dict(rawbytes, self.codecopts) + self.assertEqual(document, decoded_document) + + @no_type_check + def test__dict_to_bson(self): + document = {"average": Decimal("56.47")} + rawbytes = encode(document, codec_options=self.codecopts) + encoded_document = _dict_to_bson(document, False, self.codecopts) + self.assertEqual(encoded_document, rawbytes) + + def _generate_multidocument_bson_stream(self): + inp_num = [str(random() * 100)[:4] for _ in range(10)] + docs = [{"n": Decimal128(dec)} for dec in inp_num] + edocs = [{"n": Decimal(dec)} for dec in inp_num] + bsonstream = b"" + for doc in docs: + bsonstream += encode(doc) + return edocs, bsonstream + + @no_type_check + def test_decode_iter(self): + expected, bson_data = self._generate_multidocument_bson_stream() + for expected_doc, decoded_doc in zip(expected, decode_iter(bson_data, self.codecopts)): + self.assertEqual(expected_doc, decoded_doc) + + @no_type_check + def test_decode_file_iter(self): + expected, bson_data = self._generate_multidocument_bson_stream() + fileobj = tempfile.TemporaryFile() + fileobj.write(bson_data) + fileobj.seek(0) + + for expected_doc, decoded_doc in zip(expected, decode_file_iter(fileobj, self.codecopts)): + self.assertEqual(expected_doc, decoded_doc) + + fileobj.close() + + +class TestCustomPythonBSONTypeToBSONMonolithicCodec(CustomBSONTypeTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.codecopts = DECIMAL_CODECOPTS + + +class 
TestCustomPythonBSONTypeToBSONMultiplexedCodec(CustomBSONTypeTests, unittest.TestCase): + @classmethod + def setUpClass(cls): + codec_options = CodecOptions( + type_registry=TypeRegistry((DecimalEncoder(), DecimalDecoder())) + ) + cls.codecopts = codec_options + + +class TestBSONFallbackEncoder(unittest.TestCase): + def _get_codec_options(self, fallback_encoder): + type_registry = TypeRegistry(fallback_encoder=fallback_encoder) + return CodecOptions(type_registry=type_registry) + + def test_simple(self): + codecopts = self._get_codec_options(lambda x: Decimal128(x)) + document = {"average": Decimal("56.47")} + bsonbytes = encode(document, codec_options=codecopts) + + exp_document = {"average": Decimal128("56.47")} + exp_bsonbytes = encode(exp_document) + self.assertEqual(bsonbytes, exp_bsonbytes) + + def test_erroring_fallback_encoder(self): + codecopts = self._get_codec_options(lambda _: 1 / 0) + + # fallback converter should not be invoked when encoding known types. + encode( + {"a": 1, "b": Decimal128("1.01"), "c": {"arr": ["abc", 3.678]}}, codec_options=codecopts + ) + + # expect an error when encoding a custom type. + document = {"average": Decimal("56.47")} + with self.assertRaises(ZeroDivisionError): + encode(document, codec_options=codecopts) + + def test_noop_fallback_encoder(self): + codecopts = self._get_codec_options(lambda x: x) + document = {"average": Decimal("56.47")} + with self.assertRaises(InvalidDocument): + encode(document, codec_options=codecopts) + + def test_type_unencodable_by_fallback_encoder(self): + def fallback_encoder(value): + try: + return Decimal128(value) + except: + raise TypeError("cannot encode type %s" % (type(value))) + + codecopts = self._get_codec_options(fallback_encoder) + document = {"average": Decimal} + with self.assertRaises(TypeError): + encode(document, codec_options=codecopts) + + def test_call_only_once_for_not_handled_big_integers(self): + called_with = [] + + def fallback_encoder(value): + called_with.append(value) + return value + + codecopts = self._get_codec_options(fallback_encoder) + document = {"a": {"b": {"c": 2 << 65}}} + + msg = "MongoDB can only handle up to 8-byte ints" + with self.assertRaises(OverflowError, msg=msg): + encode(document, codec_options=codecopts) + + self.assertEqual(called_with, [2 << 65]) + + +class TestBSONTypeEnDeCodecs(unittest.TestCase): + def test_instantiation(self): + msg = "Can't instantiate abstract class" + + def run_test(base, attrs, fail): + codec = type("testcodec", (base,), attrs) + if fail: + with self.assertRaisesRegex(TypeError, msg): + codec() + else: + codec() + + class MyType: + pass + + run_test( + TypeEncoder, + { + "python_type": MyType, + }, + fail=True, + ) + run_test(TypeEncoder, {"transform_python": lambda s, x: x}, fail=True) + run_test( + TypeEncoder, {"transform_python": lambda s, x: x, "python_type": MyType}, fail=False + ) + + run_test( + TypeDecoder, + { + "bson_type": Decimal128, + }, + fail=True, + ) + run_test(TypeDecoder, {"transform_bson": lambda s, x: x}, fail=True) + run_test( + TypeDecoder, {"transform_bson": lambda s, x: x, "bson_type": Decimal128}, fail=False + ) + + run_test(TypeCodec, {"bson_type": Decimal128, "python_type": MyType}, fail=True) + run_test( + TypeCodec, + {"transform_bson": lambda s, x: x, "transform_python": lambda s, x: x}, + fail=True, + ) + run_test( + TypeCodec, + { + "python_type": MyType, + "transform_python": lambda s, x: x, + "transform_bson": lambda s, x: x, + "bson_type": Decimal128, + }, + fail=False, + ) + + def test_type_checks(self): 
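+        # TypeCodec inherits from both TypeEncoder and TypeDecoder, while the two bases stay unrelated to each other.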
+ self.assertTrue(issubclass(TypeCodec, TypeEncoder)) + self.assertTrue(issubclass(TypeCodec, TypeDecoder)) + self.assertFalse(issubclass(TypeDecoder, TypeEncoder)) + self.assertFalse(issubclass(TypeEncoder, TypeDecoder)) + + +class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): + TypeA: Any + TypeB: Any + fallback_encoder_A2B: Any + fallback_encoder_A2BSON: Any + B2BSON: Type[TypeEncoder] + B2A: Type[TypeEncoder] + A2B: Type[TypeEncoder] + + @classmethod + def setUpClass(cls): + class TypeA: + def __init__(self, x): + self.value = x + + class TypeB: + def __init__(self, x): + self.value = x + + # transforms A, and only A into B + def fallback_encoder_A2B(value): + assert isinstance(value, TypeA) + return TypeB(value.value) + + # transforms A, and only A into something encodable + def fallback_encoder_A2BSON(value): + assert isinstance(value, TypeA) + return value.value + + # transforms B into something encodable + class B2BSON(TypeEncoder): + python_type = TypeB + + def transform_python(self, value): + return value.value + + # transforms A into B + # technically, this isn't a proper type encoder as the output is not + # BSON-encodable. + class A2B(TypeEncoder): + python_type = TypeA + + def transform_python(self, value): + return TypeB(value.value) + + # transforms B into A + # technically, this isn't a proper type encoder as the output is not + # BSON-encodable. + class B2A(TypeEncoder): + python_type = TypeB + + def transform_python(self, value): + return TypeA(value.value) + + cls.TypeA = TypeA + cls.TypeB = TypeB + cls.fallback_encoder_A2B = staticmethod(fallback_encoder_A2B) + cls.fallback_encoder_A2BSON = staticmethod(fallback_encoder_A2BSON) + cls.B2BSON = B2BSON + cls.B2A = B2A + cls.A2B = A2B + + def test_encode_fallback_then_custom(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2BSON()], fallback_encoder=self.fallback_encoder_A2B) + ) + testdoc = {"x": self.TypeA(123)} + expected_bytes = encode({"x": 123}) + + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) + + def test_encode_custom_then_fallback(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2BSON) + ) + testdoc = {"x": self.TypeB(123)} + expected_bytes = encode({"x": 123}) + + self.assertEqual(encode(testdoc, codec_options=codecopts), expected_bytes) + + def test_chaining_encoders_fails(self): + codecopts = CodecOptions(type_registry=TypeRegistry([self.A2B(), self.B2BSON()])) + + with self.assertRaises(InvalidDocument): + encode({"x": self.TypeA(123)}, codec_options=codecopts) + + def test_infinite_loop_exceeds_max_recursion_depth(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([self.B2A()], fallback_encoder=self.fallback_encoder_A2B) + ) + + # Raises max recursion depth exceeded error + with self.assertRaises(RuntimeError): + encode({"x": self.TypeA(100)}, codec_options=codecopts) + + +class TestTypeRegistry(unittest.TestCase): + types: Tuple[object, object] + codecs: Tuple[Type[TypeCodec], Type[TypeCodec]] + fallback_encoder: Any + + @classmethod + def setUpClass(cls): + class MyIntType: + def __init__(self, x): + assert isinstance(x, int) + self.x = x + + class MyStrType: + def __init__(self, x): + assert isinstance(x, str) + self.x = x + + class MyIntCodec(TypeCodec): + @property + def python_type(self): + return MyIntType + + @property + def bson_type(self): + return int + + def transform_python(self, value): + return value.x + + def transform_bson(self, value): 
+ return MyIntType(value) + + class MyStrCodec(TypeCodec): + @property + def python_type(self): + return MyStrType + + @property + def bson_type(self): + return str + + def transform_python(self, value): + return value.x + + def transform_bson(self, value): + return MyStrType(value) + + def fallback_encoder(value): + return value + + cls.types = (MyIntType, MyStrType) + cls.codecs = (MyIntCodec, MyStrCodec) + cls.fallback_encoder = fallback_encoder + + def test_simple(self): + codec_instances = [codec() for codec in self.codecs] + + def assert_proper_initialization(type_registry, codec_instances): + self.assertEqual( + type_registry._encoder_map, + { + self.types[0]: codec_instances[0].transform_python, + self.types[1]: codec_instances[1].transform_python, + }, + ) + self.assertEqual( + type_registry._decoder_map, + {int: codec_instances[0].transform_bson, str: codec_instances[1].transform_bson}, + ) + self.assertEqual(type_registry._fallback_encoder, self.fallback_encoder) + + type_registry = TypeRegistry(codec_instances, self.fallback_encoder) + assert_proper_initialization(type_registry, codec_instances) + + type_registry = TypeRegistry( + fallback_encoder=self.fallback_encoder, type_codecs=codec_instances + ) + assert_proper_initialization(type_registry, codec_instances) + + # Ensure codec list held by the type registry doesn't change if we + # mutate the initial list. + codec_instances_copy = list(codec_instances) + codec_instances.pop(0) + self.assertListEqual(type_registry._TypeRegistry__type_codecs, codec_instances_copy) + + def test_simple_separate_codecs(self): + class MyIntEncoder(TypeEncoder): + python_type = self.types[0] + + def transform_python(self, value): + return value.x + + class MyIntDecoder(TypeDecoder): + bson_type = int + + def transform_bson(self, value): + return self.types[0](value) + + codec_instances: list = [MyIntDecoder(), MyIntEncoder()] + type_registry = TypeRegistry(codec_instances) + + self.assertEqual( + type_registry._encoder_map, + {MyIntEncoder.python_type: codec_instances[1].transform_python}, + ) + self.assertEqual( + type_registry._decoder_map, + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}, + ) + + def test_initialize_fail(self): + err_msg = "Expected an instance of TypeEncoder, TypeDecoder, or TypeCodec, got .* instead" + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry(self.codecs) # type: ignore[arg-type] + + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry([type("AnyType", (object,), {})()]) + + err_msg = f"fallback_encoder {True!r} is not a callable" + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry([], True) # type: ignore[arg-type] + + err_msg = "fallback_encoder {!r} is not a callable".format("hello") + with self.assertRaisesRegex(TypeError, err_msg): + TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] + + def test_type_registry_codecs(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + self.assertEqual(type_registry.codecs, codec_instances) + + def test_type_registry_fallback(self): + type_registry = TypeRegistry(fallback_encoder=self.fallback_encoder) + self.assertEqual(type_registry.fallback_encoder, self.fallback_encoder) + + def test_type_registry_repr(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + r = f"TypeRegistry(type_codecs={codec_instances!r}, fallback_encoder={None!r})" + self.assertEqual(r, repr(type_registry)) + + def 
test_type_registry_eq(self): + codec_instances = [codec() for codec in self.codecs] + self.assertEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances)) + + codec_instances_2 = [codec() for codec in self.codecs] + self.assertNotEqual(TypeRegistry(codec_instances), TypeRegistry(codec_instances_2)) + + def test_builtin_types_override_fails(self): + def run_test(base, attrs): + msg = ( + r"TypeEncoders cannot change how built-in types " + r"are encoded \(encoder .* transforms type .*\)" + ) + for pytype in _BUILT_IN_TYPES: + attrs.update({"python_type": pytype, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) + codec_instance = codec() + with self.assertRaisesRegex(TypeError, msg): + TypeRegistry( + [ + codec_instance, + ] + ) + + # Test only some subtypes as not all can be subclassed. + if pytype in [ + bool, + type(None), + RE_TYPE, + ]: + continue + + class MyType(pytype): # type: ignore + pass + + attrs.update({"python_type": MyType, "transform_python": lambda x: x}) + codec = type("testcodec", (base,), attrs) + codec_instance = codec() + with self.assertRaisesRegex(TypeError, msg): + TypeRegistry( + [ + codec_instance, + ] + ) + + run_test(TypeEncoder, {}) + run_test(TypeCodec, {"bson_type": Decimal128, "transform_bson": lambda x: x}) + + +class TestCollectionWCustomType(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.drop() + + async def asyncTearDown(self): + await self.db.test.drop() + + async def test_overflow_int_w_custom_decoder(self): + type_registry = TypeRegistry(fallback_encoder=lambda val: str(val)) + codec_options = CodecOptions(type_registry=type_registry) + collection = self.db.get_collection("test", codec_options=codec_options) + + await collection.insert_one({"_id": 1, "data": 2**520}) + ret = await collection.find_one() + self.assertEqual(ret["data"], str(2**520)) + + async def test_command_errors_w_custom_type_decoder(self): + db = self.db + test_doc = {"_id": 1, "data": "a"} + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + result = await test.insert_one(test_doc) + self.assertEqual(result.inserted_id, test_doc["_id"]) + with self.assertRaises(DuplicateKeyError): + await test.insert_one(test_doc) + + async def test_find_w_custom_type_decoder(self): + db = self.db + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] + for doc in input_docs: + await db.test.insert_one(doc) + + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + async for doc in test.find({}, batch_size=1): + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + + async def test_find_w_custom_type_decoder_and_document_class(self): + async def run_test(doc_cls): + db = self.db + input_docs = [{"x": Int64(k)} for k in [1, 2, 3]] + for doc in input_docs: + await db.test.insert_one(doc) + + test = db.get_collection( + "test", + codec_options=CodecOptions( + type_registry=TypeRegistry([UndecipherableIntDecoder()]), document_class=doc_cls + ), + ) + async for doc in test.find({}, batch_size=1): + self.assertIsInstance(doc, doc_cls) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + + for doc_cls in [RawBSONDocument, OrderedDict]: + await run_test(doc_cls) + + async def test_aggregate_w_custom_type_decoder(self): + db = self.db + await db.test.insert_many( + [ + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in progress", "qty": Int64(1)}, + {"status": "complete", "qty": Int64(10)}, + {"status": "in 
progress", "qty": Int64(1)}, + ] + ) + test = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + + pipeline: list = [ + {"$match": {"status": "complete"}}, + {"$group": {"_id": "$status", "total_qty": {"$sum": "$qty"}}}, + ] + result = await test.aggregate(pipeline) + + res = (await result.to_list())[0] + self.assertEqual(res["_id"], "complete") + self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) + self.assertEqual(res["total_qty"].value, 20) + + async def test_distinct_w_custom_type(self): + await self.db.drop_collection("test") + + test = self.db.get_collection("test", codec_options=UNINT_CODECOPTS) + values = [ + UndecipherableInt64Type(1), + UndecipherableInt64Type(2), + UndecipherableInt64Type(3), + {"b": UndecipherableInt64Type(3)}, + ] + await test.insert_many({"a": val} for val in values) + + self.assertEqual(values, await test.distinct("a")) + + async def test_find_one_and__w_custom_type_decoder(self): + db = self.db + c = db.get_collection("test", codec_options=UNINT_DECODER_CODECOPTS) + await c.insert_one({"_id": 1, "x": Int64(1)}) + + doc = await c.find_one_and_update( + {"_id": 1}, {"$inc": {"x": 1}}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 2) + + doc = await c.find_one_and_replace( + {"_id": 1}, {"x": Int64(3), "y": True}, return_document=ReturnDocument.AFTER + ) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertEqual(doc["y"], True) + + doc = await c.find_one_and_delete({"y": True}) + self.assertEqual(doc["_id"], 1) + self.assertIsInstance(doc["x"], UndecipherableInt64Type) + self.assertEqual(doc["x"].value, 3) + self.assertIsNone(await c.find_one()) + + +class TestGridFileCustomType(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + + async def test_grid_out_custom_opts(self): + db = self.db.with_options(codec_options=UPPERSTR_DECODER_CODECOPTS) + one = AsyncGridIn( + db.fs, + _id=5, + filename="my_file", + chunkSize=1000, + metadata={"foo": "red", "bar": "blue"}, + bar=3, + baz="hello", + ) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(db.fs, 5) + await two.open() + + self.assertEqual("my_file", two.name) + self.assertEqual("my_file", two.filename) + self.assertEqual(5, two._id) + self.assertEqual(11, two.length) + self.assertEqual(1000, two.chunk_size) + self.assertIsInstance(two.upload_date, datetime.datetime) + self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) + self.assertEqual(3, two.bar) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, two, attr, 5) + + +class ChangeStreamsWCustomTypesTestMixin: + @no_type_check + async def change_stream(self, *args, **kwargs): + stream = await self.watched_target.watch(*args, max_await_time_ms=1, **kwargs) + self.addAsyncCleanup(stream.close) + return stream + + @no_type_check + async def insert_and_check(self, change_stream, insert_doc, expected_doc): + await self.input_target.insert_one(insert_doc) + change = await anext(change_stream) + self.assertEqual(change["fullDocument"], expected_doc) + + @no_type_check + async def kill_change_stream_cursor(self, change_stream): + # Cause a 
cursor not found error on the next getMore. + cursor = change_stream._cursor + address = _CursorAddress(cursor.address, cursor._ns) + client = self.input_target.database.client + await client._close_cursor_now(cursor.cursor_id, address) + + @no_type_check + async def test_simple(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) + await self.create_targets(codec_options=codecopts) + + input_docs = [ + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [ + {"_id": 1, "data": "HELLO"}, + {"_id": 2, "data": "WORLD"}, + {"_id": 3, "data": "!"}, + ] + + change_stream = await self.change_stream() + + await self.insert_and_check(change_stream, input_docs[0], expected_docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[1], expected_docs[1]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[2], expected_docs[2]) + + @no_type_check + async def test_custom_type_in_pipeline(self): + codecopts = CodecOptions( + type_registry=TypeRegistry([UndecipherableIntEncoder(), UppercaseTextDecoder()]) + ) + await self.create_targets(codec_options=codecopts) + + input_docs = [ + {"_id": UndecipherableInt64Type(1), "data": "hello"}, + {"_id": 2, "data": "world"}, + {"_id": UndecipherableInt64Type(3), "data": "!"}, + ] + expected_docs = [{"_id": 2, "data": "WORLD"}, {"_id": 3, "data": "!"}] + + # UndecipherableInt64Type should be encoded with the TypeRegistry. + change_stream = await self.change_stream( + [{"$match": {"documentKey._id": {"$gte": UndecipherableInt64Type(2)}}}] + ) + + await self.input_target.insert_one(input_docs[0]) + await self.insert_and_check(change_stream, input_docs[1], expected_docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, input_docs[2], expected_docs[1]) + + @no_type_check + async def test_break_resume_token(self): + # Get one document from a change stream to determine resumeToken type. + await self.create_targets() + change_stream = await self.change_stream() + await self.input_target.insert_one({"data": "test"}) + change = await anext(change_stream) + resume_token_decoder = type_obfuscating_decoder_factory(type(change["_id"]["_data"])) + + # Custom-decoding the resumeToken type breaks resume tokens. + codecopts = CodecOptions( + type_registry=TypeRegistry([resume_token_decoder(), UndecipherableIntEncoder()]) + ) + + # Re-create targets, change stream and proceed. 
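+        # Note that the assertions below still pass after each cursor
+        # kill: resuming does not depend on the user-visible (and here
+        # deliberately obfuscated) token type.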
+ await self.create_targets(codec_options=codecopts) + + docs = [{"_id": 1}, {"_id": 2}, {"_id": 3}] + + change_stream = await self.change_stream() + await self.insert_and_check(change_stream, docs[0], docs[0]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, docs[1], docs[1]) + await self.kill_change_stream_cursor(change_stream) + await self.insert_and_check(change_stream, docs[2], docs[2]) + + @no_type_check + async def test_document_class(self): + async def run_test(doc_cls): + codecopts = CodecOptions( + type_registry=TypeRegistry([UppercaseTextDecoder(), UndecipherableIntEncoder()]), + document_class=doc_cls, + ) + + await self.create_targets(codec_options=codecopts) + change_stream = await self.change_stream() + + doc = {"a": UndecipherableInt64Type(101), "b": "xyz"} + await self.input_target.insert_one(doc) + change = await anext(change_stream) + + self.assertIsInstance(change, doc_cls) + self.assertEqual(change["fullDocument"]["a"], 101) + self.assertEqual(change["fullDocument"]["b"], "XYZ") + + for doc_cls in [OrderedDict, RawBSONDocument]: + await run_test(doc_cls) + + +class TestCollectionChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + + async def create_targets(self, *args, **kwargs): + self.watched_target = self.db.get_collection("test", *args, **kwargs) + self.input_target = self.watched_target + # Ensure the collection exists and is empty. + await self.input_target.insert_one({}) + await self.input_target.delete_many({}) + + +class TestDatabaseChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_version_min(4, 2, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + await self.client.drop_database(self.watched_target) + + async def create_targets(self, *args, **kwargs): + self.watched_target = self.client.get_database(self.db.name, *args, **kwargs) + self.input_target = self.watched_target.test + # Insert a record to ensure db, coll are created. + await self.input_target.insert_one({"data": "dummy"}) + + +class TestClusterChangeStreamsWCustomTypes( + AsyncIntegrationTest, ChangeStreamsWCustomTypesTestMixin +): + @async_client_context.require_version_min(4, 2, 0) + @async_client_context.require_change_streams + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.test.delete_many({}) + + async def asyncTearDown(self): + await self.input_target.drop() + await self.client.drop_database(self.db) + + async def create_targets(self, *args, **kwargs): + codec_options = kwargs.pop("codec_options", None) + if codec_options: + kwargs["type_registry"] = codec_options.type_registry + kwargs["document_class"] = codec_options.document_class + self.watched_target = await self.async_rs_client(*args, **kwargs) + self.input_target = self.watched_target[self.db.name].test + # Insert a record to ensure db, coll are created. 
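+        # Unlike the collection-level tests, the watched target here is
+        # the whole database; inserts go through a single collection
+        # inside it and still surface on the database-level stream.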
+ await self.input_target.insert_one({"data": "dummy"}) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_database.py b/test/asynchronous/test_database.py new file mode 100644 index 0000000000..b49183a852 --- /dev/null +++ b/test/asynchronous/test_database.py @@ -0,0 +1,773 @@ +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the database module.""" +from __future__ import annotations + +import re +import sys +from typing import Any, Iterable, List, Mapping, Union + +from pymongo.asynchronous.command_cursor import AsyncCommandCursor + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.test_custom_types import DECIMAL_CODECOPTS +from test.utils_shared import ( + IMPOSSIBLE_WRITE_CONCERN, + OvertCommandListener, + async_wait_until, +) + +from bson.codec_options import CodecOptions +from bson.dbref import DBRef +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.regex import Regex +from bson.son import SON +from pymongo import helpers_shared +from pymongo.asynchronous import auth +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + CollectionInvalid, + ExecutionTimeout, + InvalidName, + InvalidOperation, + OperationFailure, + WriteConcernError, +) +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestDatabaseNoConnect(unittest.TestCase): + """Test Database features on a client that does not connect.""" + + client: AsyncMongoClient + + @classmethod + def setUpClass(cls): + cls.client = AsyncMongoClient(connect=False) + + def test_name(self): + self.assertRaises(TypeError, AsyncDatabase, self.client, 4) + self.assertRaises(InvalidName, AsyncDatabase, self.client, "my db") + self.assertRaises(InvalidName, AsyncDatabase, self.client, 'my"db') + self.assertRaises(InvalidName, AsyncDatabase, self.client, "my\x00db") + self.assertRaises(InvalidName, AsyncDatabase, self.client, "my\u0000db") + self.assertEqual("name", AsyncDatabase(self.client, "name").name) + + def test_get_collection(self): + codec_options = CodecOptions(tz_aware=True) + write_concern = WriteConcern(w=2, j=True) + read_concern = ReadConcern("majority") + coll = self.client.pymongo_test.get_collection( + "foo", codec_options, ReadPreference.SECONDARY, write_concern, read_concern + ) + self.assertEqual("foo", coll.name) + self.assertEqual(codec_options, coll.codec_options) + self.assertEqual(ReadPreference.SECONDARY, coll.read_preference) + self.assertEqual(write_concern, coll.write_concern) + self.assertEqual(read_concern, coll.read_concern) + + def test_getattr(self): + db = self.client.pymongo_test + self.assertIsInstance(db["_does_not_exist"], 
AsyncCollection) + + with self.assertRaises(AttributeError) as context: + db._does_not_exist + + # Message should be: "AttributeError: Database has no attribute + # '_does_not_exist'. To access the _does_not_exist collection, + # use database['_does_not_exist']". + self.assertIn("has no attribute '_does_not_exist'", str(context.exception)) + + def test_iteration(self): + db = self.client.pymongo_test + msg = "'AsyncDatabase' object is not iterable" + # Iteration fails + with self.assertRaisesRegex(TypeError, msg): + for _ in db: # type: ignore[misc] # error: "None" not callable [misc] + break + # Index fails + with self.assertRaises(TypeError): + _ = db[0] + # next fails + with self.assertRaisesRegex(TypeError, "'AsyncDatabase' object is not iterable"): + _ = next(db) + # .next() fails + with self.assertRaisesRegex(TypeError, "'AsyncDatabase' object is not iterable"): + _ = db.next() + # Do not implement typing.Iterable. + self.assertNotIsInstance(db, Iterable) + + +class TestDatabase(AsyncIntegrationTest): + def test_equality(self): + self.assertNotEqual(AsyncDatabase(self.client, "test"), AsyncDatabase(self.client, "mike")) + self.assertEqual(AsyncDatabase(self.client, "test"), AsyncDatabase(self.client, "test")) + + # Explicitly test inequality + self.assertFalse(AsyncDatabase(self.client, "test") != AsyncDatabase(self.client, "test")) + + def test_hashable(self): + self.assertIn(self.client.test, {AsyncDatabase(self.client, "test")}) + + def test_get_coll(self): + db = AsyncDatabase(self.client, "pymongo_test") + self.assertEqual(db.test, db["test"]) + self.assertEqual(db.test, AsyncCollection(db, "test")) + self.assertNotEqual(db.test, AsyncCollection(db, "mike")) + self.assertEqual(db.test.mike, db["test.mike"]) + + def test_repr(self): + name = "AsyncDatabase" + self.assertEqual( + repr(AsyncDatabase(self.client, "pymongo_test")), + "{}({!r}, {})".format(name, self.client, repr("pymongo_test")), + ) + + async def test_create_collection(self): + db = AsyncDatabase(self.client, "pymongo_test") + + await db.test.insert_one({"hello": "world"}) + with self.assertRaises(CollectionInvalid): + await db.create_collection("test") + + await db.drop_collection("test") + + with self.assertRaises(TypeError): + await db.create_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.create_collection(None) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + await db.create_collection("coll..ection") # type: ignore[arg-type] + + test = await db.create_collection("test") + self.assertIn("test", await db.list_collection_names()) + await test.insert_one({"hello": "world"}) + self.assertEqual((await db.test.find_one())["hello"], "world") + + await db.drop_collection("test.foo") + await db.create_collection("test.foo") + self.assertIn("test.foo", await db.list_collection_names()) + with self.assertRaises(CollectionInvalid): + await db.create_collection("test.foo") + + async def test_list_collection_names(self): + db = AsyncDatabase(self.client, "pymongo_test") + await db.test.insert_one({"dummy": "object"}) + await db.test.mike.insert_one({"dummy": "object"}) + + colls = await db.list_collection_names() + self.assertIn("test", colls) + self.assertIn("test.mike", colls) + for coll in colls: + self.assertNotIn("$", coll) + + await db.systemcoll.test.insert_one({}) + no_system_collections = await db.list_collection_names( + filter={"name": {"$regex": r"^(?!system\.)"}} + ) + for coll in no_system_collections: + self.assertFalse(coll.startswith("system.")) + 
self.assertIn("systemcoll.test", no_system_collections) + + # Force more than one batch. + db = self.client.many_collections + for i in range(101): + await db["coll" + str(i)].insert_one({}) + # No Error + try: + await db.list_collection_names() + finally: + await self.client.drop_database("many_collections") + + async def test_list_collection_names_filter(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client[self.db.name] + await db.capped.drop() + await db.create_collection("capped", capped=True, size=4096) + await db.capped.insert_one({}) + await db.non_capped.insert_one({}) + self.addAsyncCleanup(client.drop_database, db.name) + filter: Union[None, Mapping[str, Any]] + # Should not send nameOnly. + for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): + listener.reset() + names = await db.list_collection_names(filter=filter) + self.assertEqual(names, ["capped"]) + self.assertNotIn("nameOnly", listener.started_events[0].command) + + # Should send nameOnly (except on 2.6). + for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): + listener.reset() + names = await db.list_collection_names(filter=filter) + self.assertIn("capped", names) + self.assertIn("non_capped", names) + command = listener.started_events[0].command + self.assertIn("nameOnly", command) + self.assertTrue(command["nameOnly"]) + + async def test_check_exists(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client[self.db.name] + await db.drop_collection("unique") + await db.create_collection("unique", check_exists=True) + self.assertIn("listCollections", listener.started_command_names()) + listener.reset() + await db.drop_collection("unique") + await db.create_collection("unique", check_exists=False) + self.assertGreater(len(listener.started_events), 0) + self.assertNotIn("listCollections", listener.started_command_names()) + + async def test_list_collections(self): + await self.client.drop_database("pymongo_test") + db = AsyncDatabase(self.client, "pymongo_test") + await db.test.insert_one({"dummy": "object"}) + await db.test.mike.insert_one({"dummy": "object"}) + + results = await db.list_collections() + colls = [result["name"] async for result in results] + + # All the collections present. + self.assertIn("test", colls) + self.assertIn("test.mike", colls) + + # No collection containing a '$'. + for coll in colls: + self.assertNotIn("$", coll) + + # Duplicate check. + coll_cnt: dict = {} + for coll in colls: + try: + # Found duplicate. + coll_cnt[coll] += 1 + self.fail("Found duplicate") + except KeyError: + coll_cnt[coll] = 1 + coll_cnt: dict = {} + + # Check if there are any collections which don't exist. 
+ self.assertLessEqual(set(colls), {"test", "test.mike", "system.indexes"}) + + colls = await (await db.list_collections(filter={"name": {"$regex": "^test$"}})).to_list() + self.assertEqual(1, len(colls)) + + colls = await ( + await db.list_collections(filter={"name": {"$regex": "^test.mike$"}}) + ).to_list() + self.assertEqual(1, len(colls)) + + await db.drop_collection("test") + + await db.create_collection("test", capped=True, size=4096) + results = await db.list_collections(filter={"options.capped": True}) + colls = [result["name"] async for result in results] + + # Checking only capped collections are present + self.assertIn("test", colls) + self.assertNotIn("test.mike", colls) + + # No collection containing a '$'. + for coll in colls: + self.assertNotIn("$", coll) + + # Duplicate check. + coll_cnt = {} + for coll in colls: + try: + # Found duplicate. + coll_cnt[coll] += 1 + self.fail("Found duplicate") + except KeyError: + coll_cnt[coll] = 1 + coll_cnt = {} + + # Check if there are any collections which don't exist. + self.assertLessEqual(set(colls), {"test", "system.indexes"}) + + await self.client.drop_database("pymongo_test") + + async def test_list_collection_names_single_socket(self): + client = await self.async_rs_or_single_client(maxPoolSize=1) + await client.drop_database("test_collection_names_single_socket") + db = client.test_collection_names_single_socket + for i in range(200): + await db.create_collection(str(i)) + + await db.list_collection_names() # Must not hang. + await client.drop_database("test_collection_names_single_socket") + + async def test_drop_collection(self): + db = AsyncDatabase(self.client, "pymongo_test") + + with self.assertRaises(TypeError): + await db.drop_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.drop_collection(None) # type: ignore[arg-type] + + await db.test.insert_one({"dummy": "object"}) + self.assertIn("test", await db.list_collection_names()) + await db.drop_collection("test") + self.assertNotIn("test", await db.list_collection_names()) + + await db.test.insert_one({"dummy": "object"}) + self.assertIn("test", await db.list_collection_names()) + await db.drop_collection("test") + self.assertNotIn("test", await db.list_collection_names()) + + await db.test.insert_one({"dummy": "object"}) + self.assertIn("test", await db.list_collection_names()) + await db.drop_collection(db.test) + self.assertNotIn("test", await db.list_collection_names()) + + await db.test.insert_one({"dummy": "object"}) + self.assertIn("test", await db.list_collection_names()) + await db.test.drop() + self.assertNotIn("test", await db.list_collection_names()) + await db.test.drop() + + await db.drop_collection(db.test.doesnotexist) + + if async_client_context.is_rs: + db_wc = AsyncDatabase( + self.client, "pymongo_test", write_concern=IMPOSSIBLE_WRITE_CONCERN + ) + with self.assertRaises(WriteConcernError): + await db_wc.drop_collection("test") + + async def test_validate_collection(self): + db = self.client.pymongo_test + + with self.assertRaises(TypeError): + await db.validate_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.validate_collection(None) # type: ignore[arg-type] + + await db.test.insert_one({"dummy": "object"}) + + with self.assertRaises(OperationFailure): + await db.validate_collection("test.doesnotexist") + with self.assertRaises(OperationFailure): + await db.validate_collection(db.test.doesnotexist) + + self.assertTrue(await db.validate_collection("test")) + 
self.assertTrue(await db.validate_collection(db.test)) + self.assertTrue(await db.validate_collection(db.test, full=True)) + self.assertTrue(await db.validate_collection(db.test, scandata=True)) + self.assertTrue(await db.validate_collection(db.test, scandata=True, full=True)) + self.assertTrue(await db.validate_collection(db.test, True, True)) + + @async_client_context.require_version_min(4, 3, 3) + @async_client_context.require_no_standalone + async def test_validate_collection_background(self): + db = self.client.pymongo_test.with_options(write_concern=WriteConcern(w="majority")) + await db.test.insert_one({"dummy": "object"}) + coll = db.test + self.assertTrue(await db.validate_collection(coll, background=False)) + # The inMemory storage engine does not support background=True. + if async_client_context.storage_engine != "inMemory": + # background=True requires the collection exist in a checkpoint. + await self.client.admin.command("fsync") + self.assertTrue(await db.validate_collection(coll, background=True)) + self.assertTrue(await db.validate_collection(coll, scandata=True, background=True)) + # The server does not support background=True with full=True. + # Assert that we actually send the background option by checking + # that this combination fails. + with self.assertRaises(OperationFailure): + await db.validate_collection(coll, full=True, background=True) + + async def test_command(self): + self.maxDiff = None + db = self.client.admin + first = await db.command("buildinfo") + second = await db.command({"buildinfo": 1}) + third = await db.command("buildinfo", 1) + self.assertEqualReply(first, second) + self.assertEqualReply(second, third) + + # We use 'aggregate' as our example command, since it's an easy way to + # retrieve a BSON regex from a collection using a command. + async def test_command_with_regex(self): + db = self.client.pymongo_test + await db.test.drop() + await db.test.insert_one({"r": re.compile(".*")}) + await db.test.insert_one({"r": Regex(".*")}) + + result = await db.command("aggregate", "test", pipeline=[], cursor={}) + for doc in result["cursor"]["firstBatch"]: + self.assertIsInstance(doc["r"], Regex) + + async def test_command_bulkWrite(self): + # Ensure bulk write commands can be run directly via db.command(). 
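+        # The top-level bulkWrite command is new in MongoDB 8.0, hence
+        # the version gate; the insert/update/delete commands below run
+        # on all supported server versions.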
+        if async_client_context.version.at_least(8, 0):
+            await self.client.admin.command(
+                {
+                    "bulkWrite": 1,
+                    "nsInfo": [{"ns": self.db.test.full_name}],
+                    "ops": [{"insert": 0, "document": {}}],
+                }
+            )
+        await self.db.command({"insert": "test", "documents": [{}]})
+        await self.db.command({"update": "test", "updates": [{"q": {}, "u": {"$set": {"x": 1}}}]})
+        await self.db.command({"delete": "test", "deletes": [{"q": {}, "limit": 1}]})
+        await self.db.test.drop()
+
+    async def test_cursor_command(self):
+        db = self.client.pymongo_test
+        await db.test.drop()
+
+        docs = [{"_id": i, "doc": i} for i in range(3)]
+        await db.test.insert_many(docs)
+
+        cursor = await db.cursor_command("find", "test")
+
+        self.assertIsInstance(cursor, AsyncCommandCursor)
+
+        result_docs = await cursor.to_list()
+        self.assertEqual(docs, result_docs)
+
+    async def test_cursor_command_invalid(self):
+        with self.assertRaises(InvalidOperation):
+            await self.db.cursor_command("usersInfo", "test")
+
+    @async_client_context.require_no_fips
+    def test_password_digest(self):
+        with self.assertRaises(TypeError):
+            auth._password_digest(5)  # type: ignore[arg-type, call-arg]
+        with self.assertRaises(TypeError):
+            auth._password_digest(True)  # type: ignore[arg-type, call-arg]
+        with self.assertRaises(TypeError):
+            auth._password_digest(None)  # type: ignore[arg-type, call-arg]
+
+        self.assertIsInstance(auth._password_digest("mike", "password"), str)
+        self.assertEqual(
+            auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00"
+        )
+        self.assertEqual(
+            auth._password_digest("Gustave", "Dor\xe9"), "81e0e2364499209f466e75926a162d73"
+        )
+
+    async def test_id_ordering(self):
+        # PyMongo attempts to have _id show up first
+        # when you iterate key/value pairs in a document.
+        # This isn't reliable since Python dicts don't
+        # guarantee any particular order. This will never
+        # work right in Jython or in any Python environment
+        # with hash randomization enabled (e.g. tox).
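+        # Decoding into SON below sidesteps the ordering problem: SON
+        # preserves key order as decoded from BSON, and the driver moves
+        # _id to the front when encoding the insert, so iteration should
+        # yield _id first.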
+ db = self.client.pymongo_test + await db.test.drop() + await db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) + + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) + ) + cursor = db.test.find() + async for x in cursor: + for k, _v in x.items(): + self.assertEqual(k, "_id") + break + + async def test_deref(self): + db = self.client.pymongo_test + await db.test.drop() + + with self.assertRaises(TypeError): + await db.dereference(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.dereference("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + await db.dereference(None) # type: ignore[arg-type] + + self.assertEqual(None, await db.dereference(DBRef("test", ObjectId()))) + obj: dict[str, Any] = {"x": True} + key = (await db.test.insert_one(obj)).inserted_id + self.assertEqual(obj, await db.dereference(DBRef("test", key))) + self.assertEqual(obj, await db.dereference(DBRef("test", key, "pymongo_test"))) + with self.assertRaises(ValueError): + await db.dereference(DBRef("test", key, "foo")) + + self.assertEqual(None, await db.dereference(DBRef("test", 4))) + obj = {"_id": 4} + await db.test.insert_one(obj) + self.assertEqual(obj, await db.dereference(DBRef("test", 4))) + + async def test_deref_kwargs(self): + db = self.client.pymongo_test + await db.test.drop() + + await db.test.insert_one({"_id": 4, "foo": "bar"}) + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) + ) + self.assertEqual( + SON([("foo", "bar")]), await db.dereference(DBRef("test", 4), projection={"_id": False}) + ) + + # TODO some of these tests belong in the collection level testing. + async def test_insert_find_one(self): + db = self.client.pymongo_test + await db.test.drop() + + a_doc = SON({"hello": "world"}) + a_key = (await db.test.insert_one(a_doc)).inserted_id + self.assertIsInstance(a_doc["_id"], ObjectId) + self.assertEqual(a_doc["_id"], a_key) + self.assertEqual(a_doc, await db.test.find_one({"_id": a_doc["_id"]})) + self.assertEqual(a_doc, await db.test.find_one(a_key)) + self.assertEqual(None, await db.test.find_one(ObjectId())) + self.assertEqual(a_doc, await db.test.find_one({"hello": "world"})) + self.assertEqual(None, await db.test.find_one({"hello": "test"})) + + b = await db.test.find_one() + assert b is not None + b["hello"] = "mike" + await db.test.replace_one({"_id": b["_id"]}, b) + + self.assertNotEqual(a_doc, await db.test.find_one(a_key)) + self.assertEqual(b, await db.test.find_one(a_key)) + self.assertEqual(b, await db.test.find_one()) + + count = 0 + async for _ in db.test.find(): + count += 1 + self.assertEqual(count, 1) + + async def test_long(self): + db = self.client.pymongo_test + await db.test.drop() + await db.test.insert_one({"x": 9223372036854775807}) + retrieved = (await db.test.find_one())["x"] + self.assertEqual(Int64(9223372036854775807), retrieved) + self.assertIsInstance(retrieved, Int64) + await db.test.delete_many({}) + await db.test.insert_one({"x": Int64(1)}) + retrieved = (await db.test.find_one())["x"] + self.assertEqual(Int64(1), retrieved) + self.assertIsInstance(retrieved, Int64) + + async def test_delete(self): + db = self.client.pymongo_test + await db.test.drop() + + await db.test.insert_one({"x": 1}) + await db.test.insert_one({"x": 2}) + await db.test.insert_one({"x": 3}) + length = 0 + async for _ in db.test.find(): + length += 1 + self.assertEqual(length, 3) + + await db.test.delete_one({"x": 1}) + length 
= 0 + async for _ in db.test.find(): + length += 1 + self.assertEqual(length, 2) + + await db.test.delete_one(await db.test.find_one()) # type: ignore[arg-type] + await db.test.delete_one(await db.test.find_one()) # type: ignore[arg-type] + self.assertEqual(await db.test.find_one(), None) + + await db.test.insert_one({"x": 1}) + await db.test.insert_one({"x": 2}) + await db.test.insert_one({"x": 3}) + + self.assertTrue(await db.test.find_one({"x": 2})) + await db.test.delete_one({"x": 2}) + self.assertFalse(await db.test.find_one({"x": 2})) + + self.assertTrue(await db.test.find_one()) + await db.test.delete_many({}) + self.assertFalse(await db.test.find_one()) + + def test_command_response_without_ok(self): + # Sometimes (SERVER-10891) the server's response to a badly-formatted + # command document will have no 'ok' field. We should raise + # OperationFailure instead of KeyError. + with self.assertRaises(OperationFailure): + helpers_shared._check_command_response({}, None) + + try: + helpers_shared._check_command_response({"$err": "foo"}, None) + except OperationFailure as e: + self.assertEqual(e.args[0], "foo, full error: {'$err': 'foo'}") + else: + self.fail("_check_command_response didn't raise OperationFailure") + + def test_mongos_response(self): + error_document = { + "ok": 0, + "errmsg": "outer", + "raw": {"shard0/host0,host1": {"ok": 0, "errmsg": "inner"}}, + } + + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) + + self.assertIn("inner", str(context.exception)) + + # If a shard has no primary and you run a command like dbstats, which + # cannot be run on a secondary, mongos's response includes empty "raw" + # errors. See SERVER-15428. + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {}}} + + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) + + self.assertIn("outer", str(context.exception)) + + # Raw error has ok: 0 but no errmsg. Not a known case, but test it. + error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {"ok": 0}}} + + with self.assertRaises(OperationFailure) as context: + helpers_shared._check_command_response(error_document, None) + + self.assertIn("outer", str(context.exception)) + + @async_client_context.require_test_commands + @async_client_context.require_no_mongos + async def test_command_max_time_ms(self): + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn" + ) + try: + db = self.client.pymongo_test + await db.command("count", "test") + with self.assertRaises(ExecutionTimeout): + await db.command("count", "test", maxTimeMS=1) + pipeline = [{"$project": {"name": 1, "count": 1}}] + # Database command helper. + await db.command("aggregate", "test", pipeline=pipeline, cursor={}) + with self.assertRaises(ExecutionTimeout): + await db.command( + "aggregate", + "test", + pipeline=pipeline, + cursor={}, + maxTimeMS=1, + ) + # Collection helper. + await db.test.aggregate(pipeline=pipeline) + with self.assertRaises(ExecutionTimeout): + await db.test.aggregate(pipeline, maxTimeMS=1) + finally: + await self.client.admin.command( + "configureFailPoint", "maxTimeAlwaysTimeOut", mode="off" + ) + + def test_with_options(self): + codec_options = DECIMAL_CODECOPTS + read_preference = ReadPreference.SECONDARY_PREFERRED + write_concern = WriteConcern(j=True) + read_concern = ReadConcern(level="majority") + + # List of all options to compare. 
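+        # with_options() returns a copy of the database with only the
+        # named options replaced, so case 1 below expects every option
+        # to be unchanged and case 2 expects every option to be swapped.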
+ allopts = [ + "name", + "client", + "codec_options", + "read_preference", + "write_concern", + "read_concern", + ] + + db1 = self.client.get_database( + "with_options_test", + codec_options=codec_options, + read_preference=read_preference, + write_concern=write_concern, + read_concern=read_concern, + ) + + # Case 1: swap no options + db2 = db1.with_options() + for opt in allopts: + self.assertEqual(getattr(db1, opt), getattr(db2, opt)) + + # Case 2: swap all options + newopts = { + "codec_options": CodecOptions(), + "read_preference": ReadPreference.PRIMARY, + "write_concern": WriteConcern(w=1), + "read_concern": ReadConcern(level="local"), + } + db2 = db1.with_options(**newopts) # type: ignore[arg-type, call-overload] + for opt in newopts: + self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) + + +class TestDatabaseAggregation(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.pipeline: List[Mapping[str, Any]] = [ + {"$listLocalSessions": {}}, + {"$limit": 1}, + {"$addFields": {"dummy": "dummy field"}}, + {"$project": {"_id": 0, "dummy": 1}}, + ] + self.result = {"dummy": "dummy field"} + self.admin = self.client.admin + + async def test_database_aggregation(self): + async with await self.admin.aggregate(self.pipeline) as cursor: + result = await anext(cursor) + self.assertEqual(result, self.result) + + @async_client_context.require_no_mongos + async def test_database_aggregation_fake_cursor(self): + coll_name = "test_output" + write_stage: dict + if async_client_context.version < (4, 3): + db_name = "admin" + write_stage = {"$out": coll_name} + else: + # SERVER-43287 disallows writing with $out to the admin db, use + # $merge instead. + db_name = "pymongo_test" + write_stage = {"$merge": {"into": {"db": db_name, "coll": coll_name}}} + output_coll = self.client[db_name][coll_name] + await output_coll.drop() + self.addAsyncCleanup(output_coll.drop) + + admin = self.admin.with_options(write_concern=WriteConcern(w=0)) + pipeline = self.pipeline[:] + pipeline.append(write_stage) + async with await admin.aggregate(pipeline) as cursor: + with self.assertRaises(StopAsyncIteration): + await anext(cursor) + + async def lambda_fn(): + return await output_coll.find_one() + + result = await async_wait_until(lambda_fn, "read unacknowledged write") + self.assertEqual(result["dummy"], self.result["dummy"]) + + def test_bool(self): + with self.assertRaises(NotImplementedError): + bool(AsyncDatabase(self.client, "test")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_discovery_and_monitoring.py b/test/asynchronous/test_discovery_and_monitoring.py new file mode 100644 index 0000000000..5820d00c48 --- /dev/null +++ b/test/asynchronous/test_discovery_and_monitoring.py @@ -0,0 +1,587 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the topology module.""" +from __future__ import annotations + +import asyncio +import os +import socketserver +import sys +import threading +import time +from asyncio import StreamReader, StreamWriter +from pathlib import Path +from test.asynchronous.helpers import ConcurrentRunner +from test.asynchronous.utils import flaky + +from pymongo.asynchronous.pool import AsyncConnection +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + AsyncUnitTest, + async_client_context, + unittest, +) +from test.asynchronous.pymongo_mocks import DummyMonitor +from test.asynchronous.unified_format import generate_test_classes +from test.asynchronous.utils import ( + async_get_pool, +) +from test.utils_shared import ( + CMAPListener, + HeartbeatEventListener, + HeartbeatEventsListListener, + assertion_context, + async_barrier_wait, + async_create_barrier, + async_wait_until, + server_name_to_type, +) +from unittest.mock import patch + +from bson import Timestamp, json_util +from pymongo import common, monitoring +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology, _ErrorContext +from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.errors import ( + AutoReconnect, + ConfigurationError, + NetworkTimeout, + NotPrimaryError, + OperationFailure, +) +from pymongo.hello import Hello, HelloCompat +from pymongo.helpers_shared import _check_command_response, _check_write_command_response +from pymongo.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent +from pymongo.server_description import SERVER_TYPE, ServerDescription +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + SDAM_PATH = os.path.join(Path(__file__).resolve().parent, "discovery_and_monitoring") +else: + SDAM_PATH = os.path.join( + Path(__file__).resolve().parent.parent, + "discovery_and_monitoring", + ) + + +async def create_mock_topology(uri, monitor_class=DummyMonitor): + parsed_uri = await parse_uri(uri) + replica_set_name = None + direct_connection = None + load_balanced = None + if "replicaSet" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaSet"] + if "directConnection" in parsed_uri["options"]: + direct_connection = parsed_uri["options"]["directConnection"] + if "loadBalanced" in parsed_uri["options"]: + load_balanced = parsed_uri["options"]["loadBalanced"] + + topology_settings = TopologySettings( + parsed_uri["nodelist"], + replica_set_name=replica_set_name, + monitor_class=monitor_class, + direct_connection=direct_connection, + load_balanced=load_balanced, + ) + + c = Topology(topology_settings) + await c.open() + return c + + +async def got_hello(topology, server_address, hello_response): + server_description = ServerDescription(server_address, Hello(hello_response), 0) + await topology.on_change(server_description) + + +async def got_app_error(topology, app_error): + server_address = common.partition_node(app_error["address"]) + server = topology.get_server_by_address(server_address) + error_type = app_error["type"] + generation = app_error.get("generation", server.pool.gen.get_overall()) + when = app_error["when"] + max_wire_version = app_error["maxWireVersion"] + # XXX: We could get better test coverage by mocking the errors on the + # Pool/AsyncConnection. 
+ try: + if error_type == "command": + _check_command_response(app_error["response"], max_wire_version) + _check_write_command_response(app_error["response"]) + elif error_type == "network": + raise AutoReconnect("mock non-timeout network error") + elif error_type == "timeout": + raise NetworkTimeout("mock network timeout error") + else: + raise AssertionError(f"unknown error type: {error_type}") + raise AssertionError + except (AutoReconnect, NotPrimaryError, OperationFailure) as e: + if when == "beforeHandshakeCompletes": + completed_handshake = False + elif when == "afterHandshakeCompletes": + completed_handshake = True + else: + raise AssertionError(f"Unknown when field {when}") + + await topology.handle_error( + server_address, + _ErrorContext(e, max_wire_version, generation, completed_handshake, None), + ) + + +def get_type(topology, hostname): + description = topology.get_server_by_address((hostname, 27017)).description + return description.server_type + + +class TestAllScenarios(AsyncUnitTest): + pass + + +def topology_type_name(topology_type): + return TOPOLOGY_TYPE._fields[topology_type] + + +def server_type_name(server_type): + return SERVER_TYPE._fields[server_type] + + +def check_outcome(self, topology, outcome): + expected_servers = outcome["servers"] + + # Check weak equality before proceeding. + self.assertEqual(len(topology.description.server_descriptions()), len(expected_servers)) + + if outcome.get("compatible") is False: + with self.assertRaises(ConfigurationError): + topology.description.check_compatible() + else: + # No error. + topology.description.check_compatible() + + # Since lengths are equal, every actual server must have a corresponding + # expected server. + for expected_server_address, expected_server in expected_servers.items(): + node = common.partition_node(expected_server_address) + self.assertTrue(topology.has_server(node)) + actual_server = topology.get_server_by_address(node) + actual_server_description = actual_server.description + expected_server_type = server_name_to_type(expected_server["type"]) + + self.assertEqual( + server_type_name(expected_server_type), + server_type_name(actual_server_description.server_type), + ) + expected_error = expected_server.get("error") + if expected_error: + self.assertIn(expected_error, str(actual_server_description.error)) + + self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) + + self.assertEqual(expected_server.get("setVersion"), actual_server_description.set_version) + + self.assertEqual(expected_server.get("electionId"), actual_server_description.election_id) + + self.assertEqual( + expected_server.get("topologyVersion"), actual_server_description.topology_version + ) + + expected_pool = expected_server.get("pool") + if expected_pool: + self.assertEqual(expected_pool.get("generation"), actual_server.pool.gen.get_overall()) + + self.assertEqual(outcome["setName"], topology.description.replica_set_name) + self.assertEqual( + outcome.get("logicalSessionTimeoutMinutes"), + topology.description.logical_session_timeout_minutes, + ) + + expected_topology_type = getattr(TOPOLOGY_TYPE, outcome["topologyType"]) + self.assertEqual( + topology_type_name(expected_topology_type), + topology_type_name(topology.description.topology_type), + ) + + self.assertEqual(outcome.get("maxSetVersion"), topology.description.max_set_version) + self.assertEqual(outcome.get("maxElectionId"), topology.description.max_election_id) + + +def create_test(scenario_def): + async def run_scenario(self): + c 
= await create_mock_topology(scenario_def["uri"]) + + for i, phase in enumerate(scenario_def["phases"]): + # Including the phase description makes failures easier to debug. + description = phase.get("description", str(i)) + with assertion_context(f"phase: {description}"): + for response in phase.get("responses", []): + await got_hello(c, common.partition_node(response[0]), response[1]) + + for app_error in phase.get("applicationErrors", []): + await got_app_error(c, app_error) + + check_outcome(self, c, phase["outcome"]) + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(SDAM_PATH): + dirname = os.path.split(dirpath)[-1] + # SDAM unified tests are handled separately. + if dirname == "unified": + continue + + for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json_util.loads(scenario_stream.read()) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + + +class TestClusterTimeComparison(AsyncPyMongoTestCase): + async def test_cluster_time_comparison(self): + t = await create_mock_topology("mongodb://host") + + async def send_cluster_time(time, inc): + old = t.max_cluster_time() + new = {"clusterTime": Timestamp(time, inc)} + await got_hello( + t, + ("host", 27017), + { + "ok": 1, + "minWireVersion": 0, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + "$clusterTime": new, + }, + ) + + actual = t.max_cluster_time() + # We never update $clusterTime from monitoring connections. + self.assertEqual(actual, old) + + await send_cluster_time(0, 1) + await send_cluster_time(2, 2) + await send_cluster_time(2, 1) + await send_cluster_time(1, 3) + await send_cluster_time(2, 3) + + +class TestIgnoreStaleErrors(AsyncIntegrationTest): + async def test_ignore_stale_connection_errors(self): + if not _IS_SYNC and sys.version_info < (3, 11): + self.skipTest("Test requires asyncio.Barrier (added in Python 3.11)") + N_TASKS = 5 + barrier = async_create_barrier(N_TASKS) + client = await self.async_rs_or_single_client(minPoolSize=N_TASKS) + + # Wait for initial discovery. + await client.admin.command("ping") + pool = await async_get_pool(client) + starting_generation = pool.gen.get_overall() + await async_wait_until(lambda: len(pool.conns) == N_TASKS, "created conns") + + async def mock_command(*args, **kwargs): + # Synchronize all tasks to ensure they use the same generation. + await async_barrier_wait(barrier, timeout=30) + raise AutoReconnect("mock AsyncConnection.command error") + + for conn in pool.conns: + conn.command = mock_command + + async def insert_command(i): + try: + await client.test.command("insert", "test", documents=[{"i": i}]) + except AutoReconnect: + pass + + tasks = [] + for i in range(N_TASKS): + tasks.append(ConcurrentRunner(target=insert_command, args=(i,))) + for t in tasks: + await t.start() + for t in tasks: + await t.join() + + # Expect a single pool reset for the network error + self.assertEqual(starting_generation + 1, pool.gen.get_overall()) + + # Server should be selectable. 
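+        # This ping also confirms that the pool was reset exactly once:
+        # the remaining tasks failed on the same generation, so their
+        # errors are considered stale and do not trigger further resets.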
+ await client.admin.command("ping") + + +class CMAPHeartbeatListener(HeartbeatEventListener, CMAPListener): + pass + + +class TestPoolManagement(AsyncIntegrationTest): + @async_client_context.require_failCommand_appName + async def test_pool_unpause(self): + # This test implements the prose test "AsyncConnection Pool Management" + listener = CMAPHeartbeatListener() + _ = await self.async_single_client( + appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] + ) + # Assert that AsyncConnectionPoolReadyEvent occurs after the first + # ServerHeartbeatSucceededEvent. + await listener.async_wait_for_event(monitoring.PoolReadyEvent, 1) + pool_ready = listener.events_by_type(monitoring.PoolReadyEvent)[0] + hb_succeeded = listener.events_by_type(monitoring.ServerHeartbeatSucceededEvent)[0] + self.assertGreater(listener.events.index(pool_ready), listener.events.index(hb_succeeded)) + + listener.reset() + fail_hello = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMPoolManagementTest", + }, + } + async with self.fail_point(fail_hello): + await listener.async_wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + await listener.async_wait_for_event(monitoring.PoolClearedEvent, 1) + await listener.async_wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) + await listener.async_wait_for_event(monitoring.PoolReadyEvent, 1) + + @async_client_context.require_failCommand_appName + @async_client_context.require_test_commands + @async_client_context.require_async + @flaky(reason="PYTHON-5428") + async def test_connection_close_does_not_block_other_operations(self): + listener = CMAPHeartbeatListener() + client = await self.async_single_client( + appName="SDAMConnectionCloseTest", + event_listeners=[listener], + heartbeatFrequencyMS=500, + minPoolSize=10, + ) + server = await (await client._get_topology()).select_server( + writable_server_selector, _Op.TEST + ) + await async_wait_until( + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", + ) + + await client.db.test.insert_one({"x": 1}) + close_delay = 0.1 + latencies = [] + should_exit = [] + + async def run_task(): + while True: + start_time = time.monotonic() + await client.db.test.find_one({}) + elapsed = time.monotonic() - start_time + latencies.append(elapsed) + if should_exit: + break + await asyncio.sleep(0.001) + + task = ConcurrentRunner(target=run_task) + await task.start() + original_close = AsyncConnection.close_conn + try: + # Artificially delay the close operation to simulate a slow close + async def mock_close(self, reason): + await asyncio.sleep(close_delay) + await original_close(self, reason) + + AsyncConnection.close_conn = mock_close + + fail_hello = { + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 91, + "appName": "SDAMConnectionCloseTest", + }, + } + async with self.fail_point(fail_hello): + # Wait for server heartbeat to fail + await listener.async_wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + # Wait until all idle connections are closed to simulate real-world conditions + await listener.async_wait_for_event(monitoring.ConnectionClosedEvent, 10) + # Wait for one more find to complete after the pool has been reset, then shutdown the task + n = len(latencies) + await async_wait_until(lambda: len(latencies) >= n + 1, "run one more find") + should_exit.append(True) + await task.join() + # No operation latency 
should significantly exceed close_delay
+            self.assertLessEqual(max(latencies), close_delay * 5.0)
+        finally:
+            AsyncConnection.close_conn = original_close
+
+
+class TestServerMonitoringMode(AsyncIntegrationTest):
+    @async_client_context.require_no_load_balancer
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+
+    async def test_rtt_connection_is_enabled_stream(self):
+        client = await self.async_rs_or_single_client(serverMonitoringMode="stream")
+        await client.admin.command("ping")
+
+        def predicate():
+            for _, server in client._topology._servers.items():
+                monitor = server._monitor
+                if not monitor._stream:
+                    return False
+                if async_client_context.version >= (4, 4):
+                    if _IS_SYNC:
+                        if monitor._rtt_monitor._executor._thread is None:
+                            return False
+                    else:
+                        if monitor._rtt_monitor._executor._task is None:
+                            return False
+                else:
+                    if _IS_SYNC:
+                        if monitor._rtt_monitor._executor._thread is not None:
+                            return False
+                    else:
+                        if monitor._rtt_monitor._executor._task is not None:
+                            return False
+            return True
+
+        await async_wait_until(predicate, "find all RTT monitors")
+
+    async def test_rtt_connection_is_disabled_poll(self):
+        client = await self.async_rs_or_single_client(serverMonitoringMode="poll")
+
+        await self.assert_rtt_connection_is_disabled(client)
+
+    async def test_rtt_connection_is_disabled_auto(self):
+        envs = [
+            {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10"},
+            {"FUNCTIONS_WORKER_RUNTIME": "python"},
+            {"K_SERVICE": "gcpservicename"},
+            {"FUNCTION_NAME": "gcpfunctionname"},
+            {"VERCEL": "1"},
+        ]
+        for env in envs:
+            with patch.dict("os.environ", env):
+                client = await self.async_rs_or_single_client(serverMonitoringMode="auto")
+                await self.assert_rtt_connection_is_disabled(client)
+
+    async def assert_rtt_connection_is_disabled(self, client):
+        await client.admin.command("ping")
+        for _, server in client._topology._servers.items():
+            monitor = server._monitor
+            self.assertFalse(monitor._stream)
+            if _IS_SYNC:
+                self.assertIsNone(monitor._rtt_monitor._executor._thread)
+            else:
+                self.assertIsNone(monitor._rtt_monitor._executor._task)
+
+
+class MockTCPHandler(socketserver.BaseRequestHandler):
+    def handle(self):
+        self.server.events.append("client connected")
+        if self.request.recv(1024).strip():
+            self.server.events.append("client hello received")
+        self.request.close()
+
+
+class TCPServer(socketserver.TCPServer):
+    allow_reuse_address = True
+
+    def handle_request_and_shutdown(self):
+        self.handle_request()
+        self.server_close()
+
+
+class TestHeartbeatStartOrdering(AsyncPyMongoTestCase):
+    async def test_heartbeat_start_ordering(self):
+        events = []
+        listener = HeartbeatEventsListListener(events)
+
+        if _IS_SYNC:
+            server = TCPServer(("localhost", 9999), MockTCPHandler)
+            server.events = events
+            server_thread = ConcurrentRunner(target=server.handle_request_and_shutdown)
+            await server_thread.start()
+            _c = await self.simple_client(
+                "mongodb://localhost:9999",
+                serverSelectionTimeoutMS=500,
+                event_listeners=(listener,),
+            )
+            await server_thread.join()
+            listener.wait_for_event(ServerHeartbeatStartedEvent, 1)
+            listener.wait_for_event(ServerHeartbeatFailedEvent, 1)
+
+        else:
+
+            async def handle_client(reader: StreamReader, writer: StreamWriter):
+                events.append("client connected")
+                if (await reader.read(1024)).strip():
+                    events.append("client hello received")
+                writer.close()
+                await writer.wait_closed()
+
+            server = await asyncio.start_server(handle_client, "localhost", 9999)
+            server.events = events
+            await server.start_serving()
+            _c = 
self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + await _c.aconnect() + + await listener.async_wait_for_event(ServerHeartbeatStartedEvent, 1) + await listener.async_wait_for_event(ServerHeartbeatFailedEvent, 1) + + server.close() + await server.wait_closed() + await _c.close() + + self.assertEqual( + events, + [ + "serverHeartbeatStartedEvent", + "client connected", + "client hello received", + "serverHeartbeatFailedEvent", + ], + ) + + +# Generate unified tests. +globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_dns.py b/test/asynchronous/test_dns.py new file mode 100644 index 0000000000..5666612218 --- /dev/null +++ b/test/asynchronous/test_dns.py @@ -0,0 +1,308 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the SRV support tests.""" +from __future__ import annotations + +import glob +import json +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + async_client_context, + unittest, +) +from test.utils_shared import async_wait_until +from unittest.mock import MagicMock, patch + +from pymongo.asynchronous.uri_parser import parse_uri +from pymongo.common import validate_read_preference_tags +from pymongo.errors import ConfigurationError +from pymongo.uri_parser_shared import split_hosts + +_IS_SYNC = False + + +class TestDNSRepl(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "replica-set" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "replica-set" + ) + load_balanced = False + + @async_client_context.require_replica_set + def asyncSetUp(self): + pass + + +class TestDNSLoadBalanced(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "load-balanced" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "load-balanced" + ) + load_balanced = True + + @async_client_context.require_load_balancer + def asyncSetUp(self): + pass + + +class TestDNSSharded(AsyncPyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "srv_seedlist", "sharded") + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "sharded" + ) + load_balanced = False + + @async_client_context.require_mongos + def asyncSetUp(self): + pass + + +def create_test(test_case): + async def run_test(self): + uri = test_case["uri"] + seeds = test_case.get("seeds") + num_seeds = test_case.get("numSeeds", len(seeds or [])) + hosts = test_case.get("hosts") + num_hosts = test_case.get("numHosts", len(hosts or [])) + + options = test_case.get("options", {}) + if "ssl" in 
options: + options["tls"] = options.pop("ssl") + parsed_options = test_case.get("parsed_options") + # See DRIVERS-1324, unless tls is explicitly set to False we need TLS. + needs_tls = not (options and (options.get("ssl") is False or options.get("tls") is False)) + if needs_tls and not async_client_context.tls: + self.skipTest("this test requires a TLS cluster") + if not needs_tls and async_client_context.tls: + self.skipTest("this test requires a non-TLS cluster") + + if seeds: + seeds = split_hosts(",".join(seeds)) + if hosts: + hosts = frozenset(split_hosts(",".join(hosts))) + + if seeds or num_seeds: + result = await parse_uri(uri, validate=True) + if seeds is not None: + self.assertEqual(sorted(result["nodelist"]), sorted(seeds)) + if num_seeds is not None: + self.assertEqual(len(result["nodelist"]), num_seeds) + if options: + opts = result["options"] + if "readpreferencetags" in opts: + rpts = validate_read_preference_tags( + "readPreferenceTags", opts.pop("readpreferencetags") + ) + opts["readPreferenceTags"] = rpts + self.assertEqual(result["options"], options) + if parsed_options: + for opt, expected in parsed_options.items(): + if opt == "user": + self.assertEqual(result["username"], expected) + elif opt == "password": + self.assertEqual(result["password"], expected) + elif opt == "auth_database" or opt == "db": + self.assertEqual(result["database"], expected) + + hostname = next(iter(async_client_context.client.nodes))[0] + # The replica set members must be configured as 'localhost'. + if hostname == "localhost": + copts = async_client_context.default_client_options.copy() + # Remove tls since SRV parsing should add it automatically. + copts.pop("tls", None) + if async_client_context.tls: + # Our test certs don't support the SRV hosts used in these + # tests. + copts["tlsAllowInvalidHostnames"] = True + + client = self.simple_client(uri, **copts) + if client._options.connect: + await client.aconnect() + if num_seeds is not None: + self.assertEqual(len(client._topology_settings.seeds), num_seeds) + if hosts is not None: + await async_wait_until( + lambda: hosts == client.nodes, "match test hosts to client nodes" + ) + if num_hosts is not None: + await async_wait_until( + lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts" + ) + if test_case.get("ping", True): + await client.admin.command("ping") + # XXX: we should block until SRV poller runs at least once + # and re-run these assertions. 
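+            # (The SRV poller periodically re-queries DNS per the polling-SRV
+            # spec and can add or remove nodes after the initial seedlist is
+            # resolved, so a one-shot check of client.nodes can miss changes.)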
+ else: + try: + await parse_uri(uri) + except (ConfigurationError, ValueError): + pass + else: + self.fail("failed to raise an exception") + + return run_test + + +def create_tests(cls): + for filename in glob.glob(os.path.join(cls.TEST_PATH, "*.json")): + test_suffix, _ = os.path.splitext(os.path.basename(filename)) + with open(filename) as dns_test_file: + test_method = create_test(json.load(dns_test_file)) + setattr(cls, "test_" + test_suffix, test_method) + + +create_tests(TestDNSRepl) +create_tests(TestDNSLoadBalanced) +create_tests(TestDNSSharded) + + +class TestParsingErrors(AsyncPyMongoTestCase): + async def test_invalid_host(self): + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://127.0.0.1") + await client.aconnect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://[::1]") + await client.aconnect() + + +class IsolatedAsyncioTestCaseInsensitive(AsyncIntegrationTest): + async def test_connect_case_insensitive(self): + client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + await client.aconnect() + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + + +class TestInitialDnsSeedlistDiscovery(AsyncPyMongoTestCase): + """ + Initial DNS Seedlist Discovery prose tests + https://github.com/mongodb/specifications/blob/0a7a8b5/source/initial-dns-seedlist-discovery/tests/README.md#prose-tests + """ + + async def run_initial_dns_seedlist_discovery_prose_tests(self, test_cases): + for case in test_cases: + with patch("dns.asyncresolver.resolve") as mock_resolver: + + async def mock_resolve(query, record_type, *args, **kwargs): + mock_srv = MagicMock() + mock_srv.target.to_text.return_value = case["mock_target"] + return [mock_srv] + + mock_resolver.side_effect = mock_resolve + domain = case["query"].split("._tcp.")[1] + connection_string = f"mongodb+srv://{domain}" + if "expected_error" not in case: + await parse_uri(connection_string) + else: + try: + await parse_uri(connection_string) + except ConfigurationError as e: + self.assertIn(case["expected_error"], str(e)) + else: + self.fail(f"ConfigurationError was not raised for query: {case['query']}") + + async def test_1_allow_srv_hosts_with_fewer_than_three_dot_separated_parts(self): + with patch("dns.asyncresolver.resolve"): + await parse_uri("mongodb+srv://localhost/") + await parse_uri("mongodb+srv://mongo.local/") + + async def test_2_throw_when_return_address_does_not_end_with_srv_domain(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost.mongodb", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.evil.com", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongo.local", + "mock_target": "test_1.evil.com", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + async def test_3_throw_when_return_address_is_identical_to_srv_hostname(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "mongo.local", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + async def 
test_4_throw_when_return_address_does_not_contain_dot_separating_shared_part_of_domain( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "test_1.cluster_1localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "test_1.my_hostmongo.local", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "cluster.testmongodb.com", + "expected_error": "Invalid SRV host", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + async def test_5_when_srv_hostname_has_two_dot_separated_parts_it_is_valid_for_the_returned_hostname_to_be_identical( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.mongodb.com", + }, + ] + await self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_encryption.py b/test/asynchronous/test_encryption.py new file mode 100644 index 0000000000..74c0136ad0 --- /dev/null +++ b/test/asynchronous/test_encryption.py @@ -0,0 +1,3753 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test client side encryption spec.""" +from __future__ import annotations + +import base64 +import copy +import http.client +import json +import os +import pathlib +import re +import socket +import socketserver +import ssl +import sys +import textwrap +import traceback +import uuid +import warnings +from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, async_client_context +from test.asynchronous.test_bulk import AsyncBulkTestBase +from test.asynchronous.utils import flaky +from test.asynchronous.utils_spec_runner import AsyncSpecRunner, AsyncSpecTestCreator +from threading import Thread +from typing import Any, Dict, Mapping, Optional + +import pytest + +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.daemon import _spawn_daemon +from pymongo.uri_parser_shared import _parse_kms_tls_options + +try: + from pymongo.pyopenssl_context import IS_PYOPENSSL +except ImportError: + IS_PYOPENSSL = False + +sys.path[0:0] = [""] + +from test import ( + unittest, +) +from test.asynchronous.test_bulk import AsyncBulkTestBase +from test.asynchronous.unified_format import generate_test_classes +from test.asynchronous.utils_spec_runner import AsyncSpecRunner +from test.helpers_shared import ( + ALL_KMS_PROVIDERS, + AWS_CREDS, + AWS_TEMP_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + DEFAULT_KMS_TLS, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, +) +from test.utils_shared import ( + AllowListEventListener, + OvertCommandListener, + TopologyEventListener, + async_wait_until, + camel_to_snake_args, + is_greenthread_patched, +) + +from bson import BSON, DatetimeMS, Decimal128, encode, json_util +from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.errors import BSONError +from bson.json_util import JSONOptions +from bson.son import SON +from pymongo import ReadPreference +from pymongo.asynchronous import encryption +from pymongo.asynchronous.encryption import Algorithm, AsyncClientEncryption, QueryType +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.cursor_shared import CursorType +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts, TextOpts +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ConfigurationError, + DuplicateKeyError, + EncryptedCollectionError, + EncryptionError, + InvalidOperation, + OperationFailure, + PyMongoError, + ServerSelectionTimeoutError, + WriteError, +) +from pymongo.operations import InsertOne, ReplaceOne, UpdateOne +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +pytestmark = pytest.mark.encryption + +KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} + + +def get_client_opts(client): + return client.options + + +class TestAutoEncryptionOpts(AsyncPyMongoTestCase): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_crypt_shared(self): + # Test that we can pick up crypt_shared lib automatically + self.simple_client( + auto_encryption_opts=AutoEncryptionOpts( + KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True + ), + connect=False, + ) + + @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") + def test_init_requires_pymongocrypt(self): + with self.assertRaises(ConfigurationError): + AutoEncryptionOpts({}, "keyvault.datakeys") + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not 
installed") + def test_init(self): + opts = AutoEncryptionOpts({}, "keyvault.datakeys") + self.assertEqual(opts._kms_providers, {}) + self.assertEqual(opts._key_vault_namespace, "keyvault.datakeys") + self.assertEqual(opts._key_vault_client, None) + self.assertEqual(opts._schema_map, None) + self.assertEqual(opts._bypass_auto_encryption, False) + self.assertEqual(opts._mongocryptd_uri, "mongodb://localhost:27020") + self.assertEqual(opts._mongocryptd_bypass_spawn, False) + self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + self.assertEqual(opts._kms_tls_options, None) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_init_spawn_args(self): + # User can override idleShutdownTimeoutSecs + opts = AutoEncryptionOpts( + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--idleShutdownTimeoutSecs=88"] + ) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=88"]) + + # idleShutdownTimeoutSecs is added by default + opts = AutoEncryptionOpts({}, "keyvault.datakeys", mongocryptd_spawn_args=[]) + self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) + + # Also added when other options are given + opts = AutoEncryptionOpts( + {}, "keyvault.datakeys", mongocryptd_spawn_args=["--quiet", "--port=27020"] + ) + self.assertEqual( + opts._mongocryptd_spawn_args, + ["--quiet", "--port=27020", "--idleShutdownTimeoutSecs=60"], + ) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def test_init_kms_tls_options(self): + # Error cases: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) + with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): + AsyncMongoClient(auto_encryption_opts=opts) + + tls_opts: Any + for tls_opts in [ + {"kmip": {"tls": True, "tlsInsecure": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, + {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, + ]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): + AsyncMongoClient(auto_encryption_opts=opts) + opts = AutoEncryptionOpts( + {}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}} + ) + with self.assertRaises(FileNotFoundError): + AsyncMongoClient(auto_encryption_opts=opts) + # Success cases: + tls_opts: Any + for tls_opts in [None, {}]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + kms_tls_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + self.assertEqual(kms_tls_contexts, {}) + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + ctx = _kms_ssl_contexts["aws"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + opts = AutoEncryptionOpts( + {}, + "k.d", + kms_tls_options=DEFAULT_KMS_TLS, + ) + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] + self.assertEqual(ctx.check_hostname, True) + self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) + + +class TestClientOptions(AsyncPyMongoTestCase): + async def test_default(self): + client = 
self.simple_client(connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, None) + + client = self.simple_client(auto_encryption_opts=None, connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, None) + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def test_kwargs(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = self.simple_client(auto_encryption_opts=opts, connect=False) + self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) + + +class AsyncEncryptionIntegrationTest(AsyncIntegrationTest): + """Base class for encryption integration tests.""" + + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + + def assertEncrypted(self, val): + self.assertIsInstance(val, Binary) + self.assertEqual(val.subtype, 6) + + def assertBinaryUUID(self, val): + self.assertIsInstance(val, Binary) + self.assertEqual(val.subtype, UUID_SUBTYPE) + + def create_client_encryption( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: AsyncMongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = AsyncClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + self.addAsyncCleanup(client_encryption.close) + return client_encryption + + @classmethod + def unmanaged_create_client_encryption( + cls, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: AsyncMongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = AsyncClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + return client_encryption + + +# Location of JSON test files. +if _IS_SYNC: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent, "client-side-encryption") +else: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "client-side-encryption") + +SPEC_PATH = os.path.join(BASE, "spec") + +OPTS = CodecOptions() + +# Use SON to preserve the order of fields while parsing json. Use tz_aware +# =False to match how CodecOptions decodes dates. +JSON_OPTS = JSONOptions(document_class=SON, tz_aware=False) + + +def read(*paths): + with open(os.path.join(BASE, *paths)) as fp: + return fp.read() + + +def json_data(*paths): + return json_util.loads(read(*paths), json_options=JSON_OPTS) + + +def bson_data(*paths): + return encode(json_data(*paths), codec_options=OPTS) + + +class TestClientSimple(AsyncEncryptionIntegrationTest): + async def _test_auto_encrypt(self, opts): + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + # Create the encrypted field's data key. + key_vault = await create_key_vault( + self.client.keyvault.datakeys, json_data("custom", "key-document-local.json") + ) + self.addAsyncCleanup(key_vault.drop) + + # Collection.insert_one/insert_many auto encrypts. 
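+        # Unacknowledged (w=0) writes take the same auto-encryption path;
+        # since w=0 returns no ack, the w=0 inserts are verified below by
+        # polling count_documents until all documents are visible.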
+ docs = [ + {"_id": 0, "ssn": "000"}, + {"_id": 1, "ssn": "111"}, + {"_id": 2, "ssn": "222"}, + {"_id": 3, "ssn": "333"}, + {"_id": 4, "ssn": "444"}, + {"_id": 5, "ssn": "555"}, + ] + encrypted_coll = client.pymongo_test.test + await encrypted_coll.insert_one(docs[0]) + await encrypted_coll.insert_many(docs[1:3]) + unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0)) + await unack.insert_one(docs[3]) + await unack.insert_many(docs[4:], ordered=False) + + async def count_documents(): + return await self.db.test.count_documents({}) == len(docs) + + await async_wait_until(count_documents, "insert documents with w=0") + + # Database.command auto decrypts. + res = await client.pymongo_test.command("find", "test", filter={"ssn": "000"}) + decrypted_docs = res["cursor"]["firstBatch"] + self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}]) + + # Collection.find auto decrypts. + decrypted_docs = await encrypted_coll.find().to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.find auto decrypts getMores. + decrypted_docs = await encrypted_coll.find(batch_size=1).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.aggregate auto decrypts. + decrypted_docs = await (await encrypted_coll.aggregate([])).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.aggregate auto decrypts getMores. + decrypted_docs = await (await encrypted_coll.aggregate([], batchSize=1)).to_list() + self.assertEqual(decrypted_docs, docs) + + # Collection.distinct auto decrypts. + decrypted_ssns = await encrypted_coll.distinct("ssn") + self.assertEqual(set(decrypted_ssns), {d["ssn"] for d in docs}) + + # Make sure the field is actually encrypted. + async for encrypted_doc in self.db.test.find(): + self.assertIsInstance(encrypted_doc["_id"], int) + self.assertEncrypted(encrypted_doc["ssn"]) + + # Attempt to encrypt an unencodable object. + with self.assertRaises(BSONError): + await encrypted_coll.insert_one({"unencodeable": object()}) + + async def test_auto_encrypt(self): + # Configure the encrypted field via jsonSchema. + json_schema = json_data("custom", "schema.json") + await create_with_schema(self.db.test, json_schema) + self.addAsyncCleanup(self.db.test.drop) + + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + await self._test_auto_encrypt(opts) + + async def test_auto_encrypt_local_schema_map(self): + # Configure the encrypted field via the local schema_map option. 
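+        # A client-side schema_map takes precedence over any remote jsonSchema,
+        # so a misbehaving server cannot substitute a weaker schema.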
+ schemas = {"pymongo_test.test": json_data("custom", "schema.json")} + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas) + + await self._test_auto_encrypt(opts) + + async def test_use_after_close(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + await client.admin.command("ping") + await client.aclose() + with self.assertRaisesRegex(InvalidOperation, "Cannot use AsyncMongoClient after close"): + await client.admin.command("ping") + + @unittest.skipIf( + not hasattr(os, "register_at_fork"), + "register_at_fork not available in this version of Python", + ) + @unittest.skipIf( + is_greenthread_patched(), + "gevent does not support POSIX-style forking.", + ) + @async_client_context.require_sync + async def test_fork(self): + self.skipTest("Test is flaky, PYTHON-4738") + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + async def target(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + await client.admin.command("ping") + + with self.fork(target): + await target() + + +class TestEncryptedBulkWrite(AsyncBulkTestBase, AsyncEncryptionIntegrationTest): + async def test_upsert_uuid_standard_encrypt(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + encrypted_coll = client.pymongo_test.test + coll = encrypted_coll.with_options(codec_options=options) + uuids = [uuid.uuid4() for _ in range(3)] + result = await coll.bulk_write( + [ + UpdateOne({"_id": uuids[0]}, {"$set": {"a": 0}}, upsert=True), + ReplaceOne({"a": 1}, {"_id": uuids[1]}, upsert=True), + # This is just here to make the counts right in all cases. 
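+                # (matching on _id guarantees the op upserts, keeping
+                # nUpserted at 3 in all cases.)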
+ ReplaceOne({"_id": uuids[2]}, {"_id": uuids[2]}, upsert=True), + ] + ) + self.assertEqualResponse( + { + "nMatched": 0, + "nModified": 0, + "nUpserted": 3, + "nInserted": 0, + "nRemoved": 0, + "upserted": [ + {"index": 0, "_id": uuids[0]}, + {"index": 1, "_id": uuids[1]}, + {"index": 2, "_id": uuids[2]}, + ], + }, + result.bulk_api_result, + ) + + +class TestClientMaxWireVersion(AsyncIntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + async def asyncSetUp(self): + await super().asyncSetUp() + + async def test_raise_unsupported_error(self): + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + msg = "find_raw_batches does not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await client.test.test.find_raw_batches({}) + + msg = "aggregate_raw_batches does not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await client.test.test.aggregate_raw_batches([]) + + if async_client_context.is_mongos: + msg = "Exhaust cursors are not supported by mongos" + else: + msg = "exhaust cursors do not support auto encryption" + with self.assertRaisesRegex(InvalidOperation, msg): + await anext(client.test.test.find(cursor_type=CursorType.EXHAUST)) + + +class TestExplicitSimple(AsyncEncryptionIntegrationTest): + async def test_encrypt_decrypt(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + # Use standard UUID representation. + key_vault = async_client_context.client.keyvault.get_collection( + "datakeys", codec_options=OPTS + ) + self.addAsyncCleanup(key_vault.drop) + + # Create the encrypted field's data key. + key_id = await client_encryption.create_data_key("local", key_alt_names=["name"]) + self.assertBinaryUUID(key_id) + self.assertTrue(await key_vault.find_one({"_id": key_id})) + + # Create an unused data key to make sure filtering works. + unused_key_id = await client_encryption.create_data_key("local", key_alt_names=["unused"]) + self.assertBinaryUUID(unused_key_id) + self.assertTrue(await key_vault.find_one({"_id": unused_key_id})) + + doc = {"_id": 0, "ssn": "000"} + encrypted_ssn = await client_encryption.encrypt( + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + + # Ensure encryption via key_alt_name for the same key produces the + # same output. + encrypted_ssn2 = await client_encryption.encrypt( + doc["ssn"], Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="name" + ) + self.assertEqual(encrypted_ssn, encrypted_ssn2) + + # Test encryption via UUID + encrypted_ssn3 = await client_encryption.encrypt( + doc["ssn"], + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=key_id.as_uuid(), + ) + self.assertEqual(encrypted_ssn, encrypted_ssn3) + + # Test decryption. 
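+        # decrypt() needs no key or algorithm arguments; the ciphertext
+        # embeds the _id of the data key used to encrypt it.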
+ decrypted_ssn = await client_encryption.decrypt(encrypted_ssn) + self.assertEqual(decrypted_ssn, doc["ssn"]) + + async def test_validation(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + + msg = "value to decrypt must be a bson.binary.Binary with subtype 6" + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.decrypt("str") # type: ignore[arg-type] + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.decrypt(Binary(b"123")) + + msg = "key_id must be a bson.binary.Binary with subtype 4" + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + with self.assertRaisesRegex(TypeError, msg): + await client_encryption.encrypt("str", algo, key_id=Binary(b"123")) + + async def test_bson_errors(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + + # Attempt to encrypt an unencodable object. + unencodable_value = object() + with self.assertRaises(BSONError): + await client_encryption.encrypt( + unencodable_value, + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=Binary.from_uuid(uuid.uuid4()), + ) + + async def test_codec_options(self): + with self.assertRaisesRegex(TypeError, "codec_options must be"): + self.create_client_encryption( + KMS_PROVIDERS, + "keyvault.datakeys", + async_client_context.client, + None, # type: ignore[arg-type] + ) + + opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) + client_encryption_legacy = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, opts + ) + + # Create the encrypted field's data key. + key_id = await client_encryption_legacy.create_data_key("local") + + # Encrypt a UUID with JAVA_LEGACY codec options. + value = uuid.uuid4() + encrypted_legacy = await client_encryption_legacy.encrypt( + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_value_legacy = await client_encryption_legacy.decrypt(encrypted_legacy) + self.assertEqual(decrypted_value_legacy, value) + + # Encrypt the same UUID with STANDARD codec options. + opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, opts + ) + encrypted_standard = await client_encryption.encrypt( + value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id + ) + decrypted_standard = await client_encryption.decrypt(encrypted_standard) + self.assertEqual(decrypted_standard, value) + + # Test that codec_options is applied during encryption. + self.assertNotEqual(encrypted_standard, encrypted_legacy) + # Test that codec_options is applied during decryption. + self.assertEqual( + await client_encryption_legacy.decrypt(encrypted_standard), Binary.from_uuid(value) + ) + self.assertNotEqual(await client_encryption.decrypt(encrypted_legacy), value) + + async def test_close(self): + client_encryption = self.create_client_encryption( + KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS + ) + await client_encryption.close() + # Close can be called multiple times. 
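+        # (close() is idempotent; only operations attempted after close
+        # should raise InvalidOperation.)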
+        await client_encryption.close()
+        algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic
+        msg = "Cannot use closed AsyncClientEncryption"
+        with self.assertRaisesRegex(InvalidOperation, msg):
+            await client_encryption.create_data_key("local")
+        with self.assertRaisesRegex(InvalidOperation, msg):
+            await client_encryption.encrypt("val", algo, key_alt_name="name")
+        with self.assertRaisesRegex(InvalidOperation, msg):
+            await client_encryption.decrypt(Binary(b"", 6))
+
+    async def test_with_statement(self):
+        async with self.create_client_encryption(
+            KMS_PROVIDERS, "keyvault.datakeys", async_client_context.client, OPTS
+        ) as client_encryption:
+            pass
+        with self.assertRaisesRegex(InvalidOperation, "Cannot use closed AsyncClientEncryption"):
+            await client_encryption.create_data_key("local")
+
+
+# Spec tests
+AWS_TEMP_NO_SESSION_CREDS = {
+    "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""),
+    "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""),
+}
+
+
+class AsyncTestSpec(AsyncSpecRunner):
+    @classmethod
+    @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed")
+    async def _setup_class(cls):
+        await super()._setup_class()
+
+    def parse_auto_encrypt_opts(self, opts):
+        """Parse clientOptions.autoEncryptOpts."""
+        opts = camel_to_snake_args(opts)
+        kms_providers = opts["kms_providers"]
+        if "aws" in kms_providers:
+            kms_providers["aws"] = AWS_CREDS
+            if not any(AWS_CREDS.values()):
+                self.skipTest("AWS environment credentials are not set")
+        if "awsTemporary" in kms_providers:
+            kms_providers["aws"] = AWS_TEMP_CREDS
+            del kms_providers["awsTemporary"]
+            if not any(AWS_TEMP_CREDS.values()):
+                self.skipTest("AWS Temp environment credentials are not set")
+        if "awsTemporaryNoSessionToken" in kms_providers:
+            kms_providers["aws"] = AWS_TEMP_NO_SESSION_CREDS
+            del kms_providers["awsTemporaryNoSessionToken"]
+            if not any(AWS_TEMP_NO_SESSION_CREDS.values()):
+                self.skipTest("AWS Temp environment credentials are not set")
+        if "azure" in kms_providers:
+            kms_providers["azure"] = AZURE_CREDS
+            if not any(AZURE_CREDS.values()):
+                self.skipTest("Azure environment credentials are not set")
+        if "gcp" in kms_providers:
+            kms_providers["gcp"] = GCP_CREDS
+            if not any(GCP_CREDS.values()):
+                self.skipTest("GCP environment credentials are not set")
+        if "kmip" in kms_providers:
+            kms_providers["kmip"] = KMIP_CREDS
+            opts["kms_tls_options"] = DEFAULT_KMS_TLS
+        if "key_vault_namespace" not in opts:
+            opts["key_vault_namespace"] = "keyvault.datakeys"
+        if "extra_options" in opts:
+            opts.update(camel_to_snake_args(opts.pop("extra_options")))
+
+        opts = dict(opts)
+        return AutoEncryptionOpts(**opts)
+
+    def parse_client_options(self, opts):
+        """Override clientOptions parsing to support autoEncryptOpts."""
+        encrypt_opts = opts.pop("autoEncryptOpts", None)
+        if encrypt_opts:
+            opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts)
+
+        return super().parse_client_options(opts)
+
+    def get_object_name(self, op):
+        """Default object is collection."""
+        return op.get("object", "collection")
+
+    def maybe_skip_scenario(self, test):
+        super().maybe_skip_scenario(test)
+        desc = test["description"].lower()
+        if (
+            "timeoutms applied to listcollections to get collection schema" in desc
+            and sys.platform in ("win32", "darwin")
+        ):
+            self.skipTest("PYTHON-3706 flaky test on Windows/macOS")
+        if "type=symbol" in desc:
+            self.skipTest("PyMongo does not support the symbol type")
+        if "timeoutms applied to listcollections to get collection 
schema" in desc and not _IS_SYNC: + self.skipTest("PYTHON-4844 flaky test on async") + + async def setup_scenario(self, scenario_def): + """Override a test's setup.""" + key_vault_data = scenario_def["key_vault_data"] + encrypted_fields = scenario_def["encrypted_fields"] + json_schema = scenario_def["json_schema"] + data = scenario_def["data"] + coll = async_client_context.client.get_database("keyvault", codec_options=OPTS)["datakeys"] + await coll.delete_many({}) + if key_vault_data: + await coll.insert_many(key_vault_data) + + db_name = self.get_scenario_db_name(scenario_def) + coll_name = self.get_scenario_coll_name(scenario_def) + db = async_client_context.client.get_database(db_name, codec_options=OPTS) + await db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + wc = WriteConcern(w="majority") + kwargs: Dict[str, Any] = {} + if json_schema: + kwargs["validator"] = {"$jsonSchema": json_schema} + kwargs["codec_options"] = OPTS + if not data: + kwargs["write_concern"] = wc + if encrypted_fields: + kwargs["encryptedFields"] = encrypted_fields + await db.create_collection(coll_name, **kwargs) + coll = db[coll_name] + if data: + # Load data. + await coll.with_options(write_concern=wc).insert_many(scenario_def["data"]) + + def allowable_errors(self, op): + """Override expected error classes.""" + errors = super().allowable_errors(op) + # An updateOne test expects encryption to error when no $ operator + # appears but pymongo raises a client side ValueError in this case. + if op["name"] == "updateOne": + errors += (ValueError,) + return errors + + +def create_test(scenario_def, test, name): + @async_client_context.require_test_commands + async def run_scenario(self): + await self.run_scenario(scenario_def, test) + + return run_scenario + + +test_creator = AsyncSpecTestCreator(create_test, AsyncTestSpec, os.path.join(SPEC_PATH, "legacy")) +test_creator.create_tests() + +if _HAVE_PYMONGOCRYPT: + globals().update( + generate_test_classes( + os.path.join(SPEC_PATH, "unified"), module=__name__, expected_failures=["mapReduce .*"] + ) + ) + +# Prose Tests +LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +GCP_KEY_ID = Binary(base64.b64decode(b"GCPAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) +KMIP_KEY_ID = Binary(base64.b64decode(b"KMIPAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) + + +async def create_with_schema(coll, json_schema): + """Create and return a Collection with a jsonSchema.""" + await coll.with_options(write_concern=WriteConcern(w="majority")).drop() + return await coll.database.create_collection( + coll.name, validator={"$jsonSchema": json_schema}, codec_options=OPTS + ) + + +async def create_key_vault(vault, *data_keys): + """Create the key vault collection with optional data keys.""" + vault = vault.with_options(write_concern=WriteConcern(w="majority"), codec_options=OPTS) + await vault.drop() + if data_keys: + await vault.insert_many(data_keys) + await vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) + return vault + + +class TestDataKeyDoubleEncryption(AsyncEncryptionIntegrationTest): + client_encrypted: AsyncMongoClient + client_encryption: AsyncClientEncryption + listener: OvertCommandListener + vault: Any + + KMS_PROVIDERS = ALL_KMS_PROVIDERS + + MASTER_KEYS = { + "aws": { + "region": "us-east-1", + "key": 
"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": None, + } + + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + await self.client.db.coll.drop() + self.vault = await create_key_vault(self.client.keyvault.datakeys) + + # Configure the encrypted field via the local schema_map option. + schemas = { + "db.coll": { + "bsonType": "object", + "properties": { + "encrypted_placeholder": { + "encrypt": { + "keyId": "/placeholder", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + } + } + }, + } + } + opts = AutoEncryptionOpts( + self.KMS_PROVIDERS, + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self.client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + self.client_encryption = self.create_client_encryption( + self.KMS_PROVIDERS, + "keyvault.datakeys", + self.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self.listener.reset() + + async def asyncTearDown(self) -> None: + await self.vault.drop() + + async def run_test(self, provider_name): + # Create data key. + master_key: Any = self.MASTER_KEYS[provider_name] + datakey_id = await self.client_encryption.create_data_key( + provider_name, master_key=master_key, key_alt_names=[f"{provider_name}_altname"] + ) + self.assertBinaryUUID(datakey_id) + cmd = self.listener.started_events[-1] + self.assertEqual("insert", cmd.command_name) + self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) + docs = await self.vault.find({"_id": datakey_id}).to_list() + self.assertEqual(len(docs), 1) + self.assertEqual(docs[0]["masterKey"]["provider"], provider_name) + + # Encrypt by key_id. + encrypted = await self.client_encryption.encrypt( + f"hello {provider_name}", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=datakey_id, + ) + self.assertEncrypted(encrypted) + await self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) + doc_decrypted = await self.client_encrypted.db.coll.find_one({"_id": provider_name}) + self.assertEqual(doc_decrypted["value"], f"hello {provider_name}") # type: ignore + + # Encrypt by key_alt_name. + encrypted_altname = await self.client_encryption.encrypt( + f"hello {provider_name}", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_alt_name=f"{provider_name}_altname", + ) + self.assertEqual(encrypted_altname, encrypted) + + # Explicitly encrypting an auto encrypted field. 
+ with self.assertRaisesRegex(EncryptionError, r"encrypt element of type"): + await self.client_encrypted.db.coll.insert_one({"encrypted_placeholder": encrypted}) + + async def test_data_key_local(self): + await self.run_test("local") + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_data_key_aws(self): + await self.run_test("aws") + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def test_data_key_azure(self): + await self.run_test("azure") + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_data_key_gcp(self): + await self.run_test("gcp") + + async def test_data_key_kmip(self): + await self.run_test("kmip") + + +class TestExternalKeyVault(AsyncEncryptionIntegrationTest): + @staticmethod + def kms_providers(): + return {"local": {"key": LOCAL_MASTER_KEY}} + + async def _test_external_key_vault(self, with_external_key_vault): + await self.client.db.coll.drop() + vault = await create_key_vault( + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + ) + self.addAsyncCleanup(vault.drop) + + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": json_data("external", "external-schema.json")} + if with_external_key_vault: + key_vault_client = await self.async_rs_or_single_client( + username="fake-user", password="fake-pwd" + ) + else: + key_vault_client = async_client_context.client + opts = AutoEncryptionOpts( + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + key_vault_client=key_vault_client, + ) + + client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + + client_encryption = self.create_client_encryption( + self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS + ) + + if with_external_key_vault: + # Authentication error. + with self.assertRaises(EncryptionError) as ctx: + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + # AuthenticationFailed error. + self.assertIsInstance(ctx.exception.cause, OperationFailure) + self.assertEqual(ctx.exception.cause.code, 18) + else: + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + if with_external_key_vault: + # Authentication error. + with self.assertRaises(EncryptionError) as ctx: + await client_encryption.encrypt( + "test", + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=LOCAL_KEY_ID, + ) + # AuthenticationFailed error. 
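+            # (server error code 18 is AuthenticationFailed.)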
+ self.assertIsInstance(ctx.exception.cause, OperationFailure) + self.assertEqual(ctx.exception.cause.code, 18) + else: + await client_encryption.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=LOCAL_KEY_ID + ) + + async def test_external_key_vault_1(self): + await self._test_external_key_vault(True) + + async def test_external_key_vault_2(self): + await self._test_external_key_vault(False) + + +class TestViews(AsyncEncryptionIntegrationTest): + @staticmethod + def kms_providers(): + return {"local": {"key": LOCAL_MASTER_KEY}} + + async def test_views_are_prohibited(self): + await self.client.db.view.drop() + await self.client.db.create_collection("view", viewOn="coll") + self.addAsyncCleanup(self.client.db.view.drop) + + opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") + client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, uuidRepresentation="standard" + ) + + with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): + await client_encrypted.db.view.insert_one({}) + + +class TestCorpus(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + + @staticmethod + def kms_providers(): + return ALL_KMS_PROVIDERS + + @staticmethod + def fix_up_schema(json_schema): + """Remove deprecated symbol/dbPointer types from json schema.""" + for key in list(json_schema["properties"]): + if "_symbol_" in key or "_dbPointer_" in key: + del json_schema["properties"][key] + return json_schema + + @staticmethod + def fix_up_curpus(corpus): + """Disallow deprecated symbol/dbPointer types from corpus test.""" + for key in corpus: + if "_symbol_" in key or "_dbPointer_" in key: + corpus[key]["allowed"] = False + return corpus + + @staticmethod + def fix_up_curpus_encrypted(corpus_encrypted, corpus): + """Fix the expected values for deprecated symbol/dbPointer types.""" + for key in corpus_encrypted: + if "_symbol_" in key or "_dbPointer_" in key: + corpus_encrypted[key] = copy.deepcopy(corpus[key]) + return corpus_encrypted + + async def _test_corpus(self, opts): + # Drop and create the collection 'db.coll' with jsonSchema. 
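+        # The corpus round-trips every BSON type through both explicit and
+        # automatic encryption and compares against pre-encrypted expectations.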
+ coll = await create_with_schema( + self.client.db.coll, self.fix_up_schema(json_data("corpus", "corpus-schema.json")) + ) + self.addAsyncCleanup(coll.drop) + + vault = await create_key_vault( + self.client.keyvault.datakeys, + json_data("corpus", "corpus-key-local.json"), + json_data("corpus", "corpus-key-aws.json"), + json_data("corpus", "corpus-key-azure.json"), + json_data("corpus", "corpus-key-gcp.json"), + json_data("corpus", "corpus-key-kmip.json"), + ) + self.addAsyncCleanup(vault.drop) + + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + client_encryption = self.create_client_encryption( + self.kms_providers(), + "keyvault.datakeys", + async_client_context.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + + corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) + corpus_copied: SON = SON() + for key, value in corpus.items(): + corpus_copied[key] = copy.deepcopy(value) + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): + continue + if value["method"] == "auto": + continue + if value["method"] == "explicit": + identifier = value["identifier"] + self.assertIn(identifier, ("id", "altname")) + kms = value["kms"] + self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) + if identifier == "id": + if kms == "local": + kwargs = {"key_id": LOCAL_KEY_ID} + elif kms == "aws": + kwargs = {"key_id": AWS_KEY_ID} + elif kms == "azure": + kwargs = {"key_id": AZURE_KEY_ID} + elif kms == "gcp": + kwargs = {"key_id": GCP_KEY_ID} + else: + kwargs = {"key_id": KMIP_KEY_ID} + else: + kwargs = {"key_alt_name": kms} + + self.assertIn(value["algo"], ("det", "rand")) + if value["algo"] == "det": + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + else: + algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random + + try: + encrypted_val = await client_encryption.encrypt( + value["value"], + algo, + **kwargs, # type: ignore[arg-type] + ) + if not value["allowed"]: + self.fail(f"encrypt should have failed: {key!r}: {value!r}") + corpus_copied[key]["value"] = encrypted_val + except Exception: + if value["allowed"]: + tb = traceback.format_exc() + self.fail(f"encrypt failed: {key!r}: {value!r}, traceback: {tb}") + + await client_encrypted.db.coll.insert_one(corpus_copied) + corpus_decrypted = await client_encrypted.db.coll.find_one() + self.assertEqual(corpus_decrypted, corpus) + + corpus_encrypted_expected = self.fix_up_curpus_encrypted( + json_data("corpus", "corpus-encrypted.json"), corpus + ) + corpus_encrypted_actual = await coll.find_one() + for key, value in corpus_encrypted_actual.items(): + if key in ( + "_id", + "altname_aws", + "altname_azure", + "altname_gcp", + "altname_local", + "altname_kmip", + ): + continue + + if value["algo"] == "det": + self.assertEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + elif value["algo"] == "rand" and value["allowed"]: + self.assertNotEqual(value["value"], corpus_encrypted_expected[key]["value"], key) + + if value["allowed"]: + decrypt_actual = await client_encryption.decrypt(value["value"]) + decrypt_expected = await client_encryption.decrypt( + corpus_encrypted_expected[key]["value"] + ) + self.assertEqual(decrypt_actual, decrypt_expected, key) + else: + self.assertEqual(value["value"], corpus[key]["value"], key) + + async def test_corpus(self): + opts = AutoEncryptionOpts( + self.kms_providers(), "keyvault.datakeys", kms_tls_options=DEFAULT_KMS_TLS + ) + await self._test_corpus(opts) + + async def 
test_corpus_local_schema(self): + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": self.fix_up_schema(json_data("corpus", "corpus-schema.json"))} + opts = AutoEncryptionOpts( + self.kms_providers(), + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=DEFAULT_KMS_TLS, + ) + await self._test_corpus(opts) + + +_2_MiB = 2097152 +_16_MiB = 16777216 + + +class TestBsonSizeBatches(AsyncEncryptionIntegrationTest): + """Prose tests for BSON size limits and batch splitting.""" + + coll: AsyncCollection + coll_encrypted: AsyncCollection + client_encrypted: AsyncMongoClient + listener: OvertCommandListener + + async def asyncSetUp(self): + await super().asyncSetUp() + db = async_client_context.client.db + self.coll = db.coll + await self.coll.drop() + # Configure the encrypted 'db.coll' collection via jsonSchema. + json_schema = json_data("limits", "limits-schema.json") + await db.create_collection( + "coll", + validator={"$jsonSchema": json_schema}, + codec_options=OPTS, + write_concern=WriteConcern(w="majority"), + ) + + # Create the key vault. + coll = async_client_context.client.get_database( + "keyvault", write_concern=WriteConcern(w="majority"), codec_options=OPTS + )["datakeys"] + await coll.drop() + await coll.insert_one(json_data("limits", "limits-key.json")) + + opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") + self.listener = OvertCommandListener() + self.client_encrypted = await self.async_rs_or_single_client( + auto_encryption_opts=opts, event_listeners=[self.listener] + ) + self.coll_encrypted = self.client_encrypted.db.coll + + async def asyncTearDown(self) -> None: + await self.coll_encrypted.drop() + + async def test_01_insert_succeeds_under_2MiB(self): + doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "over_2mib_under_16mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_02_insert_succeeds_over_2MiB_post_encryption(self): + doc = {"_id": "encryption_exceeds_2mib", "unencrypted": "a" * ((2**21) - 2000)} + doc.update(json_data("limits", "limits-doc.json")) + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "encryption_exceeds_2mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_03_bulk_batch_split(self): + doc1 = {"_id": "over_2mib_1", "unencrypted": "a" * _2_MiB} + doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} + self.listener.reset() + await self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) + + async def test_04_bulk_batch_split(self): + limits_doc = json_data("limits", "limits-doc.json") + doc1 = {"_id": "encryption_exceeds_2mib_1", "unencrypted": "a" * (_2_MiB - 2000)} + doc1.update(limits_doc) + doc2 = {"_id": "encryption_exceeds_2mib_2", "unencrypted": "a" * (_2_MiB - 2000)} + doc2.update(limits_doc) + self.listener.reset() + await self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) + + async def test_05_insert_succeeds_just_under_16MiB(self): + doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. 
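+        # (each document stays under maxBsonObjectSize, so the write succeeds.)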
+ doc["_id"] = "under_16mib_bulk" + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + + async def test_06_insert_fails_over_16MiB(self): + limits_doc = json_data("limits", "limits-doc.json") + doc = {"_id": "encryption_exceeds_16mib", "unencrypted": "a" * (_16_MiB - 2000)} + doc.update(limits_doc) + + with self.assertRaisesRegex(WriteError, "object to insert too large"): + await self.coll_encrypted.insert_one(doc) + + # Same with bulk_write. + doc["_id"] = "encryption_exceeds_16mib_bulk" + with self.assertRaises(BulkWriteError) as ctx: + await self.coll_encrypted.bulk_write([InsertOne(doc)]) + err = ctx.exception.details["writeErrors"][0] + self.assertIn(err["code"], [2, 10334]) + self.assertIn("object to insert too large", err["errmsg"]) + + +class TestCustomEndpoint(AsyncEncryptionIntegrationTest): + """Prose tests for creating data keys with a custom endpoint.""" + + @unittest.skipUnless( + any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), + "No environment credentials are set", + ) + async def asyncSetUp(self): + await super().asyncSetUp() + kms_providers = { + "aws": AWS_CREDS, + "azure": AZURE_CREDS, + "gcp": GCP_CREDS, + "kmip": KMIP_CREDS, + } + self.client_encryption = self.create_client_encryption( + kms_providers=kms_providers, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + + kms_providers_invalid = copy.deepcopy(kms_providers) + kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.invalid:5698" + self.client_encryption_invalid = self.create_client_encryption( + kms_providers=kms_providers_invalid, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=OPTS, + kms_tls_options=DEFAULT_KMS_TLS, + ) + self._kmip_host_error = None + self._invalid_host_error = None + + async def run_test_expected_success(self, provider_name, master_key): + data_key_id = await self.client_encryption.create_data_key( + provider_name, master_key=master_key + ) + encrypted = await self.client_encryption.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", await self.client_encryption.decrypt(encrypted)) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_01_aws_region_key(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_02_aws_region_key_endpoint(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com", + }, + ) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_03_aws_region_key_endpoint_port(self): + await self.run_test_expected_success( + "aws", + { + "region": "us-east-1", + "key": ( + "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0" + ), + "endpoint": "kms.us-east-1.amazonaws.com:443", + }, + ) + + async def 
test_04_kmip_endpoint_invalid_port(self): + master_key = {"keyId": "1", "endpoint": "localhost:12345"} + with self.assertRaisesRegex(EncryptionError, "localhost:12345"): + await self.client_encryption.create_data_key("kmip", master_key=master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_05_aws_endpoint_wrong_region(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "kms.us-east-2.amazonaws.com", + } + # The full error should be something like: + # "Credential should be scoped to a valid region, not 'us-east-1'" + # but we only check for EncryptionError to avoid breaking on slight + # changes to AWS' error message. + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("aws", master_key=master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def test_06_aws_endpoint_invalid_host(self): + master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + "endpoint": "doesnotexist.invalid", + } + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption.create_data_key("aws", master_key=master_key) + + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def test_07_azure(self): + master_key = { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + } + await self.run_test_expected_success("azure", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption_invalid.create_data_key("azure", master_key=master_key) + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_08_gcp_valid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "cloudkms.googleapis.com:443", + } + await self.run_test_expected_success("gcp", master_key) + + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaisesRegex(EncryptionError, self.invalid_host_error): + await self.client_encryption_invalid.create_data_key("gcp", master_key=master_key) + + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def test_09_gcp_invalid_endpoint(self): + master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + "endpoint": "doesnotexist.invalid:443", + } + + # The full error should be something like: + # "Invalid KMS response, no access_token returned. 
HTTP status=200" + with self.assertRaisesRegex(EncryptionError, "Invalid KMS response"): + await self.client_encryption.create_data_key("gcp", master_key=master_key) + + def dns_error(self, host, port): + # The full error should be something like: + # "[Errno 8] nodename nor servname provided, or not known" + with self.assertRaises(Exception) as ctx: + socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM) + return re.escape(str(ctx.exception)) + + @property + def invalid_host_error(self): + if self._invalid_host_error is None: + self._invalid_host_error = self.dns_error("doesnotexist.invalid", 443) + return self._invalid_host_error + + @property + def kmip_host_error(self): + if self._kmip_host_error is None: + self._kmip_host_error = self.dns_error("doesnotexist.local", 5698) + return self._kmip_host_error + + async def test_10_kmip_invalid_endpoint(self): + key = {"keyId": "1"} + await self.run_test_expected_success("kmip", key) + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + await self.client_encryption_invalid.create_data_key("kmip", key) + + async def test_11_kmip_master_key_endpoint(self): + key = {"keyId": "1", "endpoint": KMIP_CREDS["endpoint"]} + await self.run_test_expected_success("kmip", key) + # Override invalid endpoint: + data_key_id = await self.client_encryption_invalid.create_data_key("kmip", master_key=key) + encrypted = await self.client_encryption_invalid.encrypt( + "test", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=data_key_id + ) + self.assertEqual("test", await self.client_encryption_invalid.decrypt(encrypted)) + + async def test_12_kmip_master_key_invalid_endpoint(self): + key = {"keyId": "1", "endpoint": "doesnotexist.invalid:5698"} + with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): + await self.client_encryption.create_data_key("kmip", key) + + +class AzureGCPEncryptionTestMixin(AsyncEncryptionIntegrationTest): + DEK = None + KMS_PROVIDER_MAP = None + KEYVAULT_DB = "keyvault" + KEYVAULT_COLL = "datakeys" + client: AsyncMongoClient + + async def _setup(self): + keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) + await create_key_vault(keyvault, self.DEK) + + async def _test_explicit(self, expectation): + await self._setup() + client_encryption = self.create_client_encryption( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), + async_client_context.client, + OPTS, + ) + + ciphertext = await client_encryption.encrypt( + "string0", + algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=self.DEK["_id"], + ) + + self.assertEqual(bytes(ciphertext), base64.b64decode(expectation)) + self.assertEqual(await client_encryption.decrypt(ciphertext), "string0") + + async def _test_automatic(self, expectation_extjson, payload): + await self._setup() + encrypted_db = "db" + encrypted_coll = "coll" + keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) + + encryption_opts = AutoEncryptionOpts( + self.KMS_PROVIDER_MAP, # type: ignore[arg-type] + keyvault_namespace, + schema_map=self.SCHEMA_MAP, + ) + + insert_listener = AllowListEventListener("insert") + client = await self.async_rs_or_single_client( + auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] + ) + + coll = client.get_database(encrypted_db).get_collection( + encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") + ) + await coll.drop() + + expected_document = 
json_util.loads(expectation_extjson, json_options=JSON_OPTS) + + await coll.insert_one(payload) + event = insert_listener.started_events[0] + inserted_doc = event.command["documents"][0] + + for key, value in expected_document.items(): + self.assertEqual(value, inserted_doc[key]) + + output_doc = await coll.find_one({}) + for key, value in payload.items(): + self.assertEqual(output_doc[key], value) + + +class TestAzureEncryption(AzureGCPEncryptionTestMixin, AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") + async def asyncSetUp(self): + self.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + self.DEK = json_data(BASE, "custom", "azure-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + await super().asyncSetUp() + + async def test_explicit(self): + return await self._test_explicit( + "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==" + ) + + async def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06"} + }}""" + ) + return await self._test_automatic(expected_document_extjson, {"secret_azure": "string0"}) + + +class TestGCPEncryption(AzureGCPEncryptionTestMixin, AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") + async def asyncSetUp(self): + self.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + self.DEK = json_data(BASE, "custom", "gcp-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + await super().asyncSetUp() + + async def test_explicit(self): + return await self._test_explicit( + "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==" + ) + + async def test_automatic(self): + expected_document_extjson = textwrap.dedent( + """ + {"secret_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06"} + }}""" + ) + return await self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#deadlock-tests +class TestDeadlockProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.client_test = await self.async_rs_or_single_client( + maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" + ) + + self.client_keyvault_listener = OvertCommandListener() + self.client_keyvault = await self.async_rs_or_single_client( + maxPoolSize=1, + readConcernLevel="majority", + w="majority", + event_listeners=[self.client_keyvault_listener], + ) + + await self.client_test.keyvault.datakeys.drop() + await self.client_test.db.coll.drop() + await self.client_test.keyvault.datakeys.insert_one( + json_data("external", "external-key.json") + ) + _ = await self.client_test.db.create_collection( + "coll", + validator={"$jsonSchema": json_data("external", "external-schema.json")}, + codec_options=OPTS, + ) + + client_encryption = self.create_client_encryption( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + 
key_vault_client=self.client_test, + codec_options=OPTS, + ) + self.ciphertext = await client_encryption.encrypt( + "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" + ) + + self.client_listener = OvertCommandListener() + self.topology_listener = TopologyEventListener() + self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") + + async def _run_test(self, max_pool_size, auto_encryption_opts): + client_encrypted = await self.async_rs_or_single_client( + readConcernLevel="majority", + w="majority", + maxPoolSize=max_pool_size, + auto_encryption_opts=auto_encryption_opts, + event_listeners=[self.client_listener, self.topology_listener], + ) + + if auto_encryption_opts._bypass_auto_encryption is True: + await self.client_test.db.coll.insert_one({"_id": 0, "encrypted": self.ciphertext}) + elif auto_encryption_opts._bypass_auto_encryption is False: + await client_encrypted.db.coll.insert_one({"_id": 0, "encrypted": "string0"}) + else: + raise RuntimeError("bypass_auto_encryption must be a bool") + + result = await client_encrypted.db.coll.find_one({"_id": 0}) + self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) + + async def test_case_1(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 4) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "insert") + self.assertEqual(cev[2].database_name, "db") + self.assertEqual(cev[3].command_name, "find") + self.assertEqual(cev[3].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_2(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_3(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 2) + + async def test_case_4(self): + await self._run_test( + max_pool_size=1, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = 
self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_5(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 5) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "listCollections") + self.assertEqual(cev[1].database_name, "keyvault") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "keyvault") + self.assertEqual(cev[3].command_name, "insert") + self.assertEqual(cev[3].database_name, "db") + self.assertEqual(cev[4].command_name, "find") + self.assertEqual(cev[4].database_name, "db") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_6(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=False, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 3) + self.assertEqual(cev[0].command_name, "listCollections") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "insert") + self.assertEqual(cev[1].database_name, "db") + self.assertEqual(cev[2].command_name, "find") + self.assertEqual(cev[2].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_7(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=None + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 2) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + self.assertEqual(cev[1].command_name, "find") + self.assertEqual(cev[1].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + async def test_case_8(self): + await self._run_test( + max_pool_size=None, + auto_encryption_opts=AutoEncryptionOpts( + *self.optargs, bypass_auto_encryption=True, key_vault_client=self.client_keyvault + ), + ) + + cev = self.client_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "db") + + cev = self.client_keyvault_listener.started_events + self.assertEqual(len(cev), 1) + self.assertEqual(cev[0].command_name, "find") + self.assertEqual(cev[0].database_name, "keyvault") + + self.assertEqual(len(self.topology_listener.results["opened"]), 1) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#14-decryption-events +class TestDecryptProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + 
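+        # The setup below encrypts "hello", then builds a malformed ciphertext
+        # by flipping the low bit of the final byte. Only that byte differs, so
+        # the value still parses as BSON Binary subtype 6, but integrity
+        # verification during decryption should fail, which is what
+        # test_03_decrypt_error relies on.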
await super().asyncSetUp() + self.client = async_client_context.client + await self.client.db.drop_collection("decryption_events") + await create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + self.client_encryption = self.create_client_encryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + keyID = await self.client_encryption.create_data_key("local") + self.cipher_text = await self.client_encryption.encrypt( + "hello", key_id=keyID, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + self.malformed_cipher_text = self.cipher_text[:-1] + (self.cipher_text[-1] ^ 1).to_bytes( + 1, "big" + ) + self.malformed_cipher_text = Binary(self.malformed_cipher_text, 6) + opts = AutoEncryptionOpts( + key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map + ) + self.listener = AllowListEventListener("aggregate") + self.encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] + ) + + async def test_01_command_error(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(OperationFailure): + await self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + for event in self.listener.failed_events: + self.assertEqual(event.failure["code"], 123) + + async def test_02_network_error(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": {"errorCode": 123, "closeConnection": True, "failCommands": ["aggregate"]}, + } + ): + with self.assertRaises(AutoReconnect): + await self.encrypted_client.db.decryption_events.aggregate([]) + self.assertEqual(len(self.listener.failed_events), 1) + self.assertEqual(self.listener.failed_events[0].command_name, "aggregate") + + async def test_03_decrypt_error(self): + await self.encrypted_client.db.decryption_events.insert_one( + {"encrypted": self.malformed_cipher_text} + ) + with self.assertRaises(EncryptionError): + await anext(await self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual( + event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text + ) + + async def test_04_decrypt_success(self): + await self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) + await anext(await self.encrypted_client.db.decryption_events.aggregate([])) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) + self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#bypass-spawning-mongocryptd +class TestBypassSpawningMongocryptdProse(AsyncEncryptionIntegrationTest): + @unittest.skipIf( + os.environ.get("TEST_CRYPT_SHARED"), + "this prose test does not work when crypt_shared is on a system dynamic " + "library search path.", + ) + async def test_mongocryptd_bypass_spawn(self): + # Lower the mongocryptd timeout to reduce the test run time. 
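+        # _MONGOCRYPTD_TIMEOUT_MS bounds how long the driver waits for
+        # mongocryptd to become selectable. With mongocryptd_bypass_spawn=True
+        # and nothing listening on port 27027, the insert below should fail
+        # quickly with an EncryptionError whose message mentions "Timeout".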
+ self._original_timeout = encryption._MONGOCRYPTD_TIMEOUT_MS + encryption._MONGOCRYPTD_TIMEOUT_MS = 500 + + def reset_timeout(): + encryption._MONGOCRYPTD_TIMEOUT_MS = self._original_timeout + + self.addCleanup(reset_timeout) + + # Configure the encrypted field via the local schema_map option. + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + schema_map=schemas, + mongocryptd_bypass_spawn=True, + mongocryptd_uri="mongodb://localhost:27027/", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + with self.assertRaisesRegex(EncryptionError, "Timeout"): + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + + async def test_bypassAutoEncryption(self): + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + "keyvault.datakeys", + bypass_auto_encryption=True, + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=27027", + ], + ) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + await client_encrypted.db.coll.insert_one({"unencrypted": "test"}) + # Validate that mongocryptd was not spawned: + mongocryptd_client = self.simple_client( + "mongodb://localhost:27027/?serverSelectionTimeoutMS=500" + ) + with self.assertRaises(ServerSelectionTimeoutError): + await mongocryptd_client.admin.command("ping") + + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_via_loading_shared_library(self): + await create_key_vault( + async_client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000", + mongocryptd_spawn_args=[ + "--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=47021", + ], + crypt_shared_lib_required=True, + ) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + await client_encrypted.db.coll.drop() + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + self.assertEncrypted((await async_client_context.client.db.coll.find_one({}))["encrypted"]) + no_mongocryptd_client = self.simple_client( + host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" + ) + with self.assertRaises(ServerSelectionTimeoutError): + await no_mongocryptd_client.db.command("ping") + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + async def test_client_via_loading_shared_library(self): + connection_established = False + + class Handler(socketserver.BaseRequestHandler): + def handle(self): + nonlocal connection_established + connection_established = True + + server = socketserver.TCPServer(("localhost", 47021), Handler) + + def listener(): + with server: + server.serve_forever(poll_interval=0.05) # Short poll timeout to speed up the test + + listener_t = Thread(target=listener) + listener_t.start() + 
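+        # The TCP server above stands in for mongocryptd: if the driver were to
+        # ignore the loaded crypt_shared library and open its internal
+        # mongocryptd connection to localhost:47021, Handler.handle would fire
+        # and set connection_established, failing the final assertion.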
await create_key_vault( + async_client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021", + crypt_shared_lib_required=False, + ) + client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + await client_encrypted.db.coll.drop() + await client_encrypted.db.coll.insert_one({"encrypted": "test"}) + server.shutdown() + listener_t.join() + self.assertFalse(connection_established, "a connection was established on port 47021") + + +# https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests +class TestKmsTLSProse(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + self.patch_system_certs(CA_PEM) + self.client_encrypted = self.create_client_encryption( + {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS + ) + + async def test_invalid_kms_certificate_expired(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:9000", + } + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encrypted.create_data_key("aws", master_key=key) + + async def test_invalid_hostname_in_kms_certificate(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "mongodb://127.0.0.1:9001", + } + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + ): + await self.client_encrypted.create_data_key("aws", master_key=key) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#kms-tls-options-tests +class TestKmsTLSOptions(AsyncEncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + async def asyncSetUp(self): + await super().asyncSetUp() + # 1, create client with only tlsCAFile. + providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9002" + providers["gcp"]["endpoint"] = "127.0.0.1:9002" + kms_tls_opts_ca_only = { + "aws": {"tlsCAFile": CA_PEM}, + "azure": {"tlsCAFile": CA_PEM}, + "gcp": {"tlsCAFile": CA_PEM}, + "kmip": {"tlsCAFile": CA_PEM}, + } + self.client_encryption_no_client_cert = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only + ) + # 2, same providers as above but with tlsCertificateKeyFile. 
+        kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only)
+        for p in kms_tls_opts:
+            kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM
+        self.client_encryption_with_tls = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts
+        )
+        # 3, update endpoints to expired host.
+        providers: dict = copy.deepcopy(providers)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9000"
+        providers["gcp"]["endpoint"] = "127.0.0.1:9000"
+        providers["kmip"]["endpoint"] = "127.0.0.1:9000"
+        self.client_encryption_expired = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only
+        )
+        # 4, update endpoints to invalid host.
+        providers: dict = copy.deepcopy(providers)
+        providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9001"
+        providers["gcp"]["endpoint"] = "127.0.0.1:9001"
+        providers["kmip"]["endpoint"] = "127.0.0.1:9001"
+        self.client_encryption_invalid_hostname = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only
+        )
+        # Errors when client has no cert, some examples:
+        # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623)
+        self.cert_error = (
+            "certificate required|SSL handshake failed|"
+            "KMS connection closed|Connection reset by peer|ECONNRESET|EPIPE"
+        )
+        # On Python 3.10+ this error might be:
+        # EOF occurred in violation of protocol (_ssl.c:2384)
+        if sys.version_info[:2] >= (3, 10):
+            self.cert_error += "|EOF"
+        # On Windows this error might be:
+        # [WinError 10054] An existing connection was forcibly closed by the remote host
+        if sys.platform == "win32":
+            self.cert_error += "|forcibly closed"
+        # 5, test named KMS providers.
+        providers = {
+            "aws:no_client_cert": AWS_CREDS,
+            "azure:no_client_cert": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS},
+            "gcp:no_client_cert": {"endpoint": "127.0.0.1:9002", **GCP_CREDS},
+            "kmip:no_client_cert": KMIP_CREDS,
+            "aws:with_tls": AWS_CREDS,
+            "azure:with_tls": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS},
+            "gcp:with_tls": {"endpoint": "127.0.0.1:9002", **GCP_CREDS},
+            "kmip:with_tls": KMIP_CREDS,
+        }
+        no_cert = {"tlsCAFile": CA_PEM}
+        with_cert = {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}
+        kms_tls_opts_named = {
+            "aws:no_client_cert": no_cert,
+            "azure:no_client_cert": no_cert,
+            "gcp:no_client_cert": no_cert,
+            "kmip:no_client_cert": no_cert,
+            "aws:with_tls": with_cert,
+            "azure:with_tls": with_cert,
+            "gcp:with_tls": with_cert,
+            "kmip:with_tls": with_cert,
+        }
+        self.client_encryption_with_names = self.create_client_encryption(
+            providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_named
+        )
+
+    async def test_01_aws(self):
+        key = {
+            "region": "us-east-1",
+            "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0",
+            "endpoint": "127.0.0.1:9002",
+        }
+        with self.assertRaisesRegex(EncryptionError, self.cert_error):
+            await self.client_encryption_no_client_cert.create_data_key("aws", key)
+        # "parse error" here means that the TLS handshake succeeded.
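+        # (The mock KMS server on port 9002 requires a client certificate, and
+        # it only gets as far as rejecting the request body after the TLS
+        # handshake completes, so reaching a KMS "parse error" implies the
+        # tlsCertificateKeyFile was accepted.)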
+ with self.assertRaisesRegex(EncryptionError, "parse error"): + await self.client_encryption_with_tls.create_data_key("aws", key) + # Some examples: + # certificate verify failed: certificate has expired (_ssl.c:1129) + # amazon1-2018 Python 3.6: certificate verify failed (_ssl.c:852) + key["endpoint"] = "127.0.0.1:9000" + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("aws", key) + # Some examples: + # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. (_ssl.c:1129)" + # hostname '127.0.0.1' doesn't match 'wronghost.com' + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + key["endpoint"] = "127.0.0.1:9001" + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + await self.client_encryption_invalid_hostname.create_data_key("aws", key) + + async def test_02_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("azure", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_tls.create_data_key("azure", key) + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("azure", key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + await self.client_encryption_invalid_hostname.create_data_key("azure", key) + + async def test_03_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("gcp", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_tls.create_data_key("gcp", key) + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("gcp", key) + # Invalid cert hostname error. + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + await self.client_encryption_invalid_hostname.create_data_key("gcp", key) + + async def test_04_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_no_client_cert.create_data_key("kmip") + await self.client_encryption_with_tls.create_data_key("kmip") + # Expired cert error. + with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): + await self.client_encryption_expired.create_data_key("kmip") + # Invalid cert hostname error. 
+ with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): + await self.client_encryption_invalid_hostname.create_data_key("kmip") + + async def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): + providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} + options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} + encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options + ) + ctx = encryption._io_callbacks._kms_ssl_contexts["aws"] + if not hasattr(ctx, "check_ocsp_endpoint"): + raise self.skipTest("OCSP not enabled") + self.assertFalse(ctx.check_ocsp_endpoint) + + async def test_06_named_kms_providers_apply_tls_options_aws(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:9002", + } + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("aws:no_client_cert", key) + # "parse error" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "parse error"): + await self.client_encryption_with_names.create_data_key("aws:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("azure:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_names.create_data_key("azure:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("gcp:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + await self.client_encryption_with_names.create_data_key("gcp:with_tls", key) + + async def test_06_named_kms_providers_apply_tls_options_kmip(self): + # Missing client cert error. 
+ with self.assertRaisesRegex(EncryptionError, self.cert_error): + await self.client_encryption_with_names.create_data_key("kmip:no_client_cert") + await self.client_encryption_with_names.create_data_key("kmip:with_tls") + + +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.md#unique-index-on-keyaltnames +class TestUniqueIndexOnKeyAltNamesProse(AsyncEncryptionIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.client = async_client_context.client + await create_key_vault(self.client.keyvault.datakeys) + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() + ) + self.def_key_id = await self.client_encryption.create_data_key( + "local", key_alt_names=["def"] + ) + + async def test_01_create_key(self): + await self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + await self.client_encryption.create_data_key("local", key_alt_names=["abc"]) + with self.assertRaisesRegex(EncryptionError, "E11000 duplicate key error collection"): + await self.client_encryption.create_data_key("local", key_alt_names=["def"]) + + async def test_02_add_key_alt_name(self): + key_id = await self.client_encryption.create_data_key("local") + await self.client_encryption.add_key_alt_name(key_id, "abc") + key_doc = await self.client_encryption.add_key_alt_name(key_id, "abc") + assert key_doc["keyAltNames"] == ["abc"] + with self.assertRaisesRegex(DuplicateKeyError, "E11000 duplicate key error collection"): + await self.client_encryption.add_key_alt_name(key_id, "def") + key_doc = await self.client_encryption.add_key_alt_name(self.def_key_id, "def") + assert key_doc["keyAltNames"] == ["def"] + + +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.md#explicit-encryption +class TestExplicitQueryableEncryption(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.db = self.client.test_queryable_encryption + await self.client.drop_database(self.db) + await self.db.command( + "create", "explicit_encryption", encryptedFields=self.encrypted_fields + ) + key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + async def test_01_insert_encrypted_indexed_and_find(self): + val = "encrypted indexed value" + insert_payload = await self.client_encryption.encrypt( + val, Algorithm.INDEXED, self.key1_id, contention_factor=0 + ) + await self.encrypted_client[self.db.name].explicit_encryption.insert_one( + {"encryptedIndexed": insert_payload} + ) + + 
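+        # A find payload generated with contention_factor=0 only targets the
+        # first contention block, so it can miss documents inserted under a
+        # higher contention factor; test_02 below demonstrates exactly that.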
+        find_payload = await self.client_encryption.encrypt(
+            val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0
+        )
+        docs = (
+            await self.encrypted_client[self.db.name]
+            .explicit_encryption.find({"encryptedIndexed": find_payload})
+            .to_list()
+        )
+
+        self.assertEqual(len(docs), 1)
+        self.assertEqual(docs[0]["encryptedIndexed"], val)
+
+    async def test_02_insert_encrypted_indexed_and_find_contention(self):
+        val = "encrypted indexed value"
+        contention = 10
+        for _ in range(contention):
+            insert_payload = await self.client_encryption.encrypt(
+                val, Algorithm.INDEXED, self.key1_id, contention_factor=contention
+            )
+            await self.encrypted_client[self.db.name].explicit_encryption.insert_one(
+                {"encryptedIndexed": insert_payload}
+            )
+
+        find_payload = await self.client_encryption.encrypt(
+            val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0
+        )
+        docs = (
+            await self.encrypted_client[self.db.name]
+            .explicit_encryption.find({"encryptedIndexed": find_payload})
+            .to_list()
+        )
+
+        self.assertLessEqual(len(docs), 10)
+        for doc in docs:
+            self.assertEqual(doc["encryptedIndexed"], val)
+
+        # Find with contention_factor will return all 10 documents.
+        find_payload = await self.client_encryption.encrypt(
+            val,
+            Algorithm.INDEXED,
+            self.key1_id,
+            query_type=QueryType.EQUALITY,
+            contention_factor=contention,
+        )
+        docs = (
+            await self.encrypted_client[self.db.name]
+            .explicit_encryption.find({"encryptedIndexed": find_payload})
+            .to_list()
+        )
+
+        self.assertEqual(len(docs), 10)
+        for doc in docs:
+            self.assertEqual(doc["encryptedIndexed"], val)
+
+    async def test_03_insert_encrypted_unindexed(self):
+        val = "encrypted unindexed value"
+        insert_payload = await self.client_encryption.encrypt(
+            val, Algorithm.UNINDEXED, self.key1_id
+        )
+        await self.encrypted_client[self.db.name].explicit_encryption.insert_one(
+            {"_id": 1, "encryptedUnindexed": insert_payload}
+        )
+
+        docs = (
+            await self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1}).to_list()
+        )
+        self.assertEqual(len(docs), 1)
+        self.assertEqual(docs[0]["encryptedUnindexed"], val)
+
+    async def test_04_roundtrip_encrypted_indexed(self):
+        val = "encrypted indexed value"
+        payload = await self.client_encryption.encrypt(
+            val, Algorithm.INDEXED, self.key1_id, contention_factor=0
+        )
+        decrypted = await self.client_encryption.decrypt(payload)
+        self.assertEqual(decrypted, val)
+
+    async def test_05_roundtrip_encrypted_unindexed(self):
+        val = "encrypted unindexed value"
+        payload = await self.client_encryption.encrypt(val, Algorithm.UNINDEXED, self.key1_id)
+        decrypted = await self.client_encryption.decrypt(payload)
+        self.assertEqual(decrypted, val)
+
+
+# https://github.com/mongodb/specifications/blob/527e22d5090ec48bf1e144c45fc831de0f1935f6/source/client-side-encryption/tests/README.md#25-test-lookup
+class TestLookupProse(AsyncEncryptionIntegrationTest):
+    @async_client_context.require_no_standalone
+    @async_client_context.require_version_min(7, 0, -1)
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        encrypted_client = await self.async_rs_or_single_client(
+            auto_encryption_opts=AutoEncryptionOpts(
+                key_vault_namespace="db.keyvault",
+                kms_providers={"local": {"key": LOCAL_MASTER_KEY}},
+            )
+        )
+        await encrypted_client.drop_database("db")
+
+        key_doc = json_data("etc", "data", "lookup", "key-doc.json")
+        await create_key_vault(encrypted_client.db.keyvault, key_doc)
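+        # The collections created below pair up three schema flavors for
+        # $lookup: csfle/csfle2 use $jsonSchema validators (automatic CSFLE),
+        # qe/qe2 use encryptedFields (Queryable Encryption), and
+        # no_schema/no_schema2 are plain. The tests join across these pairs to
+        # check which combinations are supported on server 8.1+.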
self.addAsyncCleanup(async_client_context.client.drop_database, "db") + + await encrypted_client.db.create_collection( + "csfle", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle.json")}, + ) + await encrypted_client.db.create_collection( + "csfle2", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle2.json")}, + ) + await encrypted_client.db.create_collection( + "qe", encryptedFields=json_data("etc", "data", "lookup", "schema-qe.json") + ) + await encrypted_client.db.create_collection( + "qe2", encryptedFields=json_data("etc", "data", "lookup", "schema-qe2.json") + ) + await encrypted_client.db.create_collection("no_schema") + await encrypted_client.db.create_collection("no_schema2") + + unencrypted_client = await self.async_rs_or_single_client() + + await encrypted_client.db.csfle.insert_one({"csfle": "csfle"}) + doc = await unencrypted_client.db.csfle.find_one() + self.assertIsInstance(doc["csfle"], Binary) + await encrypted_client.db.csfle2.insert_one({"csfle2": "csfle2"}) + doc = await unencrypted_client.db.csfle2.find_one() + self.assertIsInstance(doc["csfle2"], Binary) + await encrypted_client.db.qe.insert_one({"qe": "qe"}) + doc = await unencrypted_client.db.qe.find_one() + self.assertIsInstance(doc["qe"], Binary) + await encrypted_client.db.qe2.insert_one({"qe2": "qe2"}) + doc = await unencrypted_client.db.qe2.find_one() + self.assertIsInstance(doc["qe2"], Binary) + await encrypted_client.db.no_schema.insert_one({"no_schema": "no_schema"}) + await encrypted_client.db.no_schema2.insert_one({"no_schema2": "no_schema2"}) + + await encrypted_client.close() + await unencrypted_client.close() + + @async_client_context.require_version_min(8, 1, -1) + async def test_1_csfle_joins_no_schema(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"no_schema": "no_schema"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_2_qe_joins_no_schema(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"no_schema": "no_schema"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_3_no_schema_joins_csfle(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "csfle", + 
"as": "matched", + "pipeline": [{"$match": {"csfle": "csfle"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"csfle": "csfle"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_4_no_schema_joins_qe(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [ + {"$match": {"qe": "qe"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"qe": "qe"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_5_csfle_joins_csfle2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "csfle2", + "as": "matched", + "pipeline": [ + {"$match": {"csfle2": "csfle2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"csfle2": "csfle2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_6_qe_joins_qe2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "qe2", + "as": "matched", + "pipeline": [ + {"$match": {"qe2": "qe2"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"qe2": "qe2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_7_no_schema_joins_no_schema2(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = await anext( + await encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "no_schema2", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema2": "no_schema2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"no_schema2": "no_schema2"}]}) + + @async_client_context.require_version_min(8, 1, -1) + async def test_8_csfle_joins_qe(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "qe"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [{"$match": {"qe": "qe"}}, 
{"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertIn("not supported", str(exc)) + + @async_client_context.require_version_max(8, 1, -1) + async def test_9_error(self): + encrypted_client = await self.async_rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = await anext( + await encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertIn("Upgrade", str(exc)) + + +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap +class TestRewrapWithSeparateClientEncryption(AsyncEncryptionIntegrationTest): + MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": {}, + } + + async def test_rewrap(self): + for src_provider in self.MASTER_KEYS: + for dst_provider in self.MASTER_KEYS: + with self.subTest(src_provider=src_provider, dst_provider=dst_provider): + await self.run_test(src_provider, dst_provider) + + async def run_test(self, src_provider, dst_provider): + # Step 1. Drop the collection ``keyvault.datakeys``. + await self.client.keyvault.drop_collection("datakeys") + + # Step 2. Create a ``AsyncClientEncryption`` object named ``client_encryption1`` + client_encryption1 = self.create_client_encryption( + key_vault_client=self.client, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=DEFAULT_KMS_TLS, + codec_options=OPTS, + ) + + # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``. + key_id = await client_encryption1.create_data_key( + master_key=self.MASTER_KEYS[src_provider], kms_provider=src_provider + ) + + # Step 4. Call ``client_encryption1.encrypt`` with the value "test" + cipher_text = await client_encryption1.encrypt( + "test", key_id=key_id, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + + # Step 5. Create a ``AsyncClientEncryption`` object named ``client_encryption2`` + client2 = await self.async_rs_or_single_client() + client_encryption2 = self.create_client_encryption( + key_vault_client=client2, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=DEFAULT_KMS_TLS, + codec_options=OPTS, + ) + + # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``. + rewrap_many_data_key_result = await client_encryption2.rewrap_many_data_key( + {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider] + ) + + self.assertEqual(rewrap_many_data_key_result.bulk_write_result.modified_count, 1) + + # 7. Call ``client_encryption1.decrypt`` with the ``cipher_text``. Assert the return value is "test". + decrypt_result1 = await client_encryption1.decrypt(cipher_text) + self.assertEqual(decrypt_result1, "test") + + # 8. 
+        decrypt_result2 = await client_encryption2.decrypt(cipher_text)
+        self.assertEqual(decrypt_result2, "test")
+
+        # Case 2. Provider is not optional when master_key is given.
+        with self.assertRaises(ConfigurationError):
+            rewrap_many_data_key_result = await client_encryption2.rewrap_many_data_key(
+                {}, master_key=self.MASTER_KEYS[dst_provider]
+            )
+
+
+# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.md#on-demand-aws-credentials
+class TestOnDemandAWSCredentials(AsyncEncryptionIntegrationTest):
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.master_key = {
+            "region": "us-east-1",
+            "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"),
+        }
+
+    @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set")
+    async def test_01_failure(self):
+        self.client_encryption = self.create_client_encryption(
+            kms_providers={"aws": {}},
+            key_vault_namespace="keyvault.datakeys",
+            key_vault_client=async_client_context.client,
+            codec_options=OPTS,
+        )
+        with self.assertRaises(EncryptionError):
+            await self.client_encryption.create_data_key("aws", self.master_key)
+
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
+    async def test_02_success(self):
+        self.client_encryption = self.create_client_encryption(
+            kms_providers={"aws": {}},
+            key_vault_namespace="keyvault.datakeys",
+            key_vault_client=async_client_context.client,
+            codec_options=OPTS,
+        )
+        await self.client_encryption.create_data_key("aws", self.master_key)
+
+
+class TestQueryableEncryptionDocsExample(AsyncEncryptionIntegrationTest):
+    # Queryable Encryption is not supported on Standalone topology.
+    @async_client_context.require_no_standalone
+    @async_client_context.require_version_min(7, 0, -1)
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+
+    async def test_queryable_encryption(self):
+        # AsyncMongoClient to use in testing that handles auth/tls/etc,
+        # and cleanup.
+        async def AsyncMongoClient(**kwargs):
+            c = await self.async_rs_or_single_client(**kwargs)
+            return c
+
+        # Drop data from prior test runs.
+        await self.client.keyvault.datakeys.drop()
+        await self.client.drop_database("docs_examples")
+
+        kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}}
+
+        # Create two data keys.
+        key_vault_client = await AsyncMongoClient()
+        client_encryption = self.create_client_encryption(
+            kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions()
+        )
+        key1_id = await client_encryption.create_data_key("local")
+        key2_id = await client_encryption.create_data_key("local")
+
+        # Create an encryptedFieldsMap.
+        encrypted_fields_map = {
+            "docs_examples.encrypted": {
+                "fields": [
+                    {
+                        "path": "encrypted_indexed",
+                        "bsonType": "string",
+                        "keyId": key1_id,
+                        "queries": [
+                            {
+                                "queryType": "equality",
+                            },
+                        ],
+                    },
+                    {
+                        "path": "encrypted_unindexed",
+                        "bsonType": "string",
+                        "keyId": key2_id,
+                    },
+                ],
+            },
+        }
+
+        # Create a Queryable Encryption collection.
+        opts = AutoEncryptionOpts(
+            kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map
+        )
+        encrypted_client = await AsyncMongoClient(auto_encryption_opts=opts)
+
+        # Create a Queryable Encryption collection "docs_examples.encrypted".
+        # Because docs_examples.encrypted is in encrypted_fields_map, it is
+        # created with Queryable Encryption support.
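+        # Note that only "encrypted_indexed" declares a "queries" entry above,
+        # so only that field supports the encrypted equality find used below;
+        # "encrypted_unindexed" can be written and decrypted but not queried.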
+ db = encrypted_client.docs_examples + encrypted_coll = await db.create_collection("encrypted") + + # Auto encrypt an insert and find. + + # Encrypt an insert. + await encrypted_coll.insert_one( + { + "_id": 1, + "encrypted_indexed": "indexed_value", + "encrypted_unindexed": "unindexed_value", + } + ) + + # Encrypt a find. + res = await encrypted_coll.find_one({"encrypted_indexed": "indexed_value"}) + assert res is not None + assert res["encrypted_indexed"] == "indexed_value" + assert res["encrypted_unindexed"] == "unindexed_value" + + # Find documents without decryption. + unencrypted_client = await AsyncMongoClient() + unencrypted_coll = unencrypted_client.docs_examples.encrypted + res = await unencrypted_coll.find_one({"_id": 1}) + assert res is not None + assert isinstance(res["encrypted_indexed"], Binary) + assert isinstance(res["encrypted_unindexed"], Binary) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#22-range-explicit-encryption +class TestRangeQueryProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(8, 0, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + await self.client.drop_database(self.db) + key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = await self.async_rs_or_single_client(auto_encryption_opts=opts) + self.db = self.encrypted_client.db + + async def run_expression_find( + self, name, expression, expected_elems, range_opts, use_expr=False, key_id=None + ): + find_payload = await self.client_encryption.encrypt_expression( + expression=expression, + key_id=key_id or self.key1_id, + algorithm=Algorithm.RANGE, + query_type=QueryType.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + if use_expr: + find_payload = {"$expr": find_payload} + sorted_find = sorted( + await self.encrypted_client.db.explicit_encryption.find(find_payload).to_list(), + key=lambda x: x["_id"], + ) + for elem, expected in zip(sorted_find, expected_elems): + self.assertEqual(elem[f"encrypted{name}"], expected) + + async def run_test_cases(self, name, range_opts, cast_func): + encrypted_fields = json_data("etc", "data", f"range-encryptedFields-{name}.json") + await self.db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields) + await self.db.create_collection("explicit_encryption", encryptedFields=encrypted_fields) + + async def encrypt_and_cast(i): + return await self.client_encryption.encrypt( + cast_func(i), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + for elem in [{f"encrypted{name}": await encrypt_and_cast(i)} for i in [0, 6, 30, 200]]: + await self.encrypted_client.db.explicit_encryption.insert_one(elem) + + # Case 1. 
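+        # (Encrypt cast_func(6) explicitly with Algorithm.RANGE, then verify
+        # that explicit decryption returns the original value.)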
+ insert_payload = await self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + self.assertEqual(await self.client_encryption.decrypt(insert_payload), cast_func(6)) + + # Case 2. + expression = { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + } + await self.run_expression_find( + name, expression, [cast_func(i) for i in [6, 30, 200]], range_opts + ) + # Case 2, with UUID key_id + await self.run_expression_find( + name, + expression, + [cast_func(i) for i in [6, 30, 200]], + range_opts, + key_id=self.key1_id.as_uuid(), + ) + + # Case 3. + await self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(0)}}, + {f"encrypted{name}": {"$lte": cast_func(6)}}, + ] + }, + [cast_func(i) for i in [0, 6]], + range_opts, + ) + + # Case 4. + await self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gt": cast_func(30)}}, + ] + }, + [cast_func(i) for i in [200]], + range_opts, + ) + + # Case 5. + await self.run_expression_find( + name, + {"$and": [{"$lt": [f"$encrypted{name}", cast_func(30)]}]}, + [cast_func(i) for i in [0, 6]], + range_opts, + use_expr=True, + ) + + # The spec says to skip the following tests for no precision decimal or double types. + if name not in ("DoubleNoPrecision", "DecimalNoPrecision"): + # Case 6. + with self.assertRaisesRegex( + EncryptionError, + "greater than or equal to the minimum value and less than or equal to the maximum value", + ): + await self.client_encryption.encrypt( + cast_func(201), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 7. + with self.assertRaisesRegex( + EncryptionError, "expected matching 'min' and value type. Got range option" + ): + await self.client_encryption.encrypt( + 6 if cast_func != int else float(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 8. + # The spec says we must additionally not run this case with any precision type, not just the ones above. 
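+        # Note: "Precision" is also a substring of "DoubleNoPrecision" and
+        # "DecimalNoPrecision", so Case 8 effectively runs only for Int and Date.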
+        if "Precision" not in name:
+            with self.assertRaisesRegex(
+                EncryptionError,
+                "expected 'precision' to be set with double or decimal128 index, but got:",
+            ):
+                await self.client_encryption.encrypt(
+                    cast_func(6),
+                    key_id=self.key1_id,
+                    algorithm=Algorithm.RANGE,
+                    contention_factor=0,
+                    range_opts=RangeOpts(
+                        min=cast_func(0),
+                        max=cast_func(200),
+                        sparsity=1,
+                        trim_factor=1,
+                        precision=2,
+                    ),
+                )
+
+    async def test_double_no_precision(self):
+        await self.run_test_cases("DoubleNoPrecision", RangeOpts(sparsity=1, trim_factor=1), float)
+
+    async def test_double_precision(self):
+        await self.run_test_cases(
+            "DoublePrecision",
+            RangeOpts(min=0.0, max=200.0, sparsity=1, trim_factor=1, precision=2),
+            float,
+        )
+
+    async def test_decimal_no_precision(self):
+        await self.run_test_cases(
+            "DecimalNoPrecision", RangeOpts(sparsity=1, trim_factor=1), lambda x: Decimal128(str(x))
+        )
+
+    async def test_decimal_precision(self):
+        await self.run_test_cases(
+            "DecimalPrecision",
+            RangeOpts(
+                min=Decimal128("0.0"),
+                max=Decimal128("200.0"),
+                sparsity=1,
+                trim_factor=1,
+                precision=2,
+            ),
+            lambda x: Decimal128(str(x)),
+        )
+
+    async def test_datetime(self):
+        await self.run_test_cases(
+            "Date",
+            RangeOpts(min=DatetimeMS(0), max=DatetimeMS(200), sparsity=1, trim_factor=1),
+            lambda x: DatetimeMS(x).as_datetime(),
+        )
+
+    async def test_int(self):
+        await self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1, trim_factor=1), int)
+
+
+# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#23-range-explicit-encryption-applies-defaults
+class TestRangeQueryDefaultsProse(AsyncEncryptionIntegrationTest):
+    @async_client_context.require_no_standalone
+    @async_client_context.require_version_min(8, 0, -1)
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        await self.client.drop_database(self.db)
+        self.key_vault_client = self.client
+        self.client_encryption = self.create_client_encryption(
+            {"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys", self.key_vault_client, OPTS
+        )
+        self.key_id = await self.client_encryption.create_data_key("local")
+        opts = RangeOpts(min=0, max=1000)
+        self.payload_defaults = await self.client_encryption.encrypt(
+            123, "range", self.key_id, contention_factor=0, range_opts=opts
+        )
+
+    async def test_uses_libmongocrypt_defaults(self):
+        opts = RangeOpts(min=0, max=1000, sparsity=2, trim_factor=6)
+        payload = await self.client_encryption.encrypt(
+            123, "range", self.key_id, contention_factor=0, range_opts=opts
+        )
+        assert len(payload) == len(self.payload_defaults)
+
+    async def test_accepts_trim_factor_0(self):
+        opts = RangeOpts(min=0, max=1000, trim_factor=0)
+        payload = await self.client_encryption.encrypt(
+            123, "range", self.key_id, contention_factor=0, range_opts=opts
+        )
+        assert len(payload) > len(self.payload_defaults)
+
+
+# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#24-kms-retry-tests
+class TestKmsRetryProse(AsyncEncryptionIntegrationTest):
+    @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set")
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        # 1. Create a ClientEncryption object configured with tlsCAFile and
+        # tlsCertificateKeyFile for each KMS provider.
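+        # 127.0.0.1:9003 is the mock KMS server for these retry tests; its
+        # /set_failpoint endpoints are driven by http_post() below.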
+ providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9003" + providers["gcp"]["endpoint"] = "127.0.0.1:9003" + kms_tls_opts = { + p: {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} for p in providers + } + self.client_encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) + + async def http_post(self, path, data=None): + # Note, the connection to the mock server needs to be closed after + # each request because the server is single threaded. + ctx = ssl.create_default_context(cafile=CA_PEM) + ctx.load_cert_chain(CLIENT_PEM) + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE + conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) + try: + if data is not None: + headers = {"Content-type": "application/json"} + body = json.dumps(data) + else: + headers = {} + body = None + conn.request("POST", path, body, headers) + res = conn.getresponse() + res.read() + finally: + conn.close() + + async def _test(self, provider, master_key): + await self.http_post("/reset") + # Case 1: createDataKey and encrypt with TCP retry + await self.http_post("/set_failpoint/network", {"count": 1}) + key_id = await self.client_encryption.create_data_key(provider, master_key=master_key) + await self.http_post("/set_failpoint/network", {"count": 1}) + await self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 2: createDataKey and encrypt with HTTP retry + await self.http_post("/set_failpoint/http", {"count": 1}) + key_id = await self.client_encryption.create_data_key(provider, master_key=master_key) + await self.http_post("/set_failpoint/http", {"count": 1}) + await self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 3: createDataKey fails after too many retries + await self.http_post("/set_failpoint/network", {"count": 4}) + with self.assertRaisesRegex(EncryptionError, "KMS request failed after"): + await self.client_encryption.create_data_key(provider, master_key=master_key) + + async def test_kms_retry(self): + if IS_PYOPENSSL: + self.skipTest( + "PyOpenSSL does not support a required method for this test, Connection.makefile" + ) + await self._test("aws", {"region": "foo", "key": "bar", "endpoint": "127.0.0.1:9003"}) + await self._test("azure", {"keyVaultEndpoint": "127.0.0.1:9003", "keyName": "foo"}) + await self._test( + "gcp", + { + "projectId": "foo", + "location": "bar", + "keyRing": "baz", + "keyName": "qux", + "endpoint": "127.0.0.1:9003", + }, + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#automatic-data-encryption-keys +class TestAutomaticDecryptionKeys(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(7, 0, -1) + @flaky(reason="PYTHON-4982") + async def asyncSetUp(self): + await super().asyncSetUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + await self.client.drop_database(self.db) + self.key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addAsyncCleanup(self.key_vault.drop) + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, + self.key_vault.full_name, + self.client, + OPTS, + ) + + async def 
test_01_simple_create(self): + coll, _ = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + with self.assertRaises(WriteError) as exc: + await coll.insert_one({"ssn": "123-45-6789"}) + self.assertEqual(exc.exception.code, 121) + + async def test_02_no_fields(self): + with self.assertRaisesRegex( + TypeError, + "create_encrypted_collection.* missing 1 required positional argument: 'encrypted_fields'", + ): + await self.client_encryption.create_encrypted_collection( # type:ignore[call-arg] + database=self.db, + name="testing1", + ) + + async def test_03_invalid_keyid(self): + # checkAuthForCreateCollection can be removed when SERVER-102101 is fixed. + with self.assertRaisesRegex( + EncryptedCollectionError, + "(create|checkAuthForCreateCollection).encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + ): + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [{"path": "ssn", "bsonType": "string", "keyId": False}] + }, + kms_provider="local", + ) + + async def test_04_insert_encrypted(self): + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + key1_id = ef["fields"][0]["keyId"] + encrypted_value = await self.client_encryption.encrypt( + "123-45-6789", + key_id=key1_id, + algorithm=Algorithm.UNINDEXED, + ) + await coll.insert_one({"ssn": encrypted_value}) + + async def test_copy_encrypted_fields(self): + encrypted_fields = { + "fields": [ + { + "path": "ssn", + "bsonType": "string", + "keyId": None, + } + ] + } + _, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields=encrypted_fields, + ) + self.assertIsNotNone(ef["fields"][0]["keyId"]) + self.assertIsNone(encrypted_fields["fields"][0]["keyId"]) + + async def test_options_forward(self): + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + read_preference=ReadPreference.NEAREST, + ) + self.assertEqual(coll.read_preference, ReadPreference.NEAREST) + self.assertEqual(coll.name, "testing1") + + async def test_mixed_null_keyids(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + coll, ef = await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "ssn", "bsonType": "string", "keyId": None}, + {"path": "dob", "bsonType": "string", "keyId": key}, + {"path": "secrets", "bsonType": "string"}, + {"path": "address", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + encrypted_values = [ + await self.client_encryption.encrypt( + val, + key_id=key, + algorithm=Algorithm.UNINDEXED, + ) + for val, key in zip( + ["123-45-6789", "11/22/1963", "My secret", "New Mexico, 87104"], + [field["keyId"] for field in ef["fields"]], + ) + ] + await coll.insert_one( + { + "ssn": encrypted_values[0], + "dob": encrypted_values[1], + "secrets": encrypted_values[2], + "address": encrypted_values[3], + } + ) + + async def 
test_create_datakey_fails(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + } + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # generating keys fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="does not exist", + ) + self.assertEqual(exc.exception.encrypted_fields, encrypted_fields) + + async def test_create_failure(self): + key = await self.client_encryption.create_data_key(kms_provider="local") + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # it is the creation of the collection that fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name=1, # type:ignore[arg-type] + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + for field in exc.exception.encrypted_fields["fields"]: + self.assertIsInstance(field["keyId"], Binary) + + async def test_collection_name_collision(self): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": None}, + ] + } + await self.db.create_collection("testing1") + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + await self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + await self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#27-text-explicit-encryption +class TestExplicitTextEncryptionProse(AsyncEncryptionIntegrationTest): + @async_client_context.require_no_standalone + @async_client_context.require_version_min(8, 2, -1) + @async_client_context.require_libmongocrypt_min(1, 15, 1) + @async_client_context.require_pymongocrypt_min(1, 16, 0) + async def asyncSetUp(self): + await super().asyncSetUp() + # Load the file key1-document.json as key1Document. + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + # Read the "_id" field of key1Document as key1ID. + self.key1_id = self.key1_document["_id"] + # Drop and create the collection keyvault.datakeys. + # Insert key1Document in keyvault.datakeys with majority write concern. 
+ self.key_vault = await create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addAsyncCleanup(self.key_vault.drop) + # Create a ClientEncryption object named clientEncryption with these options. + self.kms_providers = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + self.kms_providers, + self.key_vault.full_name, + self.client, + OPTS, + ) + # Create a MongoClient named encryptedClient with these AutoEncryptionOpts. + opts = AutoEncryptionOpts( + self.kms_providers, + "keyvault.datakeys", + bypass_query_analysis=True, + ) + self.client_encrypted = await self.async_rs_or_single_client(auto_encryption_opts=opts) + + # Using QE CreateCollection() and Collection.Drop(), drop and create the following collections with majority write concern: + # db.prefix-suffix using the encryptedFields option set to the contents of encryptedFields-prefix-suffix.json. + db = self.client_encrypted.db + await db.drop_collection("prefix-suffix") + encrypted_fields = json_data("etc", "data", "encryptedFields-prefix-suffix.json") + await self.client_encryption.create_encrypted_collection( + db, "prefix-suffix", kms_provider="local", encrypted_fields=encrypted_fields + ) + # db.substring using the encryptedFields option set to the contents of encryptedFields-substring.json. + await db.drop_collection("substring") + encrypted_fields = json_data("etc", "data", "encryptedFields-substring.json") + await self.client_encryption.create_encrypted_collection( + db, "substring", kms_provider="local", encrypted_fields=encrypted_fields + ) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.prefix-suffix with majority write concern. + coll = self.client_encrypted.db["prefix-suffix"].with_options( + write_concern=WriteConcern(w="majority") + ) + await coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.substring with majority write concern. + coll = self.client_encrypted.db["substring"].with_options( + write_concern=WriteConcern(w="majority") + ) + await coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + async def test_01_can_find_a_document_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts. 
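+        # These prefix options match the ones used when the document was
+        # inserted in asyncSetUp.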
+ text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter. + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_02_can_find_a_document_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_03_no_document_found_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + async def test_04_no_document_found_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = await self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert that no documents are returned. 
+ self.assertIsNone(value) + + async def test_05_can_find_a_document_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "bar" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "bar", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = await self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert the following document is returned: + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + async def test_06_no_document_found_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "qux" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = await self.client_encryption.encrypt( + "qux", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = await self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + async def test_07_contentionFactor_is_required(self): + from pymongocrypt.errors import MongoCryptError + + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + with self.assertRaises(EncryptionError) as ctx: + await self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + text_opts=text_opts, + ) + # Expect an error from libmongocrypt with a message containing the string: "contention factor is required for textPreview algorithm". 
+        self.assertIsInstance(ctx.exception.cause, MongoCryptError)
+        self.assertIn(
+            "contention factor is required for textPreview algorithm", str(ctx.exception)
+        )
+
+
+def start_mongocryptd(port) -> None:
+    args = ["mongocryptd", f"--port={port}", "--idleShutdownTimeoutSecs=60"]
+    _spawn_daemon(args)
+
+
+@unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed")
+class TestNoSessionsSupport(AsyncEncryptionIntegrationTest):
+    mongocryptd_client: AsyncMongoClient
+    MONGOCRYPTD_PORT = 27020
+
+    async def asyncSetUp(self) -> None:
+        await super().asyncSetUp()
+        start_mongocryptd(self.MONGOCRYPTD_PORT)
+
+        self.listener = OvertCommandListener()
+        self.mongocryptd_client = self.simple_client(
+            f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener]
+        )
+
+        hello = await self.mongocryptd_client.db.command("hello")
+        self.assertNotIn("logicalSessionTimeoutMinutes", hello)
+
+    async def test_implicit_session_ignored_when_unsupported(self):
+        self.listener.reset()
+        with self.assertRaises(OperationFailure):
+            await self.mongocryptd_client.db.test.find_one()
+
+        self.assertNotIn("lsid", self.listener.started_events[0].command)
+
+        with self.assertRaises(OperationFailure):
+            await self.mongocryptd_client.db.test.insert_one({"x": 1})
+
+        self.assertNotIn("lsid", self.listener.started_events[1].command)
+
+        await self.mongocryptd_client.close()
+
+    async def test_explicit_session_errors_when_unsupported(self):
+        self.listener.reset()
+        async with self.mongocryptd_client.start_session() as s:
+            with self.assertRaisesRegex(
+                ConfigurationError, r"Sessions are not supported by this MongoDB deployment"
+            ):
+                await self.mongocryptd_client.db.test.find_one(session=s)
+            with self.assertRaisesRegex(
+                ConfigurationError, r"Sessions are not supported by this MongoDB deployment"
+            ):
+                await self.mongocryptd_client.db.test.insert_one({"x": 1}, session=s)
+
+        await self.mongocryptd_client.close()
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_examples.py b/test/asynchronous/test_examples.py
new file mode 100644
index 0000000000..21770f490c
--- /dev/null
+++ b/test/asynchronous/test_examples.py
@@ -0,0 +1,1446 @@
+# Copyright 2017 MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""MongoDB documentation examples in Python.""" +from __future__ import annotations + +import asyncio +import datetime +import functools +import sys +import threading +import time +from test.asynchronous.helpers import ConcurrentRunner + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import async_wait_until + +import pymongo +from pymongo.errors import ConnectionFailure, OperationFailure +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestSampleShellCommands(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.db.inventory.drop() + + async def asyncTearDown(self): + # Run after every test. + await self.db.inventory.drop() + await self.client.drop_database("pymongo_test") + + async def test_first_three_examples(self): + db = self.db + + # Start Example 1 + await db.inventory.insert_one( + { + "item": "canvas", + "qty": 100, + "tags": ["cotton"], + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + } + ) + # End Example 1 + + self.assertEqual(await db.inventory.count_documents({}), 1) + + # Start Example 2 + cursor = db.inventory.find({"item": "canvas"}) + # End Example 2 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 3 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "tags": ["blank", "red"], + "size": {"h": 14, "w": 21, "uom": "cm"}, + }, + { + "item": "mat", + "qty": 85, + "tags": ["gray"], + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + }, + { + "item": "mousepad", + "qty": 25, + "tags": ["gel", "blue"], + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + }, + ] + ) + # End Example 3 + + self.assertEqual(await db.inventory.count_documents({}), 4) + + async def test_query_top_level_fields(self): + db = self.db + + # Start Example 6 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 6 + + self.assertEqual(await db.inventory.count_documents({}), 5) + + # Start Example 7 + cursor = db.inventory.find({}) + # End Example 7 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 9 + cursor = db.inventory.find({"status": "D"}) + # End Example 9 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 10 + cursor = db.inventory.find({"status": {"$in": ["A", "D"]}}) + # End Example 10 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 11 + cursor = db.inventory.find({"status": "A", "qty": {"$lt": 30}}) + # End Example 11 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 12 + cursor = db.inventory.find({"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) + # End Example 12 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 13 + cursor = db.inventory.find( + {"status": "A", "$or": [{"qty": {"$lt": 30}}, {"item": {"$regex": "^p"}}]} + ) + 
# End Example 13 + + self.assertEqual(len(await cursor.to_list()), 2) + + async def test_query_embedded_documents(self): + db = self.db + + # Start Example 14 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "A", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 14 + + # Start Example 15 + cursor = db.inventory.find({"size": {"h": 14, "w": 21, "uom": "cm"}}) + # End Example 15 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 16 + cursor = db.inventory.find({"size": {"w": 21, "h": 14, "uom": "cm"}}) + # End Example 16 + + self.assertEqual(len(await cursor.to_list()), 0) + + # Start Example 17 + cursor = db.inventory.find({"size.uom": "in"}) + # End Example 17 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 18 + cursor = db.inventory.find({"size.h": {"$lt": 15}}) + # End Example 18 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 19 + cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) + # End Example 19 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_query_arrays(self): + db = self.db + + # Start Example 20 + await db.inventory.insert_many( + [ + {"item": "journal", "qty": 25, "tags": ["blank", "red"], "dim_cm": [14, 21]}, + {"item": "notebook", "qty": 50, "tags": ["red", "blank"], "dim_cm": [14, 21]}, + { + "item": "paper", + "qty": 100, + "tags": ["red", "blank", "plain"], + "dim_cm": [14, 21], + }, + {"item": "planner", "qty": 75, "tags": ["blank", "red"], "dim_cm": [22.85, 30]}, + {"item": "postcard", "qty": 45, "tags": ["blue"], "dim_cm": [10, 15.25]}, + ] + ) + # End Example 20 + + # Start Example 21 + cursor = db.inventory.find({"tags": ["red", "blank"]}) + # End Example 21 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 22 + cursor = db.inventory.find({"tags": {"$all": ["red", "blank"]}}) + # End Example 22 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 23 + cursor = db.inventory.find({"tags": "red"}) + # End Example 23 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 24 + cursor = db.inventory.find({"dim_cm": {"$gt": 25}}) + # End Example 24 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 25 + cursor = db.inventory.find({"dim_cm": {"$gt": 15, "$lt": 20}}) + # End Example 25 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 26 + cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) + # End Example 26 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 27 + cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}}) + # End Example 27 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 28 + cursor = db.inventory.find({"tags": {"$size": 3}}) + # End Example 28 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_query_array_of_documents(self): + db = self.db + + # Start Example 29 + await db.inventory.insert_many( + [ + { + "item": "journal", + "instock": [ + {"warehouse": "A", "qty": 
5}, + {"warehouse": "C", "qty": 15}, + ], + }, + {"item": "notebook", "instock": [{"warehouse": "C", "qty": 5}]}, + { + "item": "paper", + "instock": [ + {"warehouse": "A", "qty": 60}, + {"warehouse": "B", "qty": 15}, + ], + }, + { + "item": "planner", + "instock": [ + {"warehouse": "A", "qty": 40}, + {"warehouse": "B", "qty": 5}, + ], + }, + { + "item": "postcard", + "instock": [ + {"warehouse": "B", "qty": 15}, + {"warehouse": "C", "qty": 35}, + ], + }, + ] + ) + # End Example 29 + + # Start Example 30 + cursor = db.inventory.find({"instock": {"warehouse": "A", "qty": 5}}) + # End Example 30 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 31 + cursor = db.inventory.find({"instock": {"qty": 5, "warehouse": "A"}}) + # End Example 31 + + self.assertEqual(len(await cursor.to_list()), 0) + + # Start Example 32 + cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) + # End Example 32 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 33 + cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) + # End Example 33 + + self.assertEqual(len(await cursor.to_list()), 5) + + # Start Example 34 + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) + # End Example 34 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 35 + cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) + # End Example 35 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 36 + cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}}) + # End Example 36 + + self.assertEqual(len(await cursor.to_list()), 4) + + # Start Example 37 + cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) + # End Example 37 + + self.assertEqual(len(await cursor.to_list()), 2) + + async def test_query_null(self): + db = self.db + + # Start Example 38 + await db.inventory.insert_many([{"_id": 1, "item": None}, {"_id": 2}]) + # End Example 38 + + # Start Example 39 + cursor = db.inventory.find({"item": None}) + # End Example 39 + + self.assertEqual(len(await cursor.to_list()), 2) + + # Start Example 40 + cursor = db.inventory.find({"item": {"$type": 10}}) + # End Example 40 + + self.assertEqual(len(await cursor.to_list()), 1) + + # Start Example 41 + cursor = db.inventory.find({"item": {"$exists": False}}) + # End Example 41 + + self.assertEqual(len(await cursor.to_list()), 1) + + async def test_projection(self): + db = self.db + + # Start Example 42 + await db.inventory.insert_many( + [ + { + "item": "journal", + "status": "A", + "size": {"h": 14, "w": 21, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 5}], + }, + { + "item": "notebook", + "status": "A", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "C", "qty": 5}], + }, + { + "item": "paper", + "status": "D", + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "instock": [{"warehouse": "A", "qty": 60}], + }, + { + "item": "planner", + "status": "D", + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "instock": [{"warehouse": "A", "qty": 40}], + }, + { + "item": "postcard", + "status": "A", + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "instock": [{"warehouse": "B", "qty": 15}, {"warehouse": "C", "qty": 35}], + }, + ] + ) + # End Example 42 + + # Start Example 43 + cursor = db.inventory.find({"status": "A"}) + # End Example 43 + + self.assertEqual(len(await cursor.to_list()), 3) + + # Start Example 44 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1}) + # End 
Example 44 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 45 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) + # End Example 45 + + async for doc in cursor: + self.assertNotIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 46 + cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) + # End Example 46 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertNotIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) + + # Start Example 47 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) + # End Example 47 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) + size = doc["size"] + self.assertIn("uom", size) + self.assertNotIn("h", size) + self.assertNotIn("w", size) + + # Start Example 48 + cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) + # End Example 48 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertIn("instock", doc) + size = doc["size"] + self.assertNotIn("uom", size) + self.assertIn("h", size) + self.assertIn("w", size) + + # Start Example 49 + cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) + # End Example 49 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) + for subdoc in doc["instock"]: + self.assertNotIn("warehouse", subdoc) + self.assertIn("qty", subdoc) + + # Start Example 50 + cursor = db.inventory.find( + {"status": "A"}, {"item": 1, "status": 1, "instock": {"$slice": -1}} + ) + # End Example 50 + + async for doc in cursor: + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) + self.assertEqual(len(doc["instock"]), 1) + + async def test_update_and_replace(self): + db = self.db + + # Start Example 51 + await db.inventory.insert_many( + [ + { + "item": "canvas", + "qty": 100, + "size": {"h": 28, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "mat", + "qty": 85, + "size": {"h": 27.9, "w": 35.5, "uom": "cm"}, + "status": "A", + }, + { + "item": "mousepad", + "qty": 25, + "size": {"h": 19, "w": 22.85, "uom": "cm"}, + "status": "P", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketchbook", + "qty": 80, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "sketch pad", + "qty": 95, + "size": {"h": 22.85, "w": 30.5, "uom": 
"cm"}, + "status": "A", + }, + ] + ) + # End Example 51 + + # Start Example 52 + await db.inventory.update_one( + {"item": "paper"}, + {"$set": {"size.uom": "cm", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) + # End Example 52 + + async for doc in db.inventory.find({"item": "paper"}): + self.assertEqual(doc["size"]["uom"], "cm") + self.assertEqual(doc["status"], "P") + self.assertIn("lastModified", doc) + + # Start Example 53 + await db.inventory.update_many( + {"qty": {"$lt": 50}}, + {"$set": {"size.uom": "in", "status": "P"}, "$currentDate": {"lastModified": True}}, + ) + # End Example 53 + + async for doc in db.inventory.find({"qty": {"$lt": 50}}): + self.assertEqual(doc["size"]["uom"], "in") + self.assertEqual(doc["status"], "P") + self.assertIn("lastModified", doc) + + # Start Example 54 + await db.inventory.replace_one( + {"item": "paper"}, + { + "item": "paper", + "instock": [{"warehouse": "A", "qty": 60}, {"warehouse": "B", "qty": 40}], + }, + ) + # End Example 54 + + async for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): + self.assertEqual(len(doc.keys()), 2) + self.assertIn("item", doc) + self.assertIn("instock", doc) + self.assertEqual(len(doc["instock"]), 2) + + async def test_delete(self): + db = self.db + + # Start Example 55 + await db.inventory.insert_many( + [ + { + "item": "journal", + "qty": 25, + "size": {"h": 14, "w": 21, "uom": "cm"}, + "status": "A", + }, + { + "item": "notebook", + "qty": 50, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "P", + }, + { + "item": "paper", + "qty": 100, + "size": {"h": 8.5, "w": 11, "uom": "in"}, + "status": "D", + }, + { + "item": "planner", + "qty": 75, + "size": {"h": 22.85, "w": 30, "uom": "cm"}, + "status": "D", + }, + { + "item": "postcard", + "qty": 45, + "size": {"h": 10, "w": 15.25, "uom": "cm"}, + "status": "A", + }, + ] + ) + # End Example 55 + + self.assertEqual(await db.inventory.count_documents({}), 5) + + # Start Example 57 + await db.inventory.delete_many({"status": "A"}) + # End Example 57 + + self.assertEqual(await db.inventory.count_documents({}), 3) + + # Start Example 58 + await db.inventory.delete_one({"status": "D"}) + # End Example 58 + + self.assertEqual(await db.inventory.count_documents({}), 2) + + # Start Example 56 + await db.inventory.delete_many({}) + # End Example 56 + + self.assertEqual(await db.inventory.count_documents({}), 0) + + @async_client_context.require_change_streams + async def test_change_streams(self): + db = self.db + done = False + + async def insert_docs(): + nonlocal done + while not done: + await db.inventory.insert_one({"username": "alice"}) + await db.inventory.delete_one({"username": "alice"}) + await asyncio.sleep(0.005) + + t = ConcurrentRunner(target=insert_docs) + await t.start() + + try: + # 1. 
The database for reactive, real-time applications + # Start Changestream Example 1 + cursor = await db.inventory.watch() + await anext(cursor) + # End Changestream Example 1 + await cursor.close() + + # Start Changestream Example 2 + cursor = await db.inventory.watch(full_document="updateLookup") + await anext(cursor) + # End Changestream Example 2 + await cursor.close() + + # Start Changestream Example 3 + resume_token = cursor.resume_token + cursor = await db.inventory.watch(resume_after=resume_token) + await anext(cursor) + # End Changestream Example 3 + await cursor.close() + + # Start Changestream Example 4 + pipeline = [ + {"$match": {"fullDocument.username": "alice"}}, + {"$addFields": {"newField": "this is an added field!"}}, + ] + cursor = await db.inventory.watch(pipeline=pipeline) + await anext(cursor) + # End Changestream Example 4 + await cursor.close() + finally: + done = True + await t.join() + + async def test_aggregate_examples(self): + db = self.db + + # Start Aggregation Example 1 + await db.sales.aggregate([{"$match": {"items.fruit": "banana"}}, {"$sort": {"date": 1}}]) + # End Aggregation Example 1 + + # Start Aggregation Example 2 + await db.sales.aggregate( + [ + {"$unwind": "$items"}, + {"$match": {"items.fruit": "banana"}}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "count": {"$sum": "$items.quantity"}, + } + }, + {"$project": {"dayOfWeek": "$_id.day", "numberSold": "$count", "_id": 0}}, + {"$sort": {"numberSold": 1}}, + ] + ) + # End Aggregation Example 2 + + # Start Aggregation Example 3 + await db.sales.aggregate( + [ + {"$unwind": "$items"}, + { + "$group": { + "_id": {"day": {"$dayOfWeek": "$date"}}, + "items_sold": {"$sum": "$items.quantity"}, + "revenue": {"$sum": {"$multiply": ["$items.quantity", "$items.price"]}}, + } + }, + { + "$project": { + "day": "$_id.day", + "revenue": 1, + "items_sold": 1, + "discount": { + "$cond": {"if": {"$lte": ["$revenue", 250]}, "then": 25, "else": 0} + }, + } + }, + ] + ) + # End Aggregation Example 3 + + # Start Aggregation Example 4 + await db.air_alliances.aggregate( + [ + { + "$lookup": { + "from": "air_airlines", + "let": {"constituents": "$airlines"}, + "pipeline": [{"$match": {"$expr": {"$in": ["$name", "$$constituents"]}}}], + "as": "airlines", + } + }, + { + "$project": { + "_id": 0, + "name": 1, + "airlines": { + "$filter": { + "input": "$airlines", + "as": "airline", + "cond": {"$eq": ["$$airline.country", "Canada"]}, + } + }, + } + }, + ] + ) + # End Aggregation Example 4 + + @async_client_context.require_version_min(4, 4) + async def test_aggregate_projection_example(self): + db = self.db + + # Start Aggregation Projection Example 1 + db.inventory.find( + {}, + { + "_id": 0, + "item": 1, + "status": { + "$switch": { + "branches": [ + {"case": {"$eq": ["$status", "A"]}, "then": "Available"}, + {"case": {"$eq": ["$status", "D"]}, "then": "Discontinued"}, + ], + "default": "No status found", + } + }, + "area": { + "$concat": [ + {"$toString": {"$multiply": ["$size.h", "$size.w"]}}, + " ", + "$size.uom", + ] + }, + "reportNumber": {"$literal": 1}, + }, + ) + + # End Aggregation Projection Example 1 + + async def test_commands(self): + db = self.db + await db.restaurants.insert_one({}) + + # Start runCommand Example 1 + await db.command("buildInfo") + # End runCommand Example 1 + + # Start runCommand Example 2 + await db.command("count", "restaurants") + # End runCommand Example 2 + + async def test_index_management(self): + db = self.db + + # Start Index Example 1 + await 
db.records.create_index("score")
+        # End Index Example 1
+
+        # Start Index Example 2
+        await db.restaurants.create_index(
+            [("cuisine", pymongo.ASCENDING), ("name", pymongo.ASCENDING)],
+            partialFilterExpression={"rating": {"$gt": 5}},
+        )
+        # End Index Example 2
+
+    @async_client_context.require_replica_set
+    async def test_misc(self):
+        # Marketing examples
+        client = self.client
+        self.addAsyncCleanup(client.drop_database, "test")
+        self.addAsyncCleanup(client.drop_database, "my_database")
+
+        # 2. Tunable consistency controls
+        collection = client.my_database.my_collection
+        async with client.start_session() as session:
+            await collection.insert_one({"_id": 1}, session=session)
+            await collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session)
+            async for _doc in collection.find({}, session=session):
+                pass
+
+        # 3. Exploiting the power of arrays
+        collection = client.test.array_updates_test
+        await collection.update_one(
+            {"_id": 1}, {"$set": {"a.$[i].b": 2}}, array_filters=[{"i.b": 0}]
+        )
+
+
+class TestTransactionExamples(AsyncIntegrationTest):
+    @async_client_context.require_transactions
+    async def test_transactions(self):
+        # Transaction examples
+        client = self.client
+        self.addAsyncCleanup(client.drop_database, "hr")
+        self.addAsyncCleanup(client.drop_database, "reporting")
+
+        employees = client.hr.employees
+        events = client.reporting.events
+        await employees.insert_one({"employee": 3, "status": "Active"})
+        await events.insert_one({"employee": 3, "status": {"new": "Active", "old": None}})
+
+        # Start Transactions Intro Example 1
+
+        async def update_employee_info(session):
+            employees_coll = session.client.hr.employees
+            events_coll = session.client.reporting.events
+
+            async with await session.start_transaction(
+                read_concern=ReadConcern("snapshot"), write_concern=WriteConcern(w="majority")
+            ):
+                await employees_coll.update_one(
+                    {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session
+                )
+                await events_coll.insert_one(
+                    {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session
+                )
+
+                while True:
+                    try:
+                        # Commit uses write concern set at transaction start.
+                        await session.commit_transaction()
+                        print("Transaction committed.")
+                        break
+                    except (ConnectionFailure, OperationFailure) as exc:
+                        # Can retry commit
+                        if exc.has_error_label("UnknownTransactionCommitResult"):
+                            print("UnknownTransactionCommitResult, retrying commit operation ...")
+                            continue
+                        else:
+                            print("Error during commit ...")
+                            raise
+
+        # End Transactions Intro Example 1
+
+        async with client.start_session() as session:
+            await update_employee_info(session)
+
+        employee = await employees.find_one({"employee": 3})
+        assert employee is not None
+        self.assertIsNotNone(employee)
+        self.assertEqual(employee["status"], "Inactive")
+
+        # Start Transactions Retry Example 1
+        async def run_transaction_with_retry(txn_func, session):
+            while True:
+                try:
+                    await txn_func(session)  # performs transaction
+                    break
+                except (ConnectionFailure, OperationFailure) as exc:
+                    print("Transaction aborted. Caught exception during transaction.")
+
+                    # If transient error, retry the whole transaction
+                    if exc.has_error_label("TransientTransactionError"):
+                        print("TransientTransactionError, retrying transaction ...")
+                        continue
+                    else:
+                        raise
+
+        # End Transactions Retry Example 1
+
+        async with client.start_session() as session:
+            await run_transaction_with_retry(update_employee_info, session)
+
+        employee = await employees.find_one({"employee": 3})
+        assert employee is not None
+        self.assertIsNotNone(employee)
+        self.assertEqual(employee["status"], "Inactive")
+
+        # Start Transactions Retry Example 2
+        async def commit_with_retry(session):
+            while True:
+                try:
+                    # Commit uses write concern set at transaction start.
+                    await session.commit_transaction()
+                    print("Transaction committed.")
+                    break
+                except (ConnectionFailure, OperationFailure) as exc:
+                    # Can retry commit
+                    if exc.has_error_label("UnknownTransactionCommitResult"):
+                        print("UnknownTransactionCommitResult, retrying commit operation ...")
+                        continue
+                    else:
+                        print("Error during commit ...")
+                        raise
+
+        # End Transactions Retry Example 2
+
+        # Test commit_with_retry from the previous examples
+        async def _insert_employee_retry_commit(session):
+            async with await session.start_transaction():
+                await employees.insert_one({"employee": 4, "status": "Active"}, session=session)
+                await events.insert_one(
+                    {"employee": 4, "status": {"new": "Active", "old": None}}, session=session
+                )
+
+                await commit_with_retry(session)
+
+        async with client.start_session() as session:
+            await run_transaction_with_retry(_insert_employee_retry_commit, session)
+
+        employee = await employees.find_one({"employee": 4})
+        assert employee is not None
+        self.assertIsNotNone(employee)
+        self.assertEqual(employee["status"], "Active")
+
+        # Start Transactions Retry Example 3
+
+        async def run_transaction_with_retry(txn_func, session):
+            while True:
+                try:
+                    await txn_func(session)  # performs transaction
+                    break
+                except (ConnectionFailure, OperationFailure) as exc:
+                    # If transient error, retry the whole transaction
+                    if exc.has_error_label("TransientTransactionError"):
+                        print("TransientTransactionError, retrying transaction ...")
+                        continue
+                    else:
+                        raise
+
+        async def commit_with_retry(session):
+            while True:
+                try:
+                    # Commit uses write concern set at transaction start.
+                    await session.commit_transaction()
+                    print("Transaction committed.")
+                    break
+                except (ConnectionFailure, OperationFailure) as exc:
+                    # Can retry commit
+                    if exc.has_error_label("UnknownTransactionCommitResult"):
+                        print("UnknownTransactionCommitResult, retrying commit operation ...")
+                        continue
+                    else:
+                        print("Error during commit ...")
+                        raise
+
+        # Updates two collections in a transaction.
+
+        async def update_employee_info(session):
+            employees_coll = session.client.hr.employees
+            events_coll = session.client.reporting.events
+
+            async with await session.start_transaction(
+                read_concern=ReadConcern("snapshot"),
+                write_concern=WriteConcern(w="majority"),
+                read_preference=ReadPreference.PRIMARY,
+            ):
+                await employees_coll.update_one(
+                    {"employee": 3}, {"$set": {"status": "Inactive"}}, session=session
+                )
+                await events_coll.insert_one(
+                    {"employee": 3, "status": {"new": "Inactive", "old": "Active"}}, session=session
+                )
+
+                await commit_with_retry(session)
+
+        # Start a session.
+        async with client.start_session() as session:
+            try:
+                await run_transaction_with_retry(update_employee_info, session)
+            except Exception:
+                # Do something with error.
+                raise
+
+        # End Transactions Retry Example 3
+
+        employee = await employees.find_one({"employee": 3})
+        assert employee is not None
+        self.assertIsNotNone(employee)
+        self.assertEqual(employee["status"], "Inactive")
+
+        async def MongoClient(_):
+            return await self.async_rs_client()
+
+        uriString = None
+
+        # Start Transactions withTxn API Example 1
+
+        # For a replica set, include the replica set name and a seedlist of the members in the URI string; e.g.
+        # uriString = 'mongodb://mongodb0.example.com:27017,mongodb1.example.com:27017/?replicaSet=myRepl'
+        # For a sharded cluster, connect to the mongos instances; e.g.
+        # uriString = 'mongodb://mongos0.example.com:27017,mongos1.example.com:27017/'
+
+        client = await MongoClient(uriString)
+        wc_majority = WriteConcern("majority", wtimeout=1000)
+
+        # Prereq: Create collections.
+        await client.get_database("mydb1", write_concern=wc_majority).foo.insert_one({"abc": 0})
+        await client.get_database("mydb2", write_concern=wc_majority).bar.insert_one({"xyz": 0})
+
+        # Step 1: Define the callback that specifies the sequence of operations to perform inside the transaction.
+        async def callback(session):
+            collection_one = session.client.mydb1.foo
+            collection_two = session.client.mydb2.bar
+
+            # Important: You must pass the session to the operations.
+            await collection_one.insert_one({"abc": 1}, session=session)
+            await collection_two.insert_one({"xyz": 999}, session=session)
+
+        # Step 2: Start a client session.
+        async with client.start_session() as session:
+            # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error).
+            await session.with_transaction(callback)
+
+        # End Transactions withTxn API Example 1
+
+
+class TestCausalConsistencyExamples(AsyncIntegrationTest):
+    @async_client_context.require_secondaries_count(1)
+    async def test_causal_consistency(self):
+        # Causal consistency examples
+        client = self.client
+        self.addAsyncCleanup(client.drop_database, "test")
+        await client.test.drop_collection("items")
+        await client.test.items.insert_one(
+            {"sku": "111", "name": "Peanuts", "start": datetime.datetime.today()}
+        )
+
+        # Start Causal Consistency Example 1
+        async with client.start_session(causal_consistency=True) as s1:
+            current_date = datetime.datetime.today()
+            items = client.get_database(
+                "test",
+                read_concern=ReadConcern("majority"),
+                write_concern=WriteConcern("majority", wtimeout=1000),
+            ).items
+            await items.update_one(
+                {"sku": "111", "end": None}, {"$set": {"end": current_date}}, session=s1
+            )
+            await items.insert_one(
+                {"sku": "nuts-111", "name": "Pecans", "start": current_date}, session=s1
+            )
+        # End Causal Consistency Example 1
+
+        assert s1.cluster_time is not None
+        assert s1.operation_time is not None
+
+        # Start Causal Consistency Example 2
+        async with client.start_session(causal_consistency=True) as s2:
+            s2.advance_cluster_time(s1.cluster_time)
+            s2.advance_operation_time(s1.operation_time)
+
+            items = client.get_database(
+                "test",
+                read_preference=ReadPreference.SECONDARY,
+                read_concern=ReadConcern("majority"),
+                write_concern=WriteConcern("majority", wtimeout=1000),
+            ).items
+            async for item in items.find({"end": None}, session=s2):
+                print(item)
+        # End Causal Consistency Example 2
+
+
+class TestVersionedApiExamples(AsyncIntegrationTest):
+    @async_client_context.require_version_min(4, 7)
+    async def test_versioned_api(self):
+        # Versioned API examples
+        async def MongoClient(_, server_api):
+            return await self.async_rs_client(server_api=server_api,
connect=False) + + uri = None + + # Start Versioned API Example 1 + from pymongo.server_api import ServerApi + + await MongoClient(uri, server_api=ServerApi("1")) + # End Versioned API Example 1 + + # Start Versioned API Example 2 + await MongoClient(uri, server_api=ServerApi("1", strict=True)) + # End Versioned API Example 2 + + # Start Versioned API Example 3 + await MongoClient(uri, server_api=ServerApi("1", strict=False)) + # End Versioned API Example 3 + + # Start Versioned API Example 4 + await MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) + # End Versioned API Example 4 + + @unittest.skip("PYTHON-3167 count has been added to API version 1") + @async_client_context.require_version_min(4, 7) + async def test_versioned_api_migration(self): + # SERVER-58785 + if await async_client_context.is_topology_type( + ["sharded"] + ) and not async_client_context.version.at_least(5, 0, 2): + self.skipTest("This test needs MongoDB 5.0.2 or newer") + + client = await self.async_rs_client(server_api=ServerApi("1", strict=True)) + await client.db.sales.drop() + + # Start Versioned API Example 5 + def strptime(s): + return datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%SZ") + + await client.db.sales.insert_many( + [ + { + "_id": 1, + "item": "abc", + "price": 10, + "quantity": 2, + "date": strptime("2021-01-01T08:00:00Z"), + }, + { + "_id": 2, + "item": "jkl", + "price": 20, + "quantity": 1, + "date": strptime("2021-02-03T09:00:00Z"), + }, + { + "_id": 3, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-03T09:05:00Z"), + }, + { + "_id": 4, + "item": "abc", + "price": 10, + "quantity": 10, + "date": strptime("2021-02-15T08:00:00Z"), + }, + { + "_id": 5, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T09:05:00Z"), + }, + { + "_id": 6, + "item": "xyz", + "price": 5, + "quantity": 5, + "date": strptime("2021-02-15T12:05:10Z"), + }, + { + "_id": 7, + "item": "xyz", + "price": 5, + "quantity": 10, + "date": strptime("2021-02-15T14:12:12Z"), + }, + { + "_id": 8, + "item": "abc", + "price": 10, + "quantity": 5, + "date": strptime("2021-03-16T20:20:13Z"), + }, + ] + ) + # End Versioned API Example 5 + + with self.assertRaisesRegex( + OperationFailure, + "Provided apiStrict:true, but the command count is not in API Version 1", + ): + await client.db.command("count", "sales", query={}) + # Start Versioned API Example 6 + # pymongo.errors.OperationFailure: Provided apiStrict:true, but the command count is not in API Version 1, full error: {'ok': 0.0, 'errmsg': 'Provided apiStrict:true, but the command count is not in API Version 1', 'code': 323, 'codeName': 'APIStrictError'} + # End Versioned API Example 6 + + # Start Versioned API Example 7 + await client.db.sales.count_documents({}) + # End Versioned API Example 7 + + # Start Versioned API Example 8 + # 8 + # End Versioned API Example 8 + + +class TestSnapshotQueryExamples(AsyncIntegrationTest): + @async_client_context.require_version_min(5, 0) + async def test_snapshot_query(self): + client = self.client + + if not await async_client_context.is_topology_type(["replicaset", "sharded"]): + self.skipTest("Must be a sharded or replicaset") + + self.addAsyncCleanup(client.drop_database, "pets") + db = client.pets + await db.drop_collection("cats") + await db.drop_collection("dogs") + await db.cats.insert_one( + {"name": "Whiskers", "color": "white", "age": 10, "adoptable": True} + ) + await db.dogs.insert_one( + {"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True} + ) + 
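+        # Note: snapshot reads can fail with SnapshotUnavailable (error code
+        # 246) until the server's pending catalog changes for the collections
+        # created above settle, so wait for both collections to be readable in
+        # a snapshot session via check_for_snapshot (defined at the end of
+        # this class) before running the documented examples.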
+ async def predicate_one(): + return await self.check_for_snapshot(db.cats) + + async def predicate_two(): + return await self.check_for_snapshot(db.dogs) + + await async_wait_until(predicate_two, "success") + await async_wait_until(predicate_one, "success") + + # Start Snapshot Query Example 1 + + db = client.pets + async with client.start_session(snapshot=True) as s: + adoptablePetsCount = ( + await ( + await db.cats.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], + session=s, + ) + ).next() + )["adoptableCatsCount"] + + adoptablePetsCount += ( + await ( + await db.dogs.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], + session=s, + ) + ).next() + )["adoptableDogsCount"] + + print(adoptablePetsCount) + + # End Snapshot Query Example 1 + db = client.retail + self.addAsyncCleanup(client.drop_database, "retail") + await db.drop_collection("sales") + + saleDate = datetime.datetime.now() + await db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate}) + + async def predicate_three(): + return await self.check_for_snapshot(db.sales) + + await async_wait_until(predicate_three, "success") + + # Start Snapshot Query Example 2 + db = client.retail + async with client.start_session(snapshot=True) as s: + _ = ( + await ( + await db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ) + ).next() + )["totalDailySales"] + + # End Snapshot Query Example 2 + + async def check_for_snapshot(self, collection): + """Wait for snapshot reads to become available to prevent this error: + [246:SnapshotUnavailable]: Unable to read from a snapshot due to pending collection catalog changes; please retry the operation. Snapshot timestamp is Timestamp(1646666892, 4). Collection minimum is Timestamp(1646666892, 5) (on localhost:27017, modern retry, attempt 1) + From https://github.com/mongodb/mongo-ruby-driver/commit/7c4117b58e3d12e237f7536f7521e18fc15f79ac + """ + async with self.client.start_session(snapshot=True) as s: + try: + if await collection.find_one(session=s): + return True + return False + except OperationFailure as e: + # Retry them as the server demands... + if e.code == 246: # SnapshotUnavailable + return False + raise + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_grid_file.py b/test/asynchronous/test_grid_file.py new file mode 100644 index 0000000000..2a7e9e1f9d --- /dev/null +++ b/test/asynchronous/test_grid_file.py @@ -0,0 +1,873 @@ +# +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
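+
+# A minimal sketch of the round trip these tests exercise, assuming a running
+# mongod on the default host/port and a coroutine context (the database name
+# "my_db" and the payload are illustrative only; the classes are the ones
+# imported below):
+#
+#     client = AsyncMongoClient()
+#     fin = AsyncGridIn(client.my_db.fs, filename="example")
+#     await fin.write(b"payload")
+#     await fin.close()
+#     fout = AsyncGridOut(client.my_db.fs, fin._id)
+#     assert await fout.read() == b"payload"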
+ +"""Tests for the grid_file module.""" +from __future__ import annotations + +import datetime +import io +import sys +import zipfile +from io import BytesIO +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncUnitTest, + async_client_context, + qcheck, + unittest, +) + +from pymongo.asynchronous.database import AsyncDatabase + +sys.path[0:0] = [""] + +from test.utils_shared import OvertCommandListener + +from bson.objectid import ObjectId +from gridfs.asynchronous.grid_file import ( + _SEEK_CUR, + _SEEK_END, + DEFAULT_CHUNK_SIZE, + AsyncGridFS, + AsyncGridIn, + AsyncGridOut, + AsyncGridOutCursor, +) +from gridfs.errors import NoFile +from pymongo import AsyncMongoClient +from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError +from pymongo.message import _CursorAddress + +_IS_SYNC = False + + +class AsyncTestGridFileNoConnect(AsyncUnitTest): + """Test GridFile features on a client that does not connect.""" + + db: AsyncDatabase + + @classmethod + def setUpClass(cls): + cls.db = AsyncMongoClient(connect=False).pymongo_test + + def test_grid_in_custom_opts(self): + self.assertRaises(TypeError, AsyncGridIn, "foo") + + a = AsyncGridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) + + self.assertEqual(5, a._id) + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + self.assertEqual("text/html", a.content_type) + self.assertEqual(1000, a.chunk_size) + self.assertEqual(["foo"], a.aliases) + self.assertEqual({"foo": 1, "bar": 2}, a.metadata) + self.assertEqual(3, a.bar) + self.assertEqual("hello", a.baz) + self.assertRaises(AttributeError, getattr, a, "mike") + + b = AsyncGridIn(self.db.fs, content_type="text/html", chunk_size=1000, baz=100) + self.assertEqual("text/html", b.content_type) + self.assertEqual(1000, b.chunk_size) + self.assertEqual(100, b.baz) + + +class AsyncTestGridFile(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + await self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) + + async def test_basic(self): + f = AsyncGridIn(self.db.fs, filename="test") + await f.write(b"hello world") + await f.close() + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + + # make sure it's still there... 
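+        # (a fresh AsyncGridOut re-reads the same _id from the server)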
+ g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + + f = AsyncGridIn(self.db.fs, filename="test") + await f.close() + self.assertEqual(2, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"", await g.read()) + + # test that reading 0 returns proper type + self.assertEqual(b"", await g.read(0)) + + async def test_md5(self): + f = AsyncGridIn(self.db.fs) + await f.write(b"hello world\n") + await f.close() + self.assertEqual(None, f.md5) + + async def test_alternate_collection(self): + await self.db.alt.files.delete_many({}) + await self.db.alt.chunks.delete_many({}) + + f = AsyncGridIn(self.db.alt) + await f.write(b"hello world") + await f.close() + + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + g = AsyncGridOut(self.db.alt, f._id) + self.assertEqual(b"hello world", await g.read()) + + async def test_grid_in_default_opts(self): + self.assertRaises(TypeError, AsyncGridIn, "foo") + + a = AsyncGridIn(self.db.fs) + + self.assertIsInstance(a._id, ObjectId) + self.assertRaises(AttributeError, setattr, a, "_id", 5) + + self.assertEqual(None, a.filename) + self.assertEqual(None, a.name) + a.filename = "my_file" + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + + self.assertEqual(None, a.content_type) + a.content_type = "text/html" + + self.assertEqual("text/html", a.content_type) + + self.assertRaises(AttributeError, getattr, a, "length") + self.assertRaises(AttributeError, setattr, a, "length", 5) + + self.assertEqual(255 * 1024, a.chunk_size) + self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) + + self.assertRaises(AttributeError, getattr, a, "upload_date") + self.assertRaises(AttributeError, setattr, a, "upload_date", 5) + + self.assertRaises(AttributeError, getattr, a, "aliases") + a.aliases = ["foo"] + + self.assertEqual(["foo"], a.aliases) + + self.assertRaises(AttributeError, getattr, a, "metadata") + a.metadata = {"foo": 1} + + self.assertEqual({"foo": 1}, a.metadata) + + self.assertRaises(AttributeError, setattr, a, "md5", 5) + + await a.close() + + if _IS_SYNC: + a.forty_two = 42 + else: + self.assertRaises(AttributeError, setattr, a, "forty_two", 42) + await a.set("forty_two", 42) + + self.assertEqual(42, a.forty_two) + + self.assertIsInstance(a._id, ObjectId) + self.assertRaises(AttributeError, setattr, a, "_id", 5) + + self.assertEqual("my_file", a.filename) + self.assertEqual("my_file", a.name) + + self.assertEqual("text/html", a.content_type) + + self.assertEqual(0, a.length) + self.assertRaises(AttributeError, setattr, a, "length", 5) + + self.assertEqual(255 * 1024, a.chunk_size) + self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) + + self.assertIsInstance(a.upload_date, datetime.datetime) + self.assertRaises(AttributeError, setattr, a, "upload_date", 5) + + self.assertEqual(["foo"], a.aliases) + + self.assertEqual({"foo": 1}, a.metadata) + + self.assertEqual(None, a.md5) + self.assertRaises(AttributeError, setattr, a, "md5", 5) + + # Make sure custom attributes that were set both before and after + # a.close() are reflected in b. PYTHON-411. 
+ b = await AsyncGridFS(self.db).get_last_version(filename=a.filename) + self.assertEqual(a.metadata, b.metadata) + self.assertEqual(a.aliases, b.aliases) + self.assertEqual(a.forty_two, b.forty_two) + + async def test_grid_out_default_opts(self): + self.assertRaises(TypeError, AsyncGridOut, "foo") + + gout = AsyncGridOut(self.db.fs, 5) + with self.assertRaises(NoFile): + if not _IS_SYNC: + await gout.open() + gout.name + + a = AsyncGridIn(self.db.fs) + await a.close() + + b = AsyncGridOut(self.db.fs, a._id) + if not _IS_SYNC: + await b.open() + + self.assertEqual(a._id, b._id) + self.assertEqual(0, b.length) + self.assertEqual(None, b.content_type) + self.assertEqual(None, b.name) + self.assertEqual(None, b.filename) + self.assertEqual(255 * 1024, b.chunk_size) + self.assertIsInstance(b.upload_date, datetime.datetime) + self.assertEqual(None, b.aliases) + self.assertEqual(None, b.metadata) + self.assertEqual(None, b.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, b, attr, 5) + + async def test_grid_out_cursor_options(self): + self.assertRaises( + TypeError, AsyncGridOutCursor.__init__, self.db.fs, {}, projection={"filename": 1} + ) + + cursor = AsyncGridOutCursor(self.db.fs, {}) + cursor_clone = cursor.clone() + + cursor_dict = cursor.__dict__.copy() + cursor_dict.pop("_session") + cursor_clone_dict = cursor_clone.__dict__.copy() + cursor_clone_dict.pop("_session") + self.assertDictEqual(cursor_dict, cursor_clone_dict) + + self.assertRaises(NotImplementedError, cursor.add_option, 0) + self.assertRaises(NotImplementedError, cursor.remove_option, 0) + + async def test_grid_out_custom_opts(self): + one = AsyncGridIn( + self.db.fs, + _id=5, + filename="my_file", + contentType="text/html", + chunkSize=1000, + aliases=["foo"], + metadata={"foo": 1, "bar": 2}, + bar=3, + baz="hello", + ) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(self.db.fs, 5) + + if not _IS_SYNC: + await two.open() + + self.assertEqual("my_file", two.name) + self.assertEqual("my_file", two.filename) + self.assertEqual(5, two._id) + self.assertEqual(11, two.length) + self.assertEqual("text/html", two.content_type) + self.assertEqual(1000, two.chunk_size) + self.assertIsInstance(two.upload_date, datetime.datetime) + self.assertEqual(["foo"], two.aliases) + self.assertEqual({"foo": 1, "bar": 2}, two.metadata) + self.assertEqual(3, two.bar) + self.assertEqual(None, two.md5) + + for attr in [ + "_id", + "name", + "content_type", + "length", + "chunk_size", + "upload_date", + "aliases", + "metadata", + "md5", + ]: + self.assertRaises(AttributeError, setattr, two, attr, 5) + + async def test_grid_out_file_document(self): + one = AsyncGridIn(self.db.fs) + await one.write(b"foo bar") + await one.close() + + two = AsyncGridOut(self.db.fs, file_document=await self.db.fs.files.find_one()) + self.assertEqual(b"foo bar", await two.read()) + + three = AsyncGridOut(self.db.fs, 5, file_document=await self.db.fs.files.find_one()) + self.assertEqual(b"foo bar", await three.read()) + + four = AsyncGridOut(self.db.fs, file_document={}) + with self.assertRaises(NoFile): + if not _IS_SYNC: + await four.open() + four.name + + async def test_write_file_like(self): + one = AsyncGridIn(self.db.fs) + await one.write(b"hello world") + await one.close() + + two = AsyncGridOut(self.db.fs, one._id) + + three = AsyncGridIn(self.db.fs) + await three.write(two) + await three.close() + + 
four = AsyncGridOut(self.db.fs, three._id) + self.assertEqual(b"hello world", await four.read()) + + five = AsyncGridIn(self.db.fs, chunk_size=2) + await five.write(b"hello") + buffer = BytesIO(b" world") + await five.write(buffer) + await five.write(b" and mongodb") + await five.close() + self.assertEqual( + b"hello world and mongodb", await AsyncGridOut(self.db.fs, five._id).read() + ) + + async def test_write_lines(self): + a = AsyncGridIn(self.db.fs) + await a.writelines([b"hello ", b"world"]) + await a.close() + + self.assertEqual(b"hello world", await AsyncGridOut(self.db.fs, a._id).read()) + + async def test_close(self): + f = AsyncGridIn(self.db.fs) + await f.close() + with self.assertRaises(ValueError): + await f.write("test") + await f.close() + + async def test_closed(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write(b"Hello world.\nHow are you?") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + if not _IS_SYNC: + await g.open() + self.assertFalse(g.closed) + await g.read(1) + self.assertFalse(g.closed) + await g.read(100) + self.assertFalse(g.closed) + await g.close() + self.assertTrue(g.closed) + + async def test_multi_chunk_file(self): + random_string = b"a" * (DEFAULT_CHUNK_SIZE + 1000) + + f = AsyncGridIn(self.db.fs) + await f.write(random_string) + await f.close() + + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(2, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(random_string, await g.read()) + + async def test_small_chunks(self): + self.files = 0 + self.chunks = 0 + + async def helper(data): + f = AsyncGridIn(self.db.fs, chunkSize=1) + await f.write(data) + await f.close() + + self.files += 1 + self.chunks += len(data) + + self.assertEqual(self.files, await self.db.fs.files.count_documents({})) + self.assertEqual(self.chunks, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(data, await g.read()) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(data, await g.read(10) + await g.read(10)) + return True + + await qcheck.check_unittest(self, helper, qcheck.gen_string(qcheck.gen_range(0, 20))) + + async def test_seek(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world", await g.read()) + await g.seek(0) + self.assertEqual(b"hello world", await g.read()) + await g.seek(1) + self.assertEqual(b"ello world", await g.read()) + with self.assertRaises(IOError): + await g.seek(-1) + + await g.seek(-3, _SEEK_END) + self.assertEqual(b"rld", await g.read()) + await g.seek(0, _SEEK_END) + self.assertEqual(b"", await g.read()) + with self.assertRaises(IOError): + await g.seek(-100, _SEEK_END) + + await g.seek(3) + await g.seek(3, _SEEK_CUR) + self.assertEqual(b"world", await g.read()) + with self.assertRaises(IOError): + await g.seek(-100, _SEEK_CUR) + + async def test_tell(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(0, g.tell()) + await g.read(0) + self.assertEqual(0, g.tell()) + await g.read(1) + self.assertEqual(1, g.tell()) + await g.read(2) + self.assertEqual(3, g.tell()) + await g.read() + self.assertEqual(g.length, g.tell()) + + async def test_multiple_reads(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = 
AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"he", await g.read(2)) + self.assertEqual(b"ll", await g.read(2)) + self.assertEqual(b"o ", await g.read(2)) + self.assertEqual(b"wo", await g.read(2)) + self.assertEqual(b"rl", await g.read(2)) + self.assertEqual(b"d", await g.read(2)) + self.assertEqual(b"", await g.read(2)) + + async def test_readline(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write( + b"""Hello world, +How are you? +Hope all is well. +Bye""" + ) + await f.close() + + # Try read(), then readline(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"H", await g.read(1)) + self.assertEqual(b"ello world,\n", await g.readline()) + self.assertEqual(b"How a", await g.readline(5)) + self.assertEqual(b"", await g.readline(0)) + self.assertEqual(b"re you?\n", await g.readline()) + self.assertEqual(b"Hope all is well.\n", await g.readline(1000)) + self.assertEqual(b"Bye", await g.readline()) + self.assertEqual(b"", await g.readline()) + + # Try readline() first, then read(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"He", await g.readline(2)) + self.assertEqual(b"l", await g.read(1)) + self.assertEqual(b"lo", await g.readline(2)) + self.assertEqual(b" world,\n", await g.readline()) + + # Only readline(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"H", await g.readline(1)) + self.assertEqual(b"e", await g.readline(1)) + self.assertEqual(b"llo world,\n", await g.readline()) + + async def test_readlines(self): + f = AsyncGridIn(self.db.fs, chunkSize=5) + await f.write( + b"""Hello world, +How are you? +Hope all is well. +Bye""" + ) + await f.close() + + # Try read(), then readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"He", await g.read(2)) + self.assertEqual([b"llo world,\n", b"How are you?\n"], await g.readlines(11)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], await g.readlines()) + self.assertEqual([], await g.readlines()) + + # Try readline(), then readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"Hello world,\n", await g.readline()) + self.assertEqual([b"How are you?\n", b"Hope all is well.\n"], await g.readlines(13)) + self.assertEqual(b"Bye", await g.readline()) + self.assertEqual([], await g.readlines()) + + # Only readlines(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + await g.readlines(), + ) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual( + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + await g.readlines(0), + ) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual([b"How are you?\n"], await g.readlines(12)) + self.assertEqual([b"Hope all is well.\n", b"Bye"], await g.readlines(18)) + + # Try readlines() first, then read(). + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual(b"H", await g.read(1)) + self.assertEqual([b"ow are you?\n", b"Hope all is well.\n"], await g.readlines(29)) + self.assertEqual([b"Bye"], await g.readlines(1)) + + # Try readlines() first, then readline(). 
+ g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual([b"Hello world,\n"], await g.readlines(1)) + self.assertEqual(b"How are you?\n", await g.readline()) + self.assertEqual([b"Hope all is well.\n"], await g.readlines(17)) + self.assertEqual(b"Bye", await g.readline()) + + async def test_iterator(self): + f = AsyncGridIn(self.db.fs) + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], await g.to_list()) + + f = AsyncGridIn(self.db.fs) + await f.write(b"hello world\nhere are\nsome lines.") + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + else: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], await g.to_list()) + + self.assertEqual(b"", await g.read(5)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], await g.to_list()) + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"hello world\n", await anext(aiter(g))) + self.assertEqual(b"here", await g.read(4)) + self.assertEqual(b" are\n", await anext(aiter(g))) + self.assertEqual(b"some lines", await g.read(10)) + self.assertEqual(b".", await anext(aiter(g))) + with self.assertRaises(StopAsyncIteration): + await aiter(g).__anext__() + + f = AsyncGridIn(self.db.fs, chunk_size=2) + await f.write(b"hello world") + await f.close() + g = AsyncGridOut(self.db.fs, f._id) + if _IS_SYNC: + self.assertEqual([b"hello world"], list(g)) + else: + self.assertEqual([b"hello world"], await g.to_list()) + + async def test_read_unaligned_buffer_size(self): + in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." + f = AsyncGridIn(self.db.fs, chunkSize=16) + await f.write(in_data) + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + out_data = b"" + while 1: + s = await g.read(13) + if not s: + break + out_data += s + + self.assertEqual(in_data, out_data) + + async def test_readchunk(self): + in_data = b"a" * 10 + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(in_data) + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(3, len(await g.readchunk())) + + self.assertEqual(2, len(await g.read(2))) + self.assertEqual(1, len(await g.readchunk())) + + self.assertEqual(3, len(await g.read(3))) + + self.assertEqual(1, len(await g.readchunk())) + + self.assertEqual(0, len(await g.readchunk())) + + async def test_write_unicode(self): + f = AsyncGridIn(self.db.fs) + with self.assertRaises(TypeError): + await f.write("foo") + + f = AsyncGridIn(self.db.fs, encoding="utf-8") + await f.write("foo") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual(b"foo", await g.read()) + + f = AsyncGridIn(self.db.fs, encoding="iso-8859-1") + await f.write("aé") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + self.assertEqual("aé".encode("iso-8859-1"), await g.read()) + + async def test_set_after_close(self): + f = AsyncGridIn(self.db.fs, _id="foo", bar="baz") + + self.assertEqual("foo", f._id) + self.assertEqual("baz", f.bar) + self.assertRaises(AttributeError, getattr, f, "baz") + self.assertRaises(AttributeError, getattr, f, "uploadDate") + + self.assertRaises(AttributeError, setattr, f, "_id", 5) + if _IS_SYNC: + f.bar = "foo" + f.baz = 5 + else: + await f.set("bar", "foo") + await f.set("baz", 5) + + self.assertEqual("foo", f._id) + self.assertEqual("foo", f.bar) + self.assertEqual(5, f.baz) + self.assertRaises(AttributeError, getattr, f, 
"uploadDate") + + await f.close() + + self.assertEqual("foo", f._id) + self.assertEqual("foo", f.bar) + self.assertEqual(5, f.baz) + self.assertTrue(f.uploadDate) + + self.assertRaises(AttributeError, setattr, f, "_id", 5) + if _IS_SYNC: + f.bar = "a" + f.baz = "b" + else: + await f.set("bar", "a") + await f.set("baz", "b") + self.assertRaises(AttributeError, setattr, f, "upload_date", 5) + + g = AsyncGridOut(self.db.fs, f._id) + if not _IS_SYNC: + await g.open() + self.assertEqual("a", g.bar) + self.assertEqual("b", g.baz) + # Versions 2.0.1 and older saved a _closed field for some reason. + self.assertRaises(AttributeError, getattr, g, "_closed") + + async def test_context_manager(self): + contents = b"Imagine this is some important data..." + + async with AsyncGridIn(self.db.fs, filename="important") as infile: + await infile.write(contents) + + async with AsyncGridOut(self.db.fs, infile._id) as outfile: + self.assertEqual(contents, await outfile.read()) + + async def test_exception_file_non_existence(self): + contents = b"Imagine this is some important data..." + + with self.assertRaises(ConnectionError): + async with AsyncGridIn(self.db.fs, filename="important") as infile: + await infile.write(contents) + raise ConnectionError("Test exception") + + # Expectation: File chunks are written, entry in files doesn't appear. + self.assertEqual( + await self.db.fs.chunks.count_documents({"files_id": infile._id}), infile._chunk_number + ) + + self.assertIsNone(await self.db.fs.files.find_one({"_id": infile._id})) + self.assertTrue(infile.closed) + + async def test_prechunked_string(self): + async def write_me(s, chunk_size): + buf = BytesIO(s) + infile = AsyncGridIn(self.db.fs) + while True: + to_write = buf.read(chunk_size) + if to_write == b"": + break + await infile.write(to_write) + await infile.close() + buf.close() + + outfile = AsyncGridOut(self.db.fs, infile._id) + data = await outfile.read() + self.assertEqual(s, data) + + s = b"x" * DEFAULT_CHUNK_SIZE * 4 + # Test with default chunk size + await write_me(s, DEFAULT_CHUNK_SIZE) + # Multiple + await write_me(s, DEFAULT_CHUNK_SIZE * 3) + # Custom + await write_me(s, 262300) + + async def test_grid_out_lazy_connect(self): + fs = self.db.fs + outfile = AsyncGridOut(fs, file_id=-1) + with self.assertRaises(NoFile): + await outfile.read() + with self.assertRaises(NoFile): + if not _IS_SYNC: + await outfile.open() + outfile.filename + + infile = AsyncGridIn(fs, filename=1) + await infile.close() + + outfile = AsyncGridOut(fs, infile._id) + await outfile.read() + outfile.filename + + outfile = AsyncGridOut(fs, infile._id) + await outfile.readchunk() + + async def test_grid_in_lazy_connect(self): + client = self.simple_client("badhost", connect=False, serverSelectionTimeoutMS=10) + fs = client.db.fs + infile = AsyncGridIn(fs, file_id=-1, chunk_size=1) + with self.assertRaises(ServerSelectionTimeoutError): + await infile.write(b"data") + with self.assertRaises(ServerSelectionTimeoutError): + await infile.close() + + async def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + AsyncGridIn((await self.async_rs_or_single_client(w=0)).pymongo_test.fs) + + async def test_survive_cursor_not_found(self): + # By default the find command returns 101 documents in the first batch. + # Use 102 batches to cause a single getMore. 
+ chunk_size = 1024 + data = b"d" * (102 * chunk_size) + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client.pymongo_test + async with AsyncGridIn(db.fs, chunk_size=chunk_size) as infile: + await infile.write(data) + + async with AsyncGridOut(db.fs, infile._id) as outfile: + self.assertEqual(len(await outfile.readchunk()), chunk_size) + + # Kill the cursor to simulate the cursor timing out on the server + # when an application spends a long time between two calls to + # readchunk(). + assert await client.address is not None + await client._close_cursor_now( + outfile._chunk_iter._cursor.cursor_id, + _CursorAddress(await client.address, db.fs.chunks.full_name), # type: ignore[arg-type] + ) + + # Read the rest of the file without error. + self.assertEqual(len(await outfile.read()), len(data) - chunk_size) + + # Paranoid, ensure that a getMore was actually sent. + self.assertIn("getMore", listener.started_command_names()) + + @async_client_context.require_sync + async def test_zip(self): + zf = BytesIO() + z = zipfile.ZipFile(zf, "w") + z.writestr("test.txt", b"hello world") + z.close() + zf.seek(0) + + f = AsyncGridIn(self.db.fs, filename="test.zip") + await f.write(zf) + await f.close() + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + g = AsyncGridOut(self.db.fs, f._id) + z = zipfile.ZipFile(g) + self.assertSequenceEqual(z.namelist(), ["test.txt"]) + self.assertEqual(z.read("test.txt"), b"hello world") + + async def test_grid_out_unsupported_operations(self): + f = AsyncGridIn(self.db.fs, chunkSize=3) + await f.write(b"hello world") + await f.close() + + g = AsyncGridOut(self.db.fs, f._id) + + self.assertRaises(io.UnsupportedOperation, g.writelines, [b"some", b"lines"]) + self.assertRaises(io.UnsupportedOperation, g.write, b"some text") + self.assertRaises(io.UnsupportedOperation, g.fileno) + self.assertRaises(io.UnsupportedOperation, g.truncate) + + self.assertFalse(g.writable()) + self.assertFalse(g.isatty()) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_gridfs.py b/test/asynchronous/test_gridfs.py new file mode 100644 index 0000000000..f60352f3cb --- /dev/null +++ b/test/asynchronous/test_gridfs.py @@ -0,0 +1,603 @@ +# +# Copyright 2009-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
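+
+# A minimal sketch of the legacy AsyncGridFS API these tests exercise,
+# assuming a running mongod and a coroutine context (the database name
+# "my_db" and the filename are illustrative only):
+#
+#     fs = gridfs.AsyncGridFS(AsyncMongoClient().my_db)
+#     oid = await fs.put(b"hello world", filename="greeting")
+#     data = await (await fs.get(oid)).read()
+#     newest = await (await fs.get_last_version("greeting")).read()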
+ +"""Tests for the gridfs package.""" +from __future__ import annotations + +import asyncio +import datetime +import sys +import threading +import time +from io import BytesIO +from test.asynchronous.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import async_joinall +from test.utils_shared import one + +import gridfs +from bson.binary import Binary +from gridfs.asynchronous.grid_file import DEFAULT_CHUNK_SIZE, AsyncGridOutCursor +from gridfs.errors import CorruptGridFile, FileExists, NoFile +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, +) +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + + +class JustWrite(ConcurrentRunner): + def __init__(self, fs, n): + super().__init__() + self.fs = fs + self.n = n + self.daemon = True + + async def run(self): + for _ in range(self.n): + file = self.fs.new_file(filename="test") + await file.write(b"hello") + await file.close() + + +class JustRead(ConcurrentRunner): + def __init__(self, fs, n, results): + super().__init__() + self.fs = fs + self.n = n + self.results = results + self.daemon = True + + async def run(self): + for _ in range(self.n): + file = await self.fs.get("test") + data = await file.read() + self.results.append(data) + assert data == b"hello" + + +class TestGridfsNoConnect(unittest.IsolatedAsyncioTestCase): + db: AsyncDatabase + + async def asyncSetUp(self): + await super().asyncSetUp() + self.db = AsyncMongoClient(connect=False).pymongo_test + + async def test_gridfs(self): + self.assertRaises(TypeError, gridfs.AsyncGridFS, "foo") + self.assertRaises(TypeError, gridfs.AsyncGridFS, self.db, 5) + + +class TestGridfs(AsyncIntegrationTest): + fs: gridfs.AsyncGridFS + alt: gridfs.AsyncGridFS + + async def asyncSetUp(self): + await super().asyncSetUp() + self.fs = gridfs.AsyncGridFS(self.db) + self.alt = gridfs.AsyncGridFS(self.db, "alt") + await self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) + + async def test_basic(self): + oid = await self.fs.put(b"hello world") + self.assertEqual(b"hello world", await (await self.fs.get(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + await self.fs.delete(oid) + with self.assertRaises(NoFile): + await self.fs.get(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.fs.get("foo") + oid = await self.fs.put(b"hello world", _id="foo") + self.assertEqual("foo", oid) + self.assertEqual(b"hello world", await (await self.fs.get("foo")).read()) + + async def test_multi_chunk_delete(self): + await self.db.fs.drop() + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFS(self.db) + oid = await gfs.put(b"hello", chunkSize=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await 
self.db.fs.chunks.count_documents({})) + + async def test_list(self): + self.assertEqual([], await self.fs.list()) + await self.fs.put(b"hello world") + self.assertEqual([], await self.fs.list()) + + # PYTHON-598: in server versions before 2.5.x, creating an index on + # filename, uploadDate causes list() to include None. + await self.fs.get_last_version() + self.assertEqual([], await self.fs.list()) + + await self.fs.put(b"", filename="mike") + await self.fs.put(b"foo", filename="test") + await self.fs.put(b"", filename="hello world") + + self.assertEqual({"mike", "test", "hello world"}, set(await self.fs.list())) + + async def test_empty_file(self): + oid = await self.fs.put(b"") + self.assertEqual(b"", await (await self.fs.get(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + raw = await self.db.fs.files.find_one() + assert raw is not None + self.assertEqual(0, raw["length"]) + self.assertEqual(oid, raw["_id"]) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) + self.assertEqual(255 * 1024, raw["chunkSize"]) + self.assertNotIn("md5", raw) + + async def test_corrupt_chunk(self): + files_id = await self.fs.put(b"foobar") + await self.db.fs.chunks.update_one( + {"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}} + ) + try: + out = await self.fs.get(files_id) + with self.assertRaises(CorruptGridFile): + await out.read() + + out = await self.fs.get(files_id) + with self.assertRaises(CorruptGridFile): + await out.readline() + finally: + await self.fs.delete(files_id) + + async def test_put_ensures_index(self): + chunks = self.db.fs.chunks + files = self.db.fs.files + # Ensure the collections are removed. + await chunks.drop() + await files.drop() + await self.fs.put(b"junk") + + self.assertTrue( + any( + info.get("key") == [("files_id", 1), ("n", 1)] + for info in (await chunks.index_information()).values() + ) + ) + self.assertTrue( + any( + info.get("key") == [("filename", 1), ("uploadDate", 1)] + for info in (await files.index_information()).values() + ) + ) + + async def test_alt_collection(self): + oid = await self.alt.put(b"hello world") + self.assertEqual(b"hello world", await (await self.alt.get(oid)).read()) + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + await self.alt.delete(oid) + with self.assertRaises(NoFile): + await self.alt.get(oid) + self.assertEqual(0, await self.db.alt.files.count_documents({})) + self.assertEqual(0, await self.db.alt.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.alt.get("foo") + oid = await self.alt.put(b"hello world", _id="foo") + self.assertEqual("foo", oid) + self.assertEqual(b"hello world", await (await self.alt.get("foo")).read()) + + await self.alt.put(b"", filename="mike") + await self.alt.put(b"foo", filename="test") + await self.alt.put(b"", filename="hello world") + + self.assertEqual({"mike", "test", "hello world"}, set(await self.alt.list())) + + async def test_threaded_reads(self): + await self.fs.put(b"hello", _id="test") + + tasks = [] + results: list = [] + for i in range(10): + tasks.append(JustRead(self.fs, 10, results)) + await tasks[i].start() + + await async_joinall(tasks) + + self.assertEqual(100 * [b"hello"], results) + + async def test_threaded_writes(self): + tasks = [] + for i in range(10): + tasks.append(JustWrite(self.fs, 10)) + await tasks[i].start() + + await async_joinall(tasks) + + f 
= await self.fs.get_last_version("test") + self.assertEqual(await f.read(), b"hello") + + # Should have created 100 versions of 'test' file + self.assertEqual(100, await self.db.fs.files.count_documents({"filename": "test"})) + + async def test_get_last_version(self): + one = await self.fs.put(b"foo", filename="test") + await asyncio.sleep(0.01) + two = self.fs.new_file(filename="test") + await two.write(b"bar") + await two.close() + await asyncio.sleep(0.01) + two = two._id + three = await self.fs.put(b"baz", filename="test") + + self.assertEqual(b"baz", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(three) + self.assertEqual(b"bar", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.get_last_version("test")).read()) + await self.fs.delete(one) + with self.assertRaises(NoFile): + await self.fs.get_last_version("test") + + async def test_get_last_version_with_metadata(self): + one = await self.fs.put(b"foo", filename="test", author="author") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author") + + self.assertEqual(b"bar", await (await self.fs.get_last_version(author="author")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.get_last_version(author="author")).read()) + await self.fs.delete(one) + + one = await self.fs.put(b"foo", filename="test", author="author1") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author2") + + self.assertEqual(b"foo", await (await self.fs.get_last_version(author="author1")).read()) + self.assertEqual(b"bar", await (await self.fs.get_last_version(author="author2")).read()) + self.assertEqual(b"bar", await (await self.fs.get_last_version(filename="test")).read()) + + with self.assertRaises(NoFile): + await self.fs.get_last_version(author="author3") + with self.assertRaises(NoFile): + await self.fs.get_last_version(filename="nottest", author="author1") + + await self.fs.delete(one) + await self.fs.delete(two) + + async def test_get_version(self): + await self.fs.put(b"foo", filename="test") + await asyncio.sleep(0.01) + await self.fs.put(b"bar", filename="test") + await asyncio.sleep(0.01) + await self.fs.put(b"baz", filename="test") + await asyncio.sleep(0.01) + + self.assertEqual(b"foo", await (await self.fs.get_version("test", 0)).read()) + self.assertEqual(b"bar", await (await self.fs.get_version("test", 1)).read()) + self.assertEqual(b"baz", await (await self.fs.get_version("test", 2)).read()) + + self.assertEqual(b"baz", await (await self.fs.get_version("test", -1)).read()) + self.assertEqual(b"bar", await (await self.fs.get_version("test", -2)).read()) + self.assertEqual(b"foo", await (await self.fs.get_version("test", -3)).read()) + + with self.assertRaises(NoFile): + await self.fs.get_version("test", 3) + with self.assertRaises(NoFile): + await self.fs.get_version("test", -4) + + async def test_get_version_with_metadata(self): + one = await self.fs.put(b"foo", filename="test", author="author1") + await asyncio.sleep(0.01) + two = await self.fs.put(b"bar", filename="test", author="author1") + await asyncio.sleep(0.01) + three = await self.fs.put(b"baz", filename="test", author="author2") + + self.assertEqual( + b"foo", + await (await self.fs.get_version(filename="test", author="author1", version=-2)).read(), + ) + self.assertEqual( + b"bar", + await (await self.fs.get_version(filename="test", author="author1", 
version=-1)).read(), + ) + self.assertEqual( + b"foo", + await (await self.fs.get_version(filename="test", author="author1", version=0)).read(), + ) + self.assertEqual( + b"bar", + await (await self.fs.get_version(filename="test", author="author1", version=1)).read(), + ) + self.assertEqual( + b"baz", + await (await self.fs.get_version(filename="test", author="author2", version=0)).read(), + ) + self.assertEqual( + b"baz", await (await self.fs.get_version(filename="test", version=-1)).read() + ) + self.assertEqual( + b"baz", await (await self.fs.get_version(filename="test", version=2)).read() + ) + + with self.assertRaises(NoFile): + await self.fs.get_version(filename="test", author="author3") + with self.assertRaises(NoFile): + await self.fs.get_version(filename="test", author="author1", version=2) + + await self.fs.delete(one) + await self.fs.delete(two) + await self.fs.delete(three) + + async def test_put_filelike(self): + oid = await self.fs.put(BytesIO(b"hello world"), chunk_size=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + self.assertEqual(b"hello world", await (await self.fs.get(oid)).read()) + + async def test_file_exists(self): + oid = await self.fs.put(b"hello") + with self.assertRaises(FileExists): + await self.fs.put(b"world", _id=oid) + + one = self.fs.new_file(_id=123) + await one.write(b"some content") + await one.close() + + # Attempt to upload a file with more chunks to the same _id. + with patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): + two = self.fs.new_file(_id=123) + with self.assertRaises(FileExists): + await two.write(b"x" * DEFAULT_CHUNK_SIZE * 3) + # Original file is still readable (no extra chunks were uploaded). + self.assertEqual(await (await self.fs.get(123)).read(), b"some content") + + two = self.fs.new_file(_id=123) + await two.write(b"some content") + with self.assertRaises(FileExists): + await two.close() + # Original file is still readable. 
+ self.assertEqual(await (await self.fs.get(123)).read(), b"some content") + + async def test_exists(self): + oid = await self.fs.put(b"hello") + self.assertTrue(await self.fs.exists(oid)) + self.assertTrue(await self.fs.exists({"_id": oid})) + self.assertTrue(await self.fs.exists(_id=oid)) + + self.assertFalse(await self.fs.exists(filename="mike")) + self.assertFalse(await self.fs.exists("mike")) + + oid = await self.fs.put(b"hello", filename="mike", foo=12) + self.assertTrue(await self.fs.exists(oid)) + self.assertTrue(await self.fs.exists({"_id": oid})) + self.assertTrue(await self.fs.exists(_id=oid)) + self.assertTrue(await self.fs.exists(filename="mike")) + self.assertTrue(await self.fs.exists({"filename": "mike"})) + self.assertTrue(await self.fs.exists(foo=12)) + self.assertTrue(await self.fs.exists({"foo": 12})) + self.assertTrue(await self.fs.exists(foo={"$gt": 11})) + self.assertTrue(await self.fs.exists({"foo": {"$gt": 11}})) + + self.assertFalse(await self.fs.exists(foo=13)) + self.assertFalse(await self.fs.exists({"foo": 13})) + self.assertFalse(await self.fs.exists(foo={"$gt": 12})) + self.assertFalse(await self.fs.exists({"foo": {"$gt": 12}})) + + async def test_put_unicode(self): + with self.assertRaises(TypeError): + await self.fs.put("hello") + + oid = await self.fs.put("hello", encoding="utf-8") + self.assertEqual(b"hello", await (await self.fs.get(oid)).read()) + self.assertEqual("utf-8", (await self.fs.get(oid)).encoding) + + oid = await self.fs.put("aé", encoding="iso-8859-1") + self.assertEqual("aé".encode("iso-8859-1"), await (await self.fs.get(oid)).read()) + self.assertEqual("iso-8859-1", (await self.fs.get(oid)).encoding) + + async def test_missing_length_iter(self): + # Test fix that guards against PHP-237 + await self.fs.put(b"", filename="empty") + doc = await self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None + doc.pop("length") + await self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) + f = await self.fs.get_last_version(filename="empty") + + async def iterate_file(grid_file): + async for _chunk in grid_file: + pass + return True + + self.assertTrue(await iterate_file(f)) + + async def test_gridfs_lazy_connect(self): + client = await self.async_single_client( + "badhost", connect=False, serverSelectionTimeoutMS=10 + ) + db = client.db + gfs = gridfs.AsyncGridFS(db) + with self.assertRaises(ServerSelectionTimeoutError): + await gfs.list() + + fs = gridfs.AsyncGridFS(db) + f = fs.new_file() + with self.assertRaises(ServerSelectionTimeoutError): + await f.close() + + async def test_gridfs_find(self): + await self.fs.put(b"test2", filename="two") + await asyncio.sleep(0.01) + await self.fs.put(b"test2+", filename="two") + await asyncio.sleep(0.01) + await self.fs.put(b"test1", filename="one") + await asyncio.sleep(0.01) + await self.fs.put(b"test2++", filename="two") + files = self.db.fs.files + self.assertEqual(3, await files.count_documents({"filename": "two"})) + self.assertEqual(4, await files.count_documents({})) + cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + await cursor.rewind() + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + gout = await cursor.next() + self.assertEqual(b"test2+", await gout.read()) + with self.assertRaises(StopAsyncIteration): + await cursor.__anext__() + await cursor.rewind() + items = await cursor.to_list() + self.assertEqual(len(items), 2) + await 
cursor.rewind() + items = await cursor.to_list(1) + self.assertEqual(len(items), 1) + await cursor.close() + self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + + async def test_delete_not_initialized(self): + # Creating a cursor with invalid arguments will not run __init__ + # but will still call __del__. + cursor = AsyncGridOutCursor.__new__(AsyncGridOutCursor) # Skip calling __init__ + with self.assertRaises(TypeError): + cursor.__init__(self.db.fs.files, {}, {"_id": True}) # type: ignore + cursor.__del__() # no error + + async def test_gridfs_find_one(self): + self.assertEqual(None, await self.fs.find_one()) + + id1 = await self.fs.put(b"test1", filename="file1") + res = await self.fs.find_one() + assert res is not None + self.assertEqual(b"test1", await res.read()) + + id2 = await self.fs.put(b"test2", filename="file2", meta="data") + res1 = await self.fs.find_one(id1) + assert res1 is not None + self.assertEqual(b"test1", await res1.read()) + res2 = await self.fs.find_one(id2) + assert res2 is not None + self.assertEqual(b"test2", await res2.read()) + + res3 = await self.fs.find_one({"filename": "file1"}) + assert res3 is not None + self.assertEqual(b"test1", await res3.read()) + + res4 = await self.fs.find_one(id2) + assert res4 is not None + self.assertEqual("data", res4.meta) + + async def test_grid_in_non_int_chunksize(self): + # Lua, and perhaps other buggy AsyncGridFS clients, store size as a float. + data = b"data" + await self.fs.put(data, filename="f") + await self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) + + self.assertEqual(data, await (await self.fs.get_version("f")).read()) + + async def test_unacknowledged(self): + # w=0 is prohibited. + with self.assertRaises(ConfigurationError): + gridfs.AsyncGridFS((await self.async_rs_or_single_client(w=0)).pymongo_test) + + async def test_md5(self): + gin = self.fs.new_file() + await gin.write(b"no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.get(gin._id) + self.assertIsNone(gout.md5) + + _id = await self.fs.put(b"still no md5 sum") + gout = await self.fs.get(_id) + self.assertIsNone(gout.md5) + + +class TestGridfsReplicaSet(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + + @classmethod + @async_client_context.require_connection + async def asyncTearDownClass(cls): + await async_client_context.client.drop_database("gfsreplica") + + async def test_gridfs_replica_set(self): + rsc = await self.async_rs_client( + w=async_client_context.w, read_preference=ReadPreference.SECONDARY + ) + + fs = gridfs.AsyncGridFS(rsc.gfsreplica, "gfsreplicatest") + + gin = fs.new_file() + self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) + + oid = await fs.put(b"foo") + content = await (await fs.get(oid)).read() + self.assertEqual(b"foo", content) + + async def test_gridfs_secondary(self): + secondary_host, secondary_port = one(await self.client.secondaries) + secondary_connection = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) + + # Should detect it's connected to secondary and not attempt to + # create index + fs = gridfs.AsyncGridFS(secondary_connection.gfsreplica, "gfssecondarytest") + + # This won't detect secondary, raises error + with self.assertRaises(NotPrimaryError): + await fs.put(b"foo") + + async def test_gridfs_secondary_lazy(self): + # Should detect it's connected to secondary and not 
attempt to + # create index. + secondary_host, secondary_port = one(await self.client.secondaries) + client = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) + + # Still no connection. + fs = gridfs.AsyncGridFS(client.gfsreplica, "gfssecondarylazytest") + + # Connects, doesn't create index. + with self.assertRaises(NoFile): + await fs.get_last_version() + with self.assertRaises(NotPrimaryError): + await fs.put("data", encoding="utf-8") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_gridfs_bucket.py b/test/asynchronous/test_gridfs_bucket.py new file mode 100644 index 0000000000..fd9b9883bf --- /dev/null +++ b/test/asynchronous/test_gridfs_bucket.py @@ -0,0 +1,597 @@ +# +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for the gridfs package.""" +from __future__ import annotations + +import asyncio +import datetime +import itertools +import sys +import threading +import time +from io import BytesIO +from test.asynchronous.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import async_joinall +from test.utils_shared import one + +import gridfs +from bson.binary import Binary +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.son import SON +from gridfs.errors import CorruptGridFile, NoFile +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + ConfigurationError, + NotPrimaryError, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + + +class JustWrite(ConcurrentRunner): + def __init__(self, gfs, num): + super().__init__() + self.gfs = gfs + self.num = num + self.daemon = True + + async def run(self): + for _ in range(self.num): + file = self.gfs.open_upload_stream("test") + await file.write(b"hello") + await file.close() + + +class JustRead(ConcurrentRunner): + def __init__(self, gfs, num, results): + super().__init__() + self.gfs = gfs + self.num = num + self.results = results + self.daemon = True + + async def run(self): + for _ in range(self.num): + file = await self.gfs.open_download_stream_by_name("test") + data = await file.read() + self.results.append(data) + assert data == b"hello" + + +class TestGridfs(AsyncIntegrationTest): + fs: gridfs.AsyncGridFSBucket + alt: gridfs.AsyncGridFSBucket + + async def asyncSetUp(self): + await super().asyncSetUp() + self.fs = gridfs.AsyncGridFSBucket(self.db) + self.alt = gridfs.AsyncGridFSBucket(self.db, bucket_name="alt") + await self.cleanup_colls( + self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks + ) + + async def test_basic(self): + oid = await self.fs.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", await (await 
self.fs.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + + await self.fs.delete(oid) + with self.assertRaises(NoFile): + await self.fs.open_download_stream(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_multi_chunk_delete(self): + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFSBucket(self.db) + oid = await gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete(oid) + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_delete_by_name(self): + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + gfs = gridfs.AsyncGridFSBucket(self.db) + await gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(5, await self.db.fs.chunks.count_documents({})) + await gfs.delete_by_name("test_filename") + self.assertEqual(0, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + async def test_empty_file(self): + oid = await self.fs.upload_from_stream("test_filename", b"") + self.assertEqual(b"", await (await self.fs.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.fs.files.count_documents({})) + self.assertEqual(0, await self.db.fs.chunks.count_documents({})) + + raw = await self.db.fs.files.find_one() + assert raw is not None + self.assertEqual(0, raw["length"]) + self.assertEqual(oid, raw["_id"]) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) + self.assertEqual(255 * 1024, raw["chunkSize"]) + self.assertNotIn("md5", raw) + + async def test_corrupt_chunk(self): + files_id = await self.fs.upload_from_stream("test_filename", b"foobar") + await self.db.fs.chunks.update_one( + {"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}} + ) + try: + out = await self.fs.open_download_stream(files_id) + with self.assertRaises(CorruptGridFile): + await out.read() + + out = await self.fs.open_download_stream(files_id) + with self.assertRaises(CorruptGridFile): + await out.readline() + finally: + await self.fs.delete(files_id) + + async def test_upload_ensures_index(self): + chunks = self.db.fs.chunks + files = self.db.fs.files + # Ensure the collections are removed. 
+ await chunks.drop() + await files.drop() + await self.fs.upload_from_stream("filename", b"junk") + + self.assertIn( + [("files_id", 1), ("n", 1)], + [info.get("key") for info in (await chunks.index_information()).values()], + "Missing required index on chunks collection: {files_id: 1, n: 1}", + ) + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (await files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", + ) + + async def test_ensure_index_shell_compat(self): + files = self.db.fs.files + for i, j in itertools.combinations_with_replacement([1, 1.0, Int64(1)], 2): + # Create the index with different numeric types (as might be done + # from the mongo shell). + shell_index = [("filename", i), ("uploadDate", j)] + await self.db.command( + "createIndexes", + files.name, + indexes=[{"key": SON(shell_index), "name": "filename_1.0_uploadDate_1.0"}], + ) + + # No error. + await self.fs.upload_from_stream("filename", b"data") + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (await files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", + ) + await files.drop() + + async def test_alt_collection(self): + oid = await self.alt.upload_from_stream("test_filename", b"hello world") + self.assertEqual(b"hello world", await (await self.alt.open_download_stream(oid)).read()) + self.assertEqual(1, await self.db.alt.files.count_documents({})) + self.assertEqual(1, await self.db.alt.chunks.count_documents({})) + + await self.alt.delete(oid) + with self.assertRaises(NoFile): + await self.alt.open_download_stream(oid) + self.assertEqual(0, await self.db.alt.files.count_documents({})) + self.assertEqual(0, await self.db.alt.chunks.count_documents({})) + + with self.assertRaises(NoFile): + await self.alt.open_download_stream("foo") + await self.alt.upload_from_stream("foo", b"hello world") + self.assertEqual( + b"hello world", await (await self.alt.open_download_stream_by_name("foo")).read() + ) + + await self.alt.upload_from_stream("mike", b"") + await self.alt.upload_from_stream("test", b"foo") + await self.alt.upload_from_stream("hello world", b"") + + self.assertEqual( + {"mike", "test", "hello world", "foo"}, + {k["filename"] for k in await self.db.alt.files.find().to_list()}, + ) + + async def test_threaded_reads(self): + await self.fs.upload_from_stream("test", b"hello") + + threads = [] + results: list = [] + for i in range(10): + threads.append(JustRead(self.fs, 10, results)) + await threads[i].start() + + await async_joinall(threads) + + self.assertEqual(100 * [b"hello"], results) + + async def test_threaded_writes(self): + threads = [] + for i in range(10): + threads.append(JustWrite(self.fs, 10)) + await threads[i].start() + + await async_joinall(threads) + + fstr = await self.fs.open_download_stream_by_name("test") + self.assertEqual(await fstr.read(), b"hello") + + # Should have created 100 versions of 'test' file + self.assertEqual(100, await self.db.fs.files.count_documents({"filename": "test"})) + + async def test_get_last_version(self): + one = await self.fs.upload_from_stream("test", b"foo") + await asyncio.sleep(0.01) + two = self.fs.open_upload_stream("test") + await two.write(b"bar") + await two.close() + await asyncio.sleep(0.01) + two = two._id + three = await self.fs.upload_from_stream("test", b"baz") + + self.assertEqual(b"baz", await (await 
self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(three) + self.assertEqual(b"bar", await (await self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(two) + self.assertEqual(b"foo", await (await self.fs.open_download_stream_by_name("test")).read()) + await self.fs.delete(one) + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test") + + async def test_get_version(self): + await self.fs.upload_from_stream("test", b"foo") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("test", b"bar") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("test", b"baz") + await asyncio.sleep(0.01) + + self.assertEqual( + b"foo", await (await self.fs.open_download_stream_by_name("test", revision=0)).read() + ) + self.assertEqual( + b"bar", await (await self.fs.open_download_stream_by_name("test", revision=1)).read() + ) + self.assertEqual( + b"baz", await (await self.fs.open_download_stream_by_name("test", revision=2)).read() + ) + + self.assertEqual( + b"baz", await (await self.fs.open_download_stream_by_name("test", revision=-1)).read() + ) + self.assertEqual( + b"bar", await (await self.fs.open_download_stream_by_name("test", revision=-2)).read() + ) + self.assertEqual( + b"foo", await (await self.fs.open_download_stream_by_name("test", revision=-3)).read() + ) + + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test", revision=3) + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("test", revision=-4) + + async def test_upload_from_stream(self): + oid = await self.fs.upload_from_stream( + "test_file", BytesIO(b"hello world"), chunk_size_bytes=1 + ) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + self.assertEqual(b"hello world", await (await self.fs.open_download_stream(oid)).read()) + + async def test_upload_from_stream_with_id(self): + oid = ObjectId() + await self.fs.upload_from_stream_with_id( + oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 + ) + self.assertEqual(b"custom id", await (await self.fs.open_download_stream(oid)).read()) + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 3) + @async_client_context.require_failCommand_fail_point + async def test_upload_bulk_write_error(self): + # Test BulkWriteError from insert_many is converted to an insert_one style error. + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + gin = self.fs.open_upload_stream("test_file", chunk_size_bytes=1) + async with self.fail_point(cause_wce): + # Assert we raise WriteConcernError, not BulkWriteError. + with self.assertRaises(WriteConcernError): + await gin.write(b"hello world") + # 3 chunks were uploaded. + self.assertEqual(3, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.abort() + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 10) + async def test_upload_batching(self): + async with self.fs.open_upload_stream("test_file", chunk_size_bytes=1) as gin: + await gin.write(b"s" * (10 - 1)) + # No chunks were uploaded yet. + self.assertEqual(0, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.write(b"s") + # All chunks were uploaded since we hit the _UPLOAD_BUFFER_CHUNKS limit. 
+ self.assertEqual(10, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + + async def test_open_upload_stream(self): + gin = self.fs.open_upload_stream("from_stream") + await gin.write(b"from stream") + await gin.close() + self.assertEqual(b"from stream", await (await self.fs.open_download_stream(gin._id)).read()) + + async def test_open_upload_stream_with_id(self): + oid = ObjectId() + gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id") + await gin.write(b"from stream with custom id") + await gin.close() + self.assertEqual( + b"from stream with custom id", await (await self.fs.open_download_stream(oid)).read() + ) + + async def test_missing_length_iter(self): + # Test fix that guards against PHP-237 + await self.fs.upload_from_stream("empty", b"") + doc = await self.db.fs.files.find_one({"filename": "empty"}) + assert doc is not None + doc.pop("length") + await self.db.fs.files.replace_one({"_id": doc["_id"]}, doc) + fstr = await self.fs.open_download_stream_by_name("empty") + + async def iterate_file(grid_file): + async for _ in grid_file: + pass + return True + + self.assertTrue(await iterate_file(fstr)) + + async def test_gridfs_lazy_connect(self): + client = await self.async_single_client( + "badhost", connect=False, serverSelectionTimeoutMS=0 + ) + cdb = client.db + gfs = gridfs.AsyncGridFSBucket(cdb) + with self.assertRaises(ServerSelectionTimeoutError): + await gfs.delete(0) + + gfs = gridfs.AsyncGridFSBucket(cdb) + with self.assertRaises(ServerSelectionTimeoutError): + await gfs.upload_from_stream("test", b"") # Still no connection. + + async def test_gridfs_find(self): + await self.fs.upload_from_stream("two", b"test2") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("two", b"test2+") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("one", b"test1") + await asyncio.sleep(0.01) + await self.fs.upload_from_stream("two", b"test2++") + files = self.db.fs.files + self.assertEqual(3, await files.count_documents({"filename": "two"})) + self.assertEqual(4, await files.count_documents({})) + cursor = self.fs.find( + {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2 + ) + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + await cursor.rewind() + gout = await cursor.next() + self.assertEqual(b"test1", await gout.read()) + gout = await cursor.next() + self.assertEqual(b"test2+", await gout.read()) + with self.assertRaises(StopAsyncIteration): + await cursor.next() + await cursor.close() + self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) + + async def test_grid_in_non_int_chunksize(self): + # Lua, and perhaps other buggy AsyncGridFS clients, store size as a float. + data = b"data" + await self.fs.upload_from_stream("f", data) + await self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) + + self.assertEqual(data, await (await self.fs.open_download_stream_by_name("f")).read()) + + async def test_unacknowledged(self): + # w=0 is prohibited. 
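Context for the assertion just below: an unacknowledged bucket could never report failed chunk inserts, so construction itself is rejected. A minimal sketch of the failing and the working configuration, where the URI and the database name are placeholders, not values from this changeset:

```python
# Sketch only; "mongodb://localhost" and "my_db" are placeholders.
import gridfs
from pymongo import AsyncMongoClient
from pymongo.errors import ConfigurationError

try:
    gridfs.AsyncGridFSBucket(AsyncMongoClient("mongodb://localhost", w=0).my_db)
except ConfigurationError:
    pass  # w=0 cannot surface failed chunk writes, so the bucket refuses it.

bucket = gridfs.AsyncGridFSBucket(AsyncMongoClient("mongodb://localhost", w=1).my_db)
```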
+ with self.assertRaises(ConfigurationError): + gridfs.AsyncGridFSBucket((await self.async_rs_or_single_client(w=0)).pymongo_test) + + async def test_rename(self): + _id = await self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("first_name")).read() + ) + + await self.fs.rename(_id, "second_name") + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("first_name") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("second_name")).read() + ) + + async def test_rename_by_name(self): + _id = await self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("first_name")).read() + ) + + await self.fs.rename_by_name("first_name", "second_name") + with self.assertRaises(NoFile): + await self.fs.open_download_stream_by_name("first_name") + self.assertEqual( + b"testing", await (await self.fs.open_download_stream_by_name("second_name")).read() + ) + + @patch("gridfs.asynchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) + async def test_abort(self): + gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) + await gin.write(b"test1") + await gin.write(b"test2") + await gin.write(b"test3") + self.assertEqual(3, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + await gin.abort() + self.assertTrue(gin.closed) + with self.assertRaises(ValueError): + await gin.write(b"test4") + self.assertEqual(0, await self.db.fs.chunks.count_documents({"files_id": gin._id})) + + async def test_download_to_stream(self): + file1 = BytesIO(b"hello world") + # Test with one chunk. + oid = await self.fs.upload_from_stream("one_chunk", file1) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream(oid, file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + # Test with many chunks. + await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + file1.seek(0) + oid = await self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream(oid, file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + async def test_download_to_stream_by_name(self): + file1 = BytesIO(b"hello world") + # Test with one chunk. + _ = await self.fs.upload_from_stream("one_chunk", file1) + self.assertEqual(1, await self.db.fs.chunks.count_documents({})) + file2 = BytesIO() + await self.fs.download_to_stream_by_name("one_chunk", file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + # Test with many chunks. 
+ await self.db.drop_collection("fs.files") + await self.db.drop_collection("fs.chunks") + file1.seek(0) + await self.fs.upload_from_stream("many_chunks", file1, chunk_size_bytes=1) + self.assertEqual(11, await self.db.fs.chunks.count_documents({})) + + file2 = BytesIO() + await self.fs.download_to_stream_by_name("many_chunks", file2) + file1.seek(0) + file2.seek(0) + self.assertEqual(file1.read(), file2.read()) + + async def test_md5(self): + gin = self.fs.open_upload_stream("no md5") + await gin.write(b"no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.open_download_stream(gin._id) + self.assertIsNone(gout.md5) + + gin = self.fs.open_upload_stream_with_id(ObjectId(), "also no md5") + await gin.write(b"also no md5 sum") + await gin.close() + self.assertIsNone(gin.md5) + + gout = await self.fs.open_download_stream(gin._id) + self.assertIsNone(gout.md5) + + +class TestGridfsBucketReplicaSet(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + + @classmethod + @async_client_context.require_connection + async def asyncTearDownClass(cls): + await async_client_context.client.drop_database("gfsbucketreplica") + + async def test_gridfs_replica_set(self): + rsc = await self.async_rs_client( + w=async_client_context.w, read_preference=ReadPreference.SECONDARY + ) + + gfs = gridfs.AsyncGridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") + oid = await gfs.upload_from_stream("test_filename", b"foo") + content = await (await gfs.open_download_stream(oid)).read() + self.assertEqual(b"foo", content) + + async def test_gridfs_secondary(self): + secondary_host, secondary_port = one(await self.client.secondaries) + secondary_connection = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY + ) + + # Should detect it's connected to secondary and not attempt to + # create index + gfs = gridfs.AsyncGridFSBucket( + secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest" + ) + + # This won't detect secondary, raises error + with self.assertRaises(NotPrimaryError): + await gfs.upload_from_stream("test_filename", b"foo") + + async def test_gridfs_secondary_lazy(self): + # Should detect it's connected to secondary and not attempt to + # create index. + secondary_host, secondary_port = one(await self.client.secondaries) + client = await self.async_single_client( + secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False + ) + + # Still no connection. + gfs = gridfs.AsyncGridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") + + # Connects, doesn't create index. + with self.assertRaises(NoFile): + await gfs.open_download_stream_by_name("test_filename") + with self.assertRaises(NotPrimaryError): + await gfs.upload_from_stream("test_filename", b"data") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_gridfs_spec.py b/test/asynchronous/test_gridfs_spec.py new file mode 100644 index 0000000000..f3dc14fbdc --- /dev/null +++ b/test/asynchronous/test_gridfs_spec.py @@ -0,0 +1,39 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the AsyncGridFS unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "gridfs") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "gridfs") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_heartbeat_monitoring.py b/test/asynchronous/test_heartbeat_monitoring.py new file mode 100644 index 0000000000..aa8a205021 --- /dev/null +++ b/test/asynchronous/test_heartbeat_monitoring.py @@ -0,0 +1,98 @@ +# Copyright 2016-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the monitoring of the server heartbeats.""" +from __future__ import annotations + +import sys +from test.asynchronous.utils import AsyncMockPool + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, client_knobs, unittest +from test.utils_shared import HeartbeatEventListener, async_wait_until + +from pymongo.asynchronous.monitor import Monitor +from pymongo.errors import ConnectionFailure +from pymongo.hello import Hello, HelloCompat + +_IS_SYNC = False + + +class TestHeartbeatMonitoring(AsyncIntegrationTest): + async def create_mock_monitor(self, responses, uri, expected_results): + listener = HeartbeatEventListener() + with client_knobs( + heartbeat_frequency=0.1, min_heartbeat_interval=0.1, events_queue_frequency=0.1 + ): + + class MockMonitor(Monitor): + async def _check_with_socket(self, *args, **kwargs): + if isinstance(responses[1], Exception): + raise responses[1] + return Hello(responses[1]), 99 + + _ = await self.async_single_client( + h=uri, + event_listeners=(listener,), + _monitor_class=MockMonitor, + _pool_class=AsyncMockPool, + connect=True, + ) + + expected_len = len(expected_results) + # Wait for *at least* expected_len number of results. The + # monitor thread may run multiple times during the execution + # of this test. + await async_wait_until( + lambda: len(listener.events) >= expected_len, "publish all events" + ) + + # zip gives us len(expected_results) pairs. 
+                for expected, actual in zip(expected_results, listener.events):
+                    self.assertEqual(expected, actual.__class__.__name__)
+                    self.assertEqual(actual.connection_id, responses[0])
+                    if expected != "ServerHeartbeatStartedEvent":
+                        if isinstance(actual.reply, Hello):
+                            self.assertEqual(actual.duration, 99)
+                            self.assertEqual(actual.reply._doc, responses[1])
+                        else:
+                            self.assertEqual(actual.reply, responses[1])
+
+    async def test_standalone(self):
+        responses = (
+            ("a", 27017),
+            {HelloCompat.LEGACY_CMD: True, "maxWireVersion": 4, "minWireVersion": 0, "ok": 1},
+        )
+        uri = "mongodb://a:27017"
+        expected_results = ["ServerHeartbeatStartedEvent", "ServerHeartbeatSucceededEvent"]
+
+        await self.create_mock_monitor(responses, uri, expected_results)
+
+    async def test_standalone_error(self):
+        responses = (("a", 27017), ConnectionFailure("SPECIAL MESSAGE"))
+        uri = "mongodb://a:27017"
+        # _check_with_socket failing results in a second attempt.
+        expected_results = [
+            "ServerHeartbeatStartedEvent",
+            "ServerHeartbeatFailedEvent",
+            "ServerHeartbeatStartedEvent",
+            "ServerHeartbeatFailedEvent",
+        ]
+
+        await self.create_mock_monitor(responses, uri, expected_results)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/asynchronous/test_index_management.py b/test/asynchronous/test_index_management.py
new file mode 100644
index 0000000000..890788fc56
--- /dev/null
+++ b/test/asynchronous/test_index_management.py
@@ -0,0 +1,379 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the index management spec tests."""
+from __future__ import annotations
+
+import asyncio
+import os
+import pathlib
+import sys
+import time
+import uuid
+from typing import Any, Mapping
+
+import pytest
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import AsyncIntegrationTest, AsyncPyMongoTestCase, unittest
+from test.asynchronous.unified_format import generate_test_classes
+from test.utils_shared import AllowListEventListener, OvertCommandListener
+
+from pymongo.errors import OperationFailure
+from pymongo.operations import SearchIndexModel
+from pymongo.read_concern import ReadConcern
+from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = False
+
+pytestmark = pytest.mark.search_index
+
+# Location of JSON test specifications.
+if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "index_management") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "index_management") + +_NAME = "test-search-index" + + +class TestCreateSearchIndex(AsyncIntegrationTest): + async def test_inputs(self): + listener = AllowListEventListener("createSearchIndexes") + client = self.simple_client(event_listeners=[listener]) + coll = client.test.test + await coll.drop() + definition = dict(mappings=dict(dynamic=True)) + model_kwarg_list: list[Mapping[str, Any]] = [ + dict(definition=definition, name=None), + dict(definition=definition, name="test"), + ] + for model_kwargs in model_kwarg_list: + model = SearchIndexModel(**model_kwargs) + with self.assertRaises(OperationFailure): + await coll.create_search_index(model) + with self.assertRaises(OperationFailure): + await coll.create_search_index(model_kwargs) + + listener.reset() + with self.assertRaises(OperationFailure): + await coll.create_search_index({"definition": definition, "arbitraryOption": 1}) + self.assertEqual( + {"definition": definition, "arbitraryOption": 1}, + listener.events[0].command["indexes"][0], + ) + + listener.reset() + with self.assertRaises(OperationFailure): + await coll.create_search_index({"definition": definition, "type": "search"}) + self.assertEqual( + {"definition": definition, "type": "search"}, listener.events[0].command["indexes"][0] + ) + + +class SearchIndexIntegrationBase(AsyncPyMongoTestCase): + db_name = "test_search_index_base" + + @classmethod + def setUpClass(cls) -> None: + cls.url = os.environ.get("MONGODB_URI") + cls.username = os.environ["DB_USER"] + cls.password = os.environ["DB_PASSWORD"] + cls.listener = OvertCommandListener() + + async def asyncSetUp(self) -> None: + self.client = self.simple_client( + self.url, + username=self.username, + password=self.password, + event_listeners=[self.listener], + ) + await self.client.drop_database(_NAME) + self.db = self.client[self.db_name] + + async def asyncTearDown(self): + await self.client.drop_database(_NAME) + + async def wait_for_ready(self, coll, name=_NAME, predicate=None): + """Wait for a search index to be ready.""" + indices: list[Mapping[str, Any]] = [] + if predicate is None: + predicate = lambda index: index.get("queryable") is True + + while True: + indices = await (await coll.list_search_indexes(name)).to_list() + if len(indices) and predicate(indices[0]): + return indices[0] + await asyncio.sleep(5) + + +class TestSearchIndexIntegration(SearchIndexIntegrationBase): + db_name = "test_search_index" + + async def test_comment_field(self): + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Create a new search index on ``coll0`` that implicitly passes its type. + search_definition = {"mappings": {"dynamic": False}} + self.listener.reset() + implicit_search_resp = await coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition}, comment="foo" + ) + event = self.listener.events[0] + self.assertEqual(event.command["comment"], "foo") + + # Get the index definition. 
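A brief aside before the listing step above continues below: throughout these tests a search index can be described either as a plain mapping or via the `SearchIndexModel` helper. A small equivalence sketch, not part of this changeset; "my-index" is a placeholder name:

```python
# Sketch only: two equivalent ways to describe the same search index.
from pymongo.operations import SearchIndexModel

definition = {"mappings": {"dynamic": False}}
as_document = {"name": "my-index", "definition": definition}
as_model = SearchIndexModel(definition=definition, name="my-index")
# Either form is accepted by create_search_index(); create_search_indexes()
# takes a list of SearchIndexModel, as test_case_2 below shows.
```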
+        self.listener.reset()
+        await (await coll0.list_search_indexes(name=implicit_search_resp, comment="foo")).next()
+        event = self.listener.events[0]
+        self.assertEqual(event.command["comment"], "foo")
+
+
+class TestSearchIndexProse(SearchIndexIntegrationBase):
+    db_name = "test_search_index_prose"
+
+    async def test_case_1(self):
+        """Driver can successfully create and list search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. Use the following definition:
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        await coll0.insert_one({})
+        resp = await coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with a value of ``true``.
+        index = await self.wait_for_ready(coll0)
+
+        # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }``.
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model["definition"])
+
+    async def test_case_2(self):
+        """Driver can successfully create multiple indexes in batch."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        await coll0.insert_one({})
+
+        # Create two new search indexes on ``coll0`` with the ``createSearchIndexes`` helper.
+        name1 = "test-search-index-1"
+        name2 = "test-search-index-2"
+        definition = {"mappings": {"dynamic": False}}
+        index_definitions: list[dict[str, Any]] = [
+            {"name": name1, "definition": definition},
+            {"name": name2, "definition": definition},
+        ]
+        await coll0.create_search_indexes(
+            [SearchIndexModel(i["definition"], i["name"]) for i in index_definitions]
+        )
+
+        # Assert that the command returns an array containing the new indexes' names: ``["test-search-index-1", "test-search-index-2"]``.
+        indices = await (await coll0.list_search_indexes()).to_list()
+        names = [i["name"] for i in indices]
+        self.assertIn(name1, names)
+        self.assertIn(name2, names)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied.
+        # An index with the ``name`` of ``test-search-index-1`` is present and index has a field ``queryable`` with the value of ``true``. Store result in ``index1``.
+        # An index with the ``name`` of ``test-search-index-2`` is present and index has a field ``queryable`` with the value of ``true``. Store result in ``index2``.
+        index1 = await self.wait_for_ready(coll0, name1)
+        index2 = await self.wait_for_ready(coll0, name2)
+
+        # Assert that ``index1`` and ``index2`` have the property ``latestDefinition`` whose value is ``{ "mappings" : { "dynamic" : false } }``.
+        for index in [index1, index2]:
+            self.assertIn("latestDefinition", index)
+            self.assertEqual(index["latestDefinition"], definition)
+
+    async def test_case_3(self):
+        """Driver can successfully drop search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        await coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = await coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, "test-search-index")
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``.
+        await self.wait_for_ready(coll0)
+
+        # Run a ``dropSearchIndex`` on ``coll0``, using ``test-search-index`` for the name.
+        await coll0.drop_search_index(_NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until ``listSearchIndexes`` returns an empty array.
+        t0 = time.time()
+        while True:
+            indices = await (await coll0.list_search_indexes()).to_list()
+            if not indices:
+                break
+            if (time.time() - t0) / 60 > 5:
+                raise TimeoutError("Timed out waiting for index deletion")
+            await asyncio.sleep(5)
+
+    async def test_case_4(self):
+        """Driver can update a search index."""
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        await coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = await coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``.
+        await self.wait_for_ready(coll0)
+
+        # Run an ``updateSearchIndex`` on ``coll0``.
+        # Assert that the command does not error and the server responds with a success.
+        model2: dict[str, Any] = {"name": _NAME, "definition": {"mappings": {"dynamic": True}}}
+        await coll0.update_search_index(_NAME, model2["definition"])
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present. This index is referred to as ``index``.
+        # The index has a field ``queryable`` with a value of ``true`` and has a field ``status`` with the value of ``READY``.
+        predicate = lambda index: index.get("queryable") is True and index.get("status") == "READY"
+        await self.wait_for_ready(coll0, predicate=predicate)
+
+        # Assert that an index is present with the name ``test-search-index`` and the definition has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': true } }``.
+        index = (await (await coll0.list_search_indexes(_NAME)).to_list())[0]
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model2["definition"])
+
+    async def test_case_5(self):
+        """``dropSearchIndex`` suppresses namespace not found errors."""
+        # Create a driver-side collection object for a randomly generated collection name. Do not create this collection on the server.
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Run a ``dropSearchIndex`` command and assert that no error is thrown.
+ await coll0.drop_search_index("foo") + + async def test_case_6(self): + """Driver can successfully create and list search indexes with non-default readConcern and writeConcern.""" + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Apply a write concern ``WriteConcern(w=1)`` and a read concern with ``ReadConcern(level="majority")`` to ``coll0``. + coll0 = coll0.with_options( + write_concern=WriteConcern(w="1"), read_concern=ReadConcern(level="majority") + ) + + # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. + name = "test-search-index-case6" + model = {"name": name, "definition": {"mappings": {"dynamic": False}}} + resp = await coll0.create_search_index(model) + + # Assert that the command returns the name of the index: ``"test-search-index-case6"``. + self.assertEqual(resp, name) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``: + # - An index with the ``name`` of ``test-search-index-case6`` is present and the index has a field ``queryable`` with a value of ``true``. + index = await self.wait_for_ready(coll0, name) + + # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }`` + self.assertIn("latestDefinition", index) + self.assertEqual(index["latestDefinition"], model["definition"]) + + async def test_case_7(self): + """Driver handles index types.""" + + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + await coll0.insert_one({}) + + # Use these search and vector search definitions for indexes. + search_definition = {"mappings": {"dynamic": False}} + vector_search_definition = { + "fields": [ + { + "type": "vector", + "path": "plot_embedding", + "numDimensions": 1536, + "similarity": "euclidean", + }, + ] + } + + # Create a new search index on ``coll0`` that implicitly passes its type. + implicit_search_resp = await coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition} + ) + + # Get the index definition. + resp = await (await coll0.list_search_indexes(name=implicit_search_resp)).next() + + # Assert that the index model contains the correct index type: ``"search"``. + self.assertEqual(resp["type"], "search") + + # Create a new search index on ``coll0`` that explicitly passes its type. + explicit_search_resp = await coll0.create_search_index( + model={"name": _NAME + "-explicit", "type": "search", "definition": search_definition} + ) + + # Get the index definition. + resp = await (await coll0.list_search_indexes(name=explicit_search_resp)).next() + + # Assert that the index model contains the correct index type: ``"search"``. + self.assertEqual(resp["type"], "search") + + # Create a new vector search index on ``coll0`` that explicitly passes its type. + explicit_vector_resp = await coll0.create_search_index( + model={ + "name": _NAME + "-vector", + "type": "vectorSearch", + "definition": vector_search_definition, + } + ) + + # Get the index definition. + resp = await (await coll0.list_search_indexes(name=explicit_vector_resp)).next() + + # Assert that the index model contains the correct index type: ``"vectorSearch"``. 
+ self.assertEqual(resp["type"], "vectorSearch") + + # Catch the error raised when trying to create a vector search index without specifying the type + with self.assertRaises(OperationFailure) as e: + await coll0.create_search_index( + model={"name": _NAME + "-error", "definition": vector_search_definition} + ) + self.assertIn("Attribute mappings missing.", e.exception.details["errmsg"]) + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_json_util_integration.py b/test/asynchronous/test_json_util_integration.py new file mode 100644 index 0000000000..32312cb9d3 --- /dev/null +++ b/test/asynchronous/test_json_util_integration.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from test.asynchronous import AsyncIntegrationTest +from typing import Any, List, MutableMapping + +from bson import Binary, Code, DBRef, ObjectId, json_util +from bson.binary import USER_DEFINED_SUBTYPE + +_IS_SYNC = False + + +class TestJsonUtilRoundtrip(AsyncIntegrationTest): + async def test_cursor(self): + db = self.db + + await db.drop_collection("test") + docs: List[MutableMapping[str, Any]] = [ + {"foo": [1, 2]}, + {"bar": {"hello": "world"}}, + {"code": Code("function x() { return 1; }")}, + {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, + {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, + ] + + await db.test.insert_many(docs) + reloaded_docs = json_util.loads(json_util.dumps(await (db.test.find()).to_list())) + for doc in docs: + self.assertIn(doc, reloaded_docs) diff --git a/test/asynchronous/test_load_balancer.py b/test/asynchronous/test_load_balancer.py new file mode 100644 index 0000000000..17d85841f9 --- /dev/null +++ b/test/asynchronous/test_load_balancer.py @@ -0,0 +1,194 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Load Balancer unified spec tests.""" +from __future__ import annotations + +import asyncio +import gc +import os +import pathlib +import sys +import threading +from asyncio import Event +from test.asynchronous.helpers import ConcurrentRunner, ExceptionCatchingTask +from test.asynchronous.utils import async_get_pool + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import ( + async_wait_until, + create_async_event, +) + +_IS_SYNC = False + +pytestmark = pytest.mark.load_balancer + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "load_balancer") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "load_balancer") + +# Generate unified tests. 
+globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +class TestLB(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + + async def test_connections_are_only_returned_once(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3011 + self.skipTest("Test is flaky on PyPy") + pool = await async_get_pool(self.client) + n_conns = len(pool.conns) + await self.db.test.find_one({}) + self.assertEqual(len(pool.conns), n_conns) + await (await self.db.test.aggregate([{"$limit": 1}])).to_list() + self.assertEqual(len(pool.conns), n_conns) + + @async_client_context.require_load_balancer + async def test_unpin_committed_transaction(self): + client = await self.async_rs_client() + pool = await async_get_pool(client) + coll = client[self.db.name].test + async with client.start_session() as session: + async with await session.start_transaction(): + self.assertEqual(pool.active_sockets, 0) + await coll.insert_one({}, session=session) + self.assertEqual(pool.active_sockets, 1) # Pinned. + self.assertEqual(pool.active_sockets, 1) # Still pinned. + self.assertEqual(pool.active_sockets, 0) # Unpinned. + + @async_client_context.require_failCommand_fail_point + async def test_cursor_gc(self): + async def create_resource(coll): + cursor = coll.find({}, batch_size=3) + await anext(cursor) + return cursor + + await self._test_no_gc_deadlock(create_resource) + + @async_client_context.require_failCommand_fail_point + async def test_command_cursor_gc(self): + async def create_resource(coll): + cursor = await coll.aggregate([], batchSize=3) + await anext(cursor) + return cursor + + await self._test_no_gc_deadlock(create_resource) + + async def _test_no_gc_deadlock(self, create_resource): + client = await self.async_rs_client() + pool = await async_get_pool(client) + coll = client[self.db.name].test + await coll.insert_many([{} for _ in range(10)]) + self.assertEqual(pool.active_sockets, 0) + # Cause the initial find attempt to fail to induce a reference cycle. + args = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find", "aggregate"], + "closeConnection": True, + }, + } + async with self.fail_point(args): + resource = await create_resource(coll) + if async_client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. + + task = PoolLocker(pool) + await task.start() + self.assertTrue(await task.wait(task.locked, 5), "timed out") + # Garbage collect the resource while the pool is locked to ensure we + # don't deadlock. + del resource + # On PyPy it can take a few rounds to collect the cursor. + for _ in range(3): + gc.collect() + task.unlock.set() + await task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) + + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + await coll.delete_many({}) + + @async_client_context.require_transactions + async def test_session_gc(self): + client = await self.async_rs_client() + pool = await async_get_pool(client) + session = client.start_session() + await session.start_transaction() + await client.test_session_gc.test.find_one({}, session=session) + # Cleanup the transaction left open on the server + self.addAsyncCleanup(self.client.admin.command, "killSessions", [session.session_id]) + if async_client_context.load_balancer: + self.assertEqual(pool.active_sockets, 1) # Pinned. 
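Before the test resumes with `PoolLocker` below, a note on the hazard it guards against: a `__del__` finalizer that re-entered pool code while the pool's lock was held would deadlock. The following is an editor's sketch of that shape in miniature using a plain `threading.Lock`, not pymongo internals; the tests instead verify that collection completes and the socket is returned afterwards.

```python
# Sketch only: why collecting a resource while its pool is locked is risky.
import threading

pool_lock = threading.Lock()


class PinnedResource:
    def __del__(self):
        # A finalizer that synchronously re-entered pool code would block
        # whenever collection happens under pool_lock; the timeout makes
        # the failed acquisition observable instead of hanging.
        acquired = pool_lock.acquire(timeout=0.1)
        print("finalizer could take the pool lock:", acquired)
        if acquired:
            pool_lock.release()


with pool_lock:
    resource = PinnedResource()
    del resource  # __del__ runs while pool_lock is held, so acquire fails
```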
+ + task = PoolLocker(pool) + await task.start() + self.assertTrue(await task.wait(task.locked, 5), "timed out") + # Garbage collect the session while the pool is locked to ensure we + # don't deadlock. + del session + # On PyPy it can take a few rounds to collect the session. + for _ in range(3): + gc.collect() + task.unlock.set() + await task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) + + await async_wait_until(lambda: pool.active_sockets == 0, "return socket") + # Run another operation to ensure the socket still works. + await client[self.db.name].test.delete_many({}) + + +class PoolLocker(ExceptionCatchingTask): + def __init__(self, pool): + super().__init__(target=self.lock_pool) + self.pool = pool + self.daemon = True + self.locked = create_async_event() + self.unlock = create_async_event() + + async def lock_pool(self): + async with self.pool.lock: + self.locked.set() + # Wait for the unlock flag. + unlock_pool = await self.wait(self.unlock, 10) + if not unlock_pool: + raise Exception("timed out waiting for unlock signal: deadlock?") + + async def wait(self, event: Event, timeout: int): + if _IS_SYNC: + return event.wait(timeout) # type: ignore[call-arg] + else: + try: + await asyncio.wait_for(event.wait(), timeout=timeout) + except asyncio.TimeoutError: + return False + return True + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_locks.py b/test/asynchronous/test_locks.py new file mode 100644 index 0000000000..e5a0adfee6 --- /dev/null +++ b/test/asynchronous/test_locks.py @@ -0,0 +1,462 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
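The file added below stress-tests pymongo's internal lock and condition factories against known asyncio regressions. For orientation, their usage mirrors `asyncio.Condition`; a minimal producer/consumer sketch, where `demo` is an illustrative name and not part of this changeset:

```python
# Sketch only: basic usage of the helpers exercised by the tests below.
import asyncio

from pymongo.lock import _async_create_condition, _async_create_lock


async def demo() -> int:
    cond = _async_create_condition(_async_create_lock())
    items: list[int] = []

    async def consumer() -> int:
        async with cond:
            await cond.wait_for(lambda: bool(items))  # sleep until notified
            return items.pop()

    task = asyncio.create_task(consumer())
    await asyncio.sleep(0)  # let the consumer reach wait_for()
    async with cond:
        items.append(1)
        cond.notify()
    return await task


assert asyncio.run(demo()) == 1
```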
+"""Tests for lock.py""" +from __future__ import annotations + +import asyncio +import sys +import unittest + +from pymongo.lock import _async_create_condition, _async_create_lock + +sys.path[0:0] = [""] + +if sys.version_info < (3, 13): + # Tests adapted from: https://github.com/python/cpython/blob/v3.13.0rc2/Lib/test/test_asyncio/test_locks.py + # Includes tests for: + # - https://github.com/python/cpython/issues/111693 + # - https://github.com/python/cpython/issues/112202 + class TestConditionStdlib(unittest.IsolatedAsyncioTestCase): + async def test_wait(self): + cond = _async_create_condition(_async_create_lock()) + result = [] + + async def c1(result): + await cond.acquire() + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + await cond.acquire() + if await cond.wait(): + result.append(2) + return True + + async def c3(result): + await cond.acquire() + if await cond.wait(): + result.append(3) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + t3 = asyncio.create_task(c3(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + self.assertFalse(cond.locked()) + + self.assertTrue(await cond.acquire()) + cond.notify() + await asyncio.sleep(0) + self.assertEqual([], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.notify(2) + await asyncio.sleep(0) + self.assertEqual([1], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1, 2], result) + self.assertTrue(cond.locked()) + + cond.release() + await asyncio.sleep(0) + self.assertEqual([1, 2, 3], result) + self.assertTrue(cond.locked()) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + + async def test_wait_cancel(self): + cond = _async_create_condition(_async_create_lock()) + await cond.acquire() + + wait = asyncio.create_task(cond.wait()) + asyncio.get_running_loop().call_soon(wait.cancel) + with self.assertRaises(asyncio.CancelledError): + await wait + self.assertFalse(cond._waiters) + self.assertTrue(cond.locked()) + + async def test_wait_cancel_contested(self): + cond = _async_create_condition(_async_create_lock()) + + await cond.acquire() + self.assertTrue(cond.locked()) + + wait_task = asyncio.create_task(cond.wait()) + await asyncio.sleep(0) + self.assertFalse(cond.locked()) + + # Notify, but contest the lock before cancelling + await cond.acquire() + self.assertTrue(cond.locked()) + cond.notify() + asyncio.get_running_loop().call_soon(wait_task.cancel) + asyncio.get_running_loop().call_soon(cond.release) + + try: + await wait_task + except asyncio.CancelledError: + # Should not happen, since no cancellation points + pass + + self.assertTrue(cond.locked()) + + async def test_wait_cancel_after_notify(self): + # See bpo-32841 + waited = False + + cond = _async_create_condition(_async_create_lock()) + + async def wait_on_cond(): + nonlocal waited + async with cond: + waited = True # Make sure this area was reached + await cond.wait() + + waiter = asyncio.create_task(wait_on_cond()) + await asyncio.sleep(0) # Start waiting + + await cond.acquire() + cond.notify() + await asyncio.sleep(0) # Get to acquire() + waiter.cancel() + await asyncio.sleep(0) # Activate cancellation + cond.release() + await asyncio.sleep(0) # 
Cancellation should occur + + self.assertTrue(waiter.cancelled()) + self.assertTrue(waited) + + async def test_wait_unacquired(self): + cond = _async_create_condition(_async_create_lock()) + with self.assertRaises(RuntimeError): + await cond.wait() + + async def test_wait_for(self): + cond = _async_create_condition(_async_create_lock()) + presult = False + + def predicate(): + return presult + + result = [] + + async def c1(result): + await cond.acquire() + if await cond.wait_for(predicate): + result.append(1) + cond.release() + return True + + t = asyncio.create_task(c1(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + await cond.acquire() + cond.notify() + cond.release() + await asyncio.sleep(0) + self.assertEqual([], result) + + presult = True + await cond.acquire() + cond.notify() + cond.release() + await asyncio.sleep(0) + self.assertEqual([1], result) + + self.assertTrue(t.done()) + self.assertTrue(t.result()) + + async def test_wait_for_unacquired(self): + cond = _async_create_condition(_async_create_lock()) + + # predicate can return true immediately + res = await cond.wait_for(lambda: [1, 2, 3]) + self.assertEqual([1, 2, 3], res) + + with self.assertRaises(RuntimeError): + await cond.wait_for(lambda: False) + + async def test_notify(self): + cond = _async_create_condition(_async_create_lock()) + result = [] + + async def c1(result): + async with cond: + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + async with cond: + if await cond.wait(): + result.append(2) + return True + + async def c3(result): + async with cond: + if await cond.wait(): + result.append(3) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + t3 = asyncio.create_task(c3(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + async with cond: + cond.notify(1) + await asyncio.sleep(1) + self.assertEqual([1], result) + + async with cond: + cond.notify(1) + cond.notify(2048) + await asyncio.sleep(1) + self.assertEqual([1, 2, 3], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + self.assertTrue(t3.done()) + self.assertTrue(t3.result()) + + async def test_notify_all(self): + cond = _async_create_condition(_async_create_lock()) + + result = [] + + async def c1(result): + async with cond: + if await cond.wait(): + result.append(1) + return True + + async def c2(result): + async with cond: + if await cond.wait(): + result.append(2) + return True + + t1 = asyncio.create_task(c1(result)) + t2 = asyncio.create_task(c2(result)) + + await asyncio.sleep(0) + self.assertEqual([], result) + + async with cond: + cond.notify_all() + await asyncio.sleep(1) + self.assertEqual([1, 2], result) + + self.assertTrue(t1.done()) + self.assertTrue(t1.result()) + self.assertTrue(t2.done()) + self.assertTrue(t2.result()) + + async def test_context_manager(self): + cond = _async_create_condition(_async_create_lock()) + self.assertFalse(cond.locked()) + async with cond: + self.assertTrue(cond.locked()) + self.assertFalse(cond.locked()) + + async def test_timeout_in_block(self): + condition = _async_create_condition(_async_create_lock()) + async with condition: + with self.assertRaises(asyncio.TimeoutError): + await asyncio.wait_for(condition.wait(), timeout=0.5) + + @unittest.skipIf( + sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11" + ) + async def test_cancelled_error_wakeup(self): + # Test that a cancelled error, 
received when awaiting wakeup,
+            # will be re-raised unmodified.
+            wake = False
+            raised = None
+            cond = _async_create_condition(_async_create_lock())
+
+            async def func():
+                nonlocal raised
+                async with cond:
+                    with self.assertRaises(asyncio.CancelledError) as err:
+                        await cond.wait_for(lambda: wake)
+                    raised = err.exception
+                    raise raised
+
+            task = asyncio.create_task(func())
+            await asyncio.sleep(0)
+            # Task is waiting on the condition, cancel it there.
+            task.cancel(msg="foo")  # type: ignore[call-arg]
+            with self.assertRaises(asyncio.CancelledError) as err:
+                await task
+            self.assertEqual(err.exception.args, ("foo",))
+            # We should have got the _same_ exception instance as the one
+            # originally raised.
+            self.assertIs(err.exception, raised)
+
+        @unittest.skipIf(
+            sys.version_info < (3, 11), "raising the same cancelled error requires Python>=3.11"
+        )
+        async def test_cancelled_error_re_acquire(self):
+            # Test that a cancelled error, received when re-acquiring the lock,
+            # will be re-raised unmodified.
+            wake = False
+            raised = None
+            cond = _async_create_condition(_async_create_lock())
+
+            async def func():
+                nonlocal raised
+                async with cond:
+                    with self.assertRaises(asyncio.CancelledError) as err:
+                        await cond.wait_for(lambda: wake)
+                    raised = err.exception
+                    raise raised
+
+            task = asyncio.create_task(func())
+            await asyncio.sleep(0)
+            # Task is waiting on the condition
+            await cond.acquire()
+            wake = True
+            cond.notify()
+            await asyncio.sleep(0)
+            # Task is now trying to re-acquire the lock, cancel it there.
+            task.cancel(msg="foo")  # type: ignore[call-arg]
+            cond.release()
+            with self.assertRaises(asyncio.CancelledError) as err:
+                await task
+            self.assertEqual(err.exception.args, ("foo",))
+            # We should have got the _same_ exception instance as the one
+            # originally raised.
+            self.assertIs(err.exception, raised)
+
+        @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11")
+        async def test_cancelled_wakeup(self):
+            # Test that a task cancelled at the "same" time as it is woken
+            # up as part of a Condition.notify() does not result in a lost wakeup.
+            # This test simulates a cancel while the target task is awaiting initial
+            # wakeup on the wakeup queue.
+            condition = _async_create_condition(_async_create_lock())
+            state = 0
+
+            async def consumer():
+                nonlocal state
+                async with condition:
+                    while True:
+                        await condition.wait_for(lambda: state != 0)
+                        if state < 0:
+                            return
+                        state -= 1
+
+            # create two consumers
+            c = [asyncio.create_task(consumer()) for _ in range(2)]
+            # wait for them to settle
+            await asyncio.sleep(0.1)
+            async with condition:
+                # produce one item and wake up one
+                state += 1
+                condition.notify(1)
+
+                # Cancel it while it is awaiting to be run.
+                # This cancellation could come from the outside.
+                c[0].cancel()
+
+                # now wait for the item to be consumed; if it isn't, our "notify"
+                # didn't take hold because it raced with a cancel()
+                try:
+                    async with asyncio.timeout(1):
+                        await condition.wait_for(lambda: state == 0)
+                except TimeoutError:
+                    pass
+                self.assertEqual(state, 0)
+
+                # clean up
+                state = -1
+                condition.notify_all()
+            await c[1]
+
+        @unittest.skipIf(sys.version_info < (3, 11), "asyncio.timeout requires Python>=3.11")
+        async def test_cancelled_wakeup_relock(self):
+            # Test that a task cancelled at the "same" time as it is woken
+            # up as part of a Condition.notify() does not result in a lost wakeup.
+            # This test simulates a cancel while the target task is acquiring the lock
+            # again.
+            condition = _async_create_condition(_async_create_lock())
+            state = 0
+
+            async def consumer():
+                nonlocal state
+                async with condition:
+                    while True:
+                        await condition.wait_for(lambda: state != 0)
+                        if state < 0:
+                            return
+                        state -= 1
+
+            # create two consumers
+            c = [asyncio.create_task(consumer()) for _ in range(2)]
+            # wait for them to settle
+            await asyncio.sleep(0.1)
+            async with condition:
+                # produce one item and wake up one
+                state += 1
+                condition.notify(1)
+
+                # now we sleep for a bit. This allows the target task to wake up and
+                # settle on re-acquiring the lock
+                await asyncio.sleep(0)
+
+                # Cancel it while awaiting the lock.
+                # This cancel could come from the outside.
+                c[0].cancel()
+
+                # now wait for the item to be consumed; if it isn't, our "notify"
+                # didn't take hold because it raced with a cancel()
+                try:
+                    async with asyncio.timeout(1):
+                        await condition.wait_for(lambda: state == 0)
+                except TimeoutError:
+                    pass
+                self.assertEqual(state, 0)
+
+                # clean up
+                state = -1
+                condition.notify_all()
+            await c[1]
+
+    if __name__ == "__main__":
+        unittest.main()
diff --git a/test/asynchronous/test_logger.py b/test/asynchronous/test_logger.py
new file mode 100644
index 0000000000..d024735fd8
--- /dev/null
+++ b/test/asynchronous/test_logger.py
@@ -0,0 +1,145 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
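The tests added below rely on the driver's standard-library logging integration: truncation is controlled by the `MONGOB_LOG_MAX_DOCUMENT_LENGTH` environment variable (the historical spelling used by the code under test), and truncated payloads appear to gain a `...` suffix, which is why the assertions allow `limit + 3` characters. Outside the test suite, surfacing the same records looks roughly like this sketch:

```python
# Sketch only: enable the DEBUG records these tests assert against.
import logging
import os

os.environ["MONGOB_LOG_MAX_DOCUMENT_LENGTH"] = "1000"  # truncation limit

handler = logging.StreamHandler()  # send records to stderr
for name in ("pymongo.command", "pymongo.connection", "pymongo.serverSelection"):
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
```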
+from __future__ import annotations + +import os +from test import unittest +from test.asynchronous import AsyncIntegrationTest, async_client_context +from unittest.mock import patch + +from bson import json_util +from pymongo.errors import OperationFailure +from pymongo.logger import _DEFAULT_DOCUMENT_LENGTH + +_IS_SYNC = False + + +# https://github.com/mongodb/specifications/tree/master/source/command-logging-and-monitoring/tests#prose-tests +class TestLogger(AsyncIntegrationTest): + async def test_default_truncation_limit(self): + docs = [{"x": "y"} for _ in range(100)] + db = self.db + + with patch.dict("os.environ"): + os.environ.pop("MONGOB_LOG_MAX_DOCUMENT_LENGTH", None) + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await db.test.insert_many(docs) + + cmd_started_log = json_util.loads(cm.records[0].getMessage()) + self.assertEqual(len(cmd_started_log["command"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertLessEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await db.test.find({}).to_list() + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + async def test_configured_truncation_limit(self): + cmd = {"hello": True} + db = self.db + with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": "5"}): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await db.command(cmd) + + cmd_started_log = json_util.loads(cm.records[0].getMessage()) + self.assertEqual(len(cmd_started_log["command"]), 5 + 3) + + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertLessEqual(len(cmd_succeeded_log["reply"]), 5 + 3) + with self.assertRaises(OperationFailure): + await db.command({"notARealCommand": True}) + cmd_failed_log = json_util.loads(cm.records[-1].getMessage()) + self.assertEqual(len(cmd_failed_log["failure"]), 5 + 3) + + async def test_truncation_multi_byte_codepoints(self): + document_lengths = ["20000", "20001", "20002"] + multi_byte_char_str_len = 50_000 + str_to_repeat = "界" + + multi_byte_char_str = "" + for i in range(multi_byte_char_str_len): + multi_byte_char_str += str_to_repeat + + for length in document_lengths: + with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": length}): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await self.db.test.insert_one({"x": multi_byte_char_str}) + cmd_started_log = json_util.loads(cm.records[0].getMessage())["command"] + + cmd_started_log = cmd_started_log[:-3] + last_3_bytes = cmd_started_log.encode()[-3:].decode() + + self.assertEqual(last_3_bytes, str_to_repeat) + + async def test_logging_without_listeners(self): + c = await self.async_single_client() + self.assertEqual(len(c._event_listeners.event_listeners()), 0) + with self.assertLogs("pymongo.connection", level="DEBUG") as cm: + await c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + with self.assertLogs("pymongo.serverSelection", level="DEBUG") as cm: + await c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + + @async_client_context.require_failCommand_fail_point + async def test_logging_retry_read_attempts(self): + await 
self.db.test.insert_one({"x": "1"}) + + async with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await self.db.test.find_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying read attempt" in r.getMessage() + ] + self.assertEqual(len(retry_messages), 1) + + @async_client_context.require_failCommand_fail_point + @async_client_context.require_retryable_writes + async def test_logging_retry_write_attempts(self): + async with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + "failCommands": ["insert"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + await self.db.test.insert_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying write attempt" in r.getMessage() + ] + self.assertEqual(len(retry_messages), 1) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_max_staleness.py b/test/asynchronous/test_max_staleness.py new file mode 100644 index 0000000000..b6e15f9158 --- /dev/null +++ b/test/asynchronous/test_max_staleness.py @@ -0,0 +1,149 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test maxStalenessSeconds support.""" +from __future__ import annotations + +import asyncio +import os +import sys +import time +import warnings +from pathlib import Path + +from pymongo import AsyncMongoClient +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncPyMongoTestCase, async_client_context, unittest +from test.asynchronous.utils_selection_tests import create_selection_tests + +from pymongo.errors import ConfigurationError +from pymongo.server_selectors import writable_server_selector + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "max_staleness") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "max_staleness") + + +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore + pass + + +class TestMaxStaleness(AsyncPyMongoTestCase): + async def test_max_staleness(self): + client = self.simple_client() + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://a/?readPreference=secondary") + self.assertEqual(-1, client.read_preference.max_staleness) + + # These tests are specified in max-staleness-tests.rst. + with self.assertRaises(ConfigurationError): + # Default read pref "primary" can't be used with max staleness. + self.simple_client("mongodb://a/?maxStalenessSeconds=120") + + with self.assertRaises(ConfigurationError): + # Read pref "primary" can't be used with max staleness. 
+ self.simple_client("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") + + client = self.simple_client("mongodb://host/?maxStalenessSeconds=-1") + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client( + "mongodb://host/?readPreference=secondary&maxStalenessSeconds=120" + ) + self.assertEqual(120, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") + self.assertEqual(1, client.read_preference.max_staleness) + + client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") + self.assertEqual(-1, client.read_preference.max_staleness) + + client = self.simple_client(maxStalenessSeconds=-1, readPreference="nearest") + self.assertEqual(-1, client.read_preference.max_staleness) + + with self.assertRaises(TypeError): + # Prohibit None. + self.simple_client(maxStalenessSeconds=None, readPreference="nearest") + + async def test_max_staleness_float(self): + with self.assertRaises(TypeError) as ctx: + await self.async_rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") + + self.assertIn("must be an integer", str(ctx.exception)) + + with warnings.catch_warnings(record=True) as ctx: + warnings.simplefilter("always") + client = self.simple_client( + "mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest" + ) + + # Option was ignored. + self.assertEqual(-1, client.read_preference.max_staleness) + self.assertIn("must be an integer", str(ctx[0])) + + async def test_max_staleness_zero(self): + # Zero is too small. + with self.assertRaises(ValueError) as ctx: + await self.async_rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") + + self.assertIn("must be a positive integer", str(ctx.exception)) + + with warnings.catch_warnings(record=True) as ctx: + warnings.simplefilter("always") + client = self.simple_client( + "mongodb://host/?maxStalenessSeconds=0&readPreference=nearest" + ) + + # Option was ignored. + self.assertEqual(-1, client.read_preference.max_staleness) + self.assertIn("must be a positive integer", str(ctx[0])) + + @async_client_context.require_replica_set + async def test_last_write_date(self): + # From max-staleness-tests.rst, "Parse lastWriteDate". + client = await self.async_rs_or_single_client(heartbeatFrequencyMS=500) + await client.pymongo_test.test.insert_one({}) + # Wait for the server description to be updated. + await asyncio.sleep(1) + server = await client._topology.select_server(writable_server_selector, _Op.TEST) + first = server.description.last_write_date + self.assertTrue(first) + # The first last_write_date may correspond to an internal server write, + # sleep so that the next write does not occur within the same second. + await asyncio.sleep(1) + await client.pymongo_test.test.insert_one({}) + # Wait for the server description to be updated.
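+ # (heartbeatFrequencyMS is 500 above, so sleeping one second allows the + # monitor to deliver at least one new server description.)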
+ await asyncio.sleep(1) + server = await client._topology.select_server(writable_server_selector, _Op.TEST) + second = server.description.last_write_date + assert first is not None + + assert second is not None + self.assertGreater(second, first) + self.assertLess(second, first + 10) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_mongos_load_balancing.py b/test/asynchronous/test_mongos_load_balancing.py new file mode 100644 index 0000000000..97170aa9e0 --- /dev/null +++ b/test/asynchronous/test_mongos_load_balancing.py @@ -0,0 +1,199 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test AsyncMongoClient's mongos load balancing using a mock.""" +from __future__ import annotations + +import asyncio +import sys +import threading +from test.asynchronous.helpers import ConcurrentRunner + +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncMockClientTest, async_client_context, connected, unittest +from test.asynchronous.pymongo_mocks import AsyncMockClient +from test.utils_shared import async_wait_until + +from pymongo.errors import AutoReconnect, InvalidOperation +from pymongo.server_selectors import writable_server_selector +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = False + + +class SimpleOp(ConcurrentRunner): + def __init__(self, client): + super().__init__() + self.client = client + self.passed = False + + async def run(self): + await self.client.db.command("ping") + self.passed = True # No exception raised. + + +async def do_simple_op(client, ntasks): + tasks = [SimpleOp(client) for _ in range(ntasks)] + for t in tasks: + await t.start() + + for t in tasks: + await t.join() + + for t in tasks: + assert t.passed + + +async def writable_addresses(topology): + return { + server.description.address + for server in await topology.select_servers(writable_server_selector, _Op.TEST) + } + + +class TestMongosLoadBalancing(AsyncMockClientTest): + @async_client_context.require_connection + @async_client_context.require_no_load_balancer + async def asyncSetUp(self): + await super().asyncSetUp() + + def mock_client(self, **kwargs): + mock_client = AsyncMockClient( + standalones=[], + members=[], + mongoses=["a:1", "b:2", "c:3"], + host="a:1,b:2,c:3", + connect=False, + **kwargs, + ) + self.addAsyncCleanup(mock_client.aclose) + + # Latencies in seconds. + mock_client.mock_rtts["a:1"] = 0.020 + mock_client.mock_rtts["b:2"] = 0.025 + mock_client.mock_rtts["c:3"] = 0.045 + return mock_client + + async def test_lazy_connect(self): + # While connected() ensures we can trigger connection from the main + # thread and wait for the monitors, this test triggers connection from + # several threads at once to check for data races. + nthreads = 10 + client = self.mock_client() + self.assertEqual(0, len(client.nodes)) + + # Trigger initial connection. 
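+ # do_simple_op() (defined above) starts nthreads concurrent tasks that + # each run a "ping", so topology discovery is raced from many tasks at once.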
+ await do_simple_op(client, nthreads) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + async def test_failover(self): + ntasks = 10 + client = await connected(self.mock_client(localThresholdMS=0.001)) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + # Our chosen mongos goes down. + client.kill_host("a:1") + + # Trigger failover to higher-latency nodes. AutoReconnect should be + # raised at most once in each thread. + passed = [] + + async def f(): + try: + await client.db.command("ping") + except AutoReconnect: + # Second attempt succeeds. + await client.db.command("ping") + + passed.append(True) + + tasks = [ConcurrentRunner(target=f) for _ in range(ntasks)] + for t in tasks: + await t.start() + + for t in tasks: + await t.join() + + self.assertEqual(ntasks, len(passed)) + + # Down host removed from list. + self.assertEqual(2, len(client.nodes)) + + async def test_local_threshold(self): + client = await connected(self.mock_client(localThresholdMS=30)) + self.assertEqual(30, client.options.local_threshold_ms) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + topology = client._topology + + # All are within a 30-ms latency window, see self.mock_client(). + self.assertEqual({("a", 1), ("b", 2), ("c", 3)}, await writable_addresses(topology)) + + # No error + await client.admin.command("ping") + + client = await connected(self.mock_client(localThresholdMS=0)) + self.assertEqual(0, client.options.local_threshold_ms) + # No error + await client.db.command("ping") + # Our chosen mongos goes down. + client.kill_host("{}:{}".format(*next(iter(client.nodes)))) + try: + await client.db.command("ping") + except: + pass + + # We eventually connect to a new mongos. + async def connect_to_new_mongos(): + try: + return await client.db.command("ping") + except AutoReconnect: + pass + + await async_wait_until(connect_to_new_mongos, "connect to a new mongos") + + async def test_load_balancing(self): + # Although the server selection JSON tests already prove that + # select_servers works for sharded topologies, here we do an end-to-end + # test of discovering servers' round trip times and configuring + # localThresholdMS. + client = await connected(self.mock_client()) + await async_wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") + + # Prohibited for topology type Sharded. + with self.assertRaises(InvalidOperation): + await client.address + + topology = client._topology + self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) + + # a and b are within the 15-ms latency window, see self.mock_client(). + self.assertEqual({("a", 1), ("b", 2)}, await writable_addresses(topology)) + + client.mock_rtts["a:1"] = 0.045 + + # Discover only b is within latency window. + async def predicate(): + return {("b", 2)} == await writable_addresses(topology) + + await async_wait_until( + predicate, + 'discover server "a" is too far', + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_monitor.py b/test/asynchronous/test_monitor.py new file mode 100644 index 0000000000..dde8976c06 --- /dev/null +++ b/test/asynchronous/test_monitor.py @@ -0,0 +1,125 @@ +# Copyright 2014-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the monitor module.""" +from __future__ import annotations + +import asyncio +import gc +import subprocess +import sys +import warnings +from functools import partial + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, connected, unittest +from test.asynchronous.utils import ( + async_wait_until, +) +from test.utils_shared import ServerAndTopologyEventListener, gevent_monkey_patched + +from pymongo.periodic_executor import _EXECUTORS + +_IS_SYNC = False + + +def unregistered(ref): + gc.collect() + return ref not in _EXECUTORS + + +def get_executors(client): + executors = [] + for server in client._topology._servers.values(): + executors.append(server._monitor._executor) + executors.append(server._monitor._rtt_monitor._executor) + executors.append(client._kill_cursors_executor) + executors.append(client._topology._Topology__events_executor) + return [e for e in executors if e is not None] + + +class TestMonitor(AsyncIntegrationTest): + async def create_client(self): + listener = ServerAndTopologyEventListener() + client = await self.unmanaged_async_single_client(event_listeners=[listener]) + await connected(client) + return client + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-5283 fails often on PyPy") + @unittest.skipIf( + gevent_monkey_patched(), "PYTHON-5516 Resources are not cleared when using gevent" + ) + async def test_cleanup_executors_on_client_del(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + client = await self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + # Each executor stores a weakref to itself in _EXECUTORS. + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + + del executors + del client + + for ref, name in executor_refs: + await async_wait_until( + partial(unregistered, ref), f"unregister executor: {name}", timeout=5 + ) + + def resource_warning_caught(): + gc.collect() + for warning in w: + if ( + issubclass(warning.category, ResourceWarning) + and "Call AsyncMongoClient.close() to safely shut down your client and free up resources." 
+ in str(warning.message) + ): + return True + return False + + await async_wait_until(resource_warning_caught, "catch resource warning") + + async def test_cleanup_executors_on_client_close(self): + client = await self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + await client.close() + + for executor in executors: + await async_wait_until( + lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5 + ) + + @async_client_context.require_sync + def test_no_thread_start_runtime_err_on_shutdown(self): + """Test we silence noisy runtime errors fired when the AsyncMongoClient spawns a new thread + on process shutdown.""" + command = [ + sys.executable, + "-c", + "from pymongo import AsyncMongoClient; c = AsyncMongoClient()", + ] + completed_process: subprocess.CompletedProcess = subprocess.run( + command, capture_output=True + ) + + self.assertFalse(completed_process.stderr) + self.assertFalse(completed_process.stdout) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_monitoring.py b/test/asynchronous/test_monitoring.py new file mode 100644 index 0000000000..6a9a5b8da7 --- /dev/null +++ b/test/asynchronous/test_monitoring.py @@ -0,0 +1,1270 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import asyncio +import copy +import datetime +import sys +import time +from typing import Any + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + client_knobs, + sanitize_cmd, + unittest, +) +from test.utils_shared import ( + EventListener, + OvertCommandListener, + async_wait_until, +) + +from bson.int64 import Int64 +from bson.objectid import ObjectId +from bson.son import SON +from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure +from pymongo.read_preferences import ReadPreference +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class AsyncTestCommandMonitoring(AsyncIntegrationTest): + listener: EventListener + + @classmethod + def setUpClass(cls) -> None: + cls.listener = OvertCommandListener() + + @async_client_context.require_connection + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.listener.reset() + self.client = await self.async_rs_or_single_client( + event_listeners=[self.listener], retryWrites=False + ) + + async def test_started_simple(self): + await self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + + async def test_succeeded_simple(self): + await self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertEqual("ping", succeeded.command_name) + self.assertEqual(await self.client.address, succeeded.connection_id) + self.assertEqual(1, succeeded.reply.get("ok")) + self.assertIsInstance(succeeded.request_id, int) + self.assertIsInstance(succeeded.duration_micros, int) + + async def test_failed_simple(self): + try: + await self.client.pymongo_test.command("oops!") + except OperationFailure: + pass + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertEqual("oops!", failed.command_name) + self.assertEqual(await self.client.address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) + + async def test_find_one(self): + await self.client.pymongo_test.test.find_one() + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + 
self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + + async def test_find_and_get_more(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + self.listener.reset() + cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) + for _ in range(4): + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["id"], cursor_id) + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(csr["firstBatch"], [{} for _ in range(4)]) + + self.listener.reset() + # Next batch. Exhausting the cursor could cause a getMore + # that returns id of 0 and no results. + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("getMore", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + csr = succeeded.reply["cursor"] + self.assertEqual(csr["id"], cursor_id) + self.assertEqual(csr["ns"], "pymongo_test.test") + self.assertEqual(csr["nextBatch"], [{} for _ in range(4)]) + finally: + # Exhaust the cursor to avoid kill cursors. + tuple(await cursor.to_list()) + + async def test_find_with_explain(self): + cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + coll = self.client.pymongo_test.test + # Test that we publish the unwrapped command. 
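+ # Against mongos, a non-primary read preference makes the driver attach + # a $readPreference field to what it sends on the wire; the started + # event should still report the application's bare explain command.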
+ if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + res = await coll.find().explain() + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(cmd, started.command) + self.assertEqual("explain", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("explain", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(await self.client.address, succeeded.connection_id) + self.assertEqual(res, succeeded.reply) + + async def _test_find_options(self, query, expected_cmd): + coll = self.client.pymongo_test.test + await coll.drop() + await coll.create_index("x") + await coll.insert_many([{"x": i} for i in range(5)]) + + # Test that we publish the unwrapped command. + self.listener.reset() + if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + + cursor = coll.find(**query) + + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(expected_cmd, started.command) + self.assertEqual("find", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(await self.client.address, succeeded.connection_id) + finally: + # Exhaust the cursor to avoid kill cursors. + tuple(await cursor.to_list()) + + async def test_find_options(self): + query = { + "filter": {}, + "hint": [("x", 1)], + "max_time_ms": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "return_key": True, + "show_record_id": True, + "projection": {"x": False}, + "skip": 1, + "no_cursor_timeout": True, + "sort": [("_id", 1)], + "allow_partial_results": True, + "comment": "this is a test", + "batch_size": 2, + } + + cmd = { + "find": "test", + "filter": {}, + "hint": SON([("x", 1)]), + "comment": "this is a test", + "maxTimeMS": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "returnKey": True, + "showRecordId": True, + "sort": SON([("_id", 1)]), + "projection": {"x": False}, + "skip": 1, + "batchSize": 2, + "noCursorTimeout": True, + "allowPartialResults": True, + } + + if async_client_context.version < (4, 1, 0, -1): + query["max_scan"] = 10 + cmd["maxScan"] = 10 + + await self._test_find_options(query, cmd) + + @async_client_context.require_version_max(3, 7, 2) + async def test_find_snapshot(self): + # Test "snapshot" parameter separately, can't combine with "sort". 
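+ # (The "snapshot" find option was removed from newer servers, hence the + # require_version_max(3, 7, 2) decorator above.)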
+ query = {"filter": {}, "snapshot": True} + + cmd = {"find": "test", "filter": {}, "snapshot": True} + + await self._test_find_options(query, cmd) + + async def test_command_and_get_more(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) + self.listener.reset() + coll = self.client.pymongo_test.test + # Test that we publish the unwrapped command. + if await self.client.is_mongos: + coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) + cursor = await coll.aggregate([{"$project": {"_id": False, "x": 1}}], batchSize=4) + for _ in range(4): + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [ + ("aggregate", "test"), + ("pipeline", [{"$project": {"_id": False, "x": 1}}]), + ("cursor", {"batchSize": 4}), + ] + ), + started.command, + ) + self.assertEqual("aggregate", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("aggregate", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_cursor = { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{"x": 1} for _ in range(4)], + } + self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) + + self.listener.reset() + await anext(cursor) + try: + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), + started.command, + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("getMore", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertEqual(cursor.address, succeeded.connection_id) + expected_result = { + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "nextBatch": [{"x": 1} for _ in range(4)], + }, + "ok": 1.0, + } + self.assertEqualReply(expected_result, succeeded.reply) + finally: + # Exhaust the cursor to avoid kill cursors. 
+ tuple(await cursor.to_list()) + + async def test_get_more_failure(self): + address = await self.client.address + coll = self.client.pymongo_test.test + cursor_id = Int64(12345) + cursor_doc = {"id": cursor_id, "firstBatch": [], "ns": coll.full_name} + cursor = AsyncCommandCursor(coll, cursor_doc, address) + try: + await anext(cursor) + except Exception: + pass + started = self.listener.started_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + failed = self.listener.failed_events[0] + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test")]), started.command + ) + self.assertEqual("getMore", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertIsInstance(failed.duration_micros, int) + self.assertEqual("getMore", failed.command_name) + self.assertIsInstance(failed.request_id, int) + self.assertEqual(cursor.address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + + @async_client_context.require_replica_set + @async_client_context.require_secondaries_count(1) + async def test_not_primary_error(self): + address = next(iter(await async_client_context.client.secondaries)) + client = await self.async_single_client(*address, event_listeners=[self.listener]) + # Clear authentication command results from the listener. + await client.admin.command("ping") + self.listener.reset() + error = None + try: + await client.pymongo_test.test.find_one_and_delete({}) + except NotPrimaryError as exc: + error = exc.errors + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertEqual("findAndModify", failed.command_name) + self.assertEqual(address, failed.connection_id) + self.assertEqual(0, failed.failure.get("ok")) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) + self.assertEqual(error, failed.failure) + + @async_client_context.require_no_mongos + async def test_exhaust(self): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) + self.listener.reset() + cursor = self.client.pymongo_test.test.find( + projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST + ) + await anext(cursor) + cursor_id = cursor.cursor_id + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON( + [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] + ), + started.command, + ) + self.assertEqual("find", started.command_name) + self.assertEqual(cursor.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("find", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + 
self.assertEqual(cursor.address, succeeded.connection_id) + expected_result = { + "cursor": { + "id": cursor_id, + "ns": "pymongo_test.test", + "firstBatch": [{} for _ in range(5)], + }, + "ok": 1, + } + self.assertEqualReply(expected_result, succeeded.reply) + + self.listener.reset() + tuple(await cursor.to_list()) + self.assertEqual(0, len(self.listener.failed_events)) + for event in self.listener.started_events: + self.assertIsInstance(event, monitoring.CommandStartedEvent) + self.assertEqualCommand( + SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 5)]), + event.command, + ) + self.assertEqual("getMore", event.command_name) + self.assertEqual(cursor.address, event.connection_id) + self.assertEqual("pymongo_test", event.database_name) + self.assertIsInstance(event.request_id, int) + for event in self.listener.succeeded_events: + self.assertIsInstance(event, monitoring.CommandSucceededEvent) + self.assertIsInstance(event.duration_micros, int) + self.assertEqual("getMore", event.command_name) + self.assertIsInstance(event.request_id, int) + self.assertEqual(cursor.address, event.connection_id) + # Last getMore receives a response with cursor id 0. + self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) + + async def test_kill_cursors(self): + with client_knobs(kill_cursor_frequency=0.01): + await self.client.pymongo_test.test.drop() + await self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) + cursor = self.client.pymongo_test.test.find().batch_size(5) + await anext(cursor) + cursor_id = cursor.cursor_id + self.listener.reset() + await cursor.close() + await asyncio.sleep(2) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + # There could be more than one cursor_id here depending on + # when the thread last ran. + self.assertIn(cursor_id, started.command["cursors"]) + self.assertEqual("killCursors", started.command_name) + self.assertIs(type(started.connection_id), tuple) + self.assertEqual(cursor.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual("killCursors", succeeded.command_name) + self.assertIsInstance(succeeded.request_id, int) + self.assertIs(type(succeeded.connection_id), tuple) + self.assertEqual(cursor.address, succeeded.connection_id) + # There could be more than one cursor_id here depending on + # when the thread last ran. 
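+ # (The background kill-cursors executor batches ids from closed cursors, + # so membership is asserted rather than equality.)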
+ self.assertIn( + cursor_id, succeeded.reply["cursorsUnknown"] + succeeded.reply["cursorsKilled"] + ) + + async def test_non_bulk_writes(self): + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + # Implied write concern insert_one + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # Unacknowledged insert_one + self.listener.reset() + coll = coll.with_options(write_concern=WriteConcern(w=0)) + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 0}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertEqualReply(succeeded.reply, {"ok": 1}) + + # Explicit write concern insert_one + self.listener.reset() + coll = coll.with_options(write_concern=WriteConcern(w=1)) + res = await coll.insert_one({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": res.inserted_id, "x": 1}]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + 
self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # delete_many + self.listener.reset() + res = await coll.delete_many({"x": 1}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 1}), ("limit", 0)])]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(res.deleted_count, reply.get("n")) + + # replace_one + self.listener.reset() + oid = ObjectId() + res = await coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": oid}), + ("u", {"_id": oid, "x": 1}), + ("multi", False), + ("upsert", True), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) + + # update_one + self.listener.reset() + res = await coll.update_one({"x": 1}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 1}), + ("u", {"$inc": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + 
self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # update_many + self.listener.reset() + res = await coll.update_many({"x": 2}, {"$inc": {"x": 1}}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"x": 2}), + ("u", {"$inc": {"x": 1}}), + ("multi", True), + ("upsert", False), + ] + ) + ], + ), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("update", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + # delete_one + self.listener.reset() + _ = await coll.delete_one({"x": 3}) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"x": 3}), ("limit", 1)])]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("delete", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(1, reply.get("n")) + + self.assertEqual(0, await coll.count_documents({})) + + # write errors + await coll.insert_one({"_id": 1}) + try: + self.listener.reset() + await coll.insert_one({"_id": 1}) + except OperationFailure: + pass + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON( + [ + ("insert", coll.name), + ("ordered", True), + ("documents", [{"_id": 1}]), + ("writeConcern", {"w": 1}), + ] + ) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", 
started.database_name) + self.assertEqual("insert", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + reply = succeeded.reply + self.assertEqual(1, reply.get("ok")) + self.assertEqual(0, reply.get("n")) + errors = reply.get("writeErrors") + self.assertIsInstance(errors, list) + error = errors[0] + self.assertEqual(0, error.get("index")) + self.assertIsInstance(error.get("code"), int) + self.assertIsInstance(error.get("errmsg"), str) + + async def test_insert_many(self): + # This always uses the bulk API. + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + big = "x" * (1024 * 1024 * 4) + docs = [{"_id": i, "big": big} for i in range(6)] + await coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + count = 0 + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + reply = succeed.reply + self.assertEqual(1, reply.get("ok")) + count += reply.get("n", 0) + self.assertEqual(documents, docs) + self.assertEqual(6, count) + + async def test_insert_many_unacknowledged(self): + coll = self.client.pymongo_test.test + await coll.drop() + unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) + self.listener.reset() + + # Force two batches on legacy servers. 
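+ # Six ~12 MiB documents exceed the 48 MiB maxMessageSizeBytes limit, so + # this unacknowledged insert_many is split into multiple insert commands.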
+ big = "x" * (1024 * 1024 * 12) + docs = [{"_id": i, "big": big} for i in range(6)] + await unack_coll.insert_many(docs) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + documents = [] + operation_id = started[0].operation_id + self.assertIsInstance(operation_id, int) + for start, succeed in zip(started, succeeded): + self.assertIsInstance(start, monitoring.CommandStartedEvent) + cmd = sanitize_cmd(start.command) + cmd.pop("writeConcern", None) + self.assertEqual(["insert", "ordered", "documents"], list(cmd.keys())) + self.assertEqual(coll.name, cmd["insert"]) + self.assertIs(True, cmd["ordered"]) + documents.extend(cmd["documents"]) + self.assertEqual("pymongo_test", start.database_name) + self.assertEqual("insert", start.command_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + self.assertEqual(1, succeed.reply.get("ok")) + self.assertEqual(documents, docs) + + async def check(): + return await coll.count_documents({}) == 6 + + await async_wait_until(check, "insert documents with w=0") + + async def test_bulk_write(self): + coll = self.client.pymongo_test.test + await coll.drop() + self.listener.reset() + + await coll.bulk_write( + [ + InsertOne({"_id": 1}), + UpdateOne({"_id": 1}, {"$set": {"x": 1}}), + DeleteOne({"_id": 1}), + ] + ) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) + operation_id = started[0].operation_id + pairs = list(zip(started, succeeded)) + self.assertEqual(3, len(pairs)) + for start, succeed in pairs: + self.assertIsInstance(start, monitoring.CommandStartedEvent) + self.assertEqual("pymongo_test", start.database_name) + self.assertIsInstance(start.request_id, int) + self.assertEqual(await self.client.address, start.connection_id) + self.assertIsInstance(succeed, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeed.duration_micros, int) + self.assertEqual(start.command_name, succeed.command_name) + self.assertEqual(start.request_id, succeed.request_id) + self.assertEqual(start.connection_id, succeed.connection_id) + self.assertEqual(start.operation_id, operation_id) + self.assertEqual(succeed.operation_id, operation_id) + + expected = SON([("insert", coll.name), ("ordered", True), ("documents", [{"_id": 1}])]) + self.assertEqualCommand(expected, started[0].command) + expected = SON( + [ + ("update", coll.name), + ("ordered", True), + ( + "updates", + [ + SON( + [ + ("q", {"_id": 1}), + ("u", {"$set": {"x": 1}}), + ("multi", False), + ("upsert", False), + ] + ) + ], + ), + ] + ) + self.assertEqualCommand(expected, started[1].command) + expected = SON( + [ + ("delete", coll.name), + ("ordered", True), + ("deletes", [SON([("q", {"_id": 1}), ("limit", 1)])]), + ] + ) + self.assertEqualCommand(expected, started[2].command) + + @async_client_context.require_failCommand_fail_point + async def test_bulk_write_command_network_error(self): + coll = self.client.pymongo_test.test + self.listener.reset() + 
+        insert_network_error = {
+            "configureFailPoint": "failCommand",
+            "mode": {"times": 1},
+            "data": {
+                "failCommands": ["insert"],
+                "closeConnection": True,
+            },
+        }
+        async with self.fail_point(insert_network_error):
+            with self.assertRaises(AutoReconnect):
+                await coll.bulk_write([InsertOne({"_id": 1})])
+        failed = self.listener.failed_events
+        self.assertEqual(1, len(failed))
+        event = failed[0]
+        self.assertEqual(event.command_name, "insert")
+        self.assertIsInstance(event.failure, dict)
+        self.assertEqual(event.failure["errtype"], "AutoReconnect")
+        self.assertTrue(event.failure["errmsg"])
+
+    @async_client_context.require_failCommand_fail_point
+    async def test_bulk_write_command_error(self):
+        coll = self.client.pymongo_test.test
+        self.listener.reset()
+
+        insert_command_error = {
+            "configureFailPoint": "failCommand",
+            "mode": {"times": 1},
+            "data": {
+                "failCommands": ["insert"],
+                "closeConnection": False,
+                "errorCode": 10107,  # Not primary
+            },
+        }
+        async with self.fail_point(insert_command_error):
+            with self.assertRaises(NotPrimaryError):
+                await coll.bulk_write([InsertOne({"_id": 1})])
+        failed = self.listener.failed_events
+        self.assertEqual(1, len(failed))
+        event = failed[0]
+        self.assertEqual(event.command_name, "insert")
+        self.assertIsInstance(event.failure, dict)
+        self.assertEqual(event.failure["code"], 10107)
+        self.assertTrue(event.failure["errmsg"])
+
+    async def test_write_errors(self):
+        coll = self.client.pymongo_test.test
+        await coll.drop()
+        self.listener.reset()
+
+        try:
+            await coll.bulk_write(
+                [
+                    InsertOne({"_id": 1}),
+                    InsertOne({"_id": 1}),
+                    InsertOne({"_id": 1}),
+                    DeleteOne({"_id": 1}),
+                ],
+                ordered=False,
+            )
+        except OperationFailure:
+            pass
+        started = self.listener.started_events
+        succeeded = self.listener.succeeded_events
+        self.assertEqual(0, len(self.listener.failed_events))
+        operation_id = started[0].operation_id
+        pairs = list(zip(started, succeeded))
+        errors = []
+        for start, succeed in pairs:
+            self.assertIsInstance(start, monitoring.CommandStartedEvent)
+            self.assertEqual("pymongo_test", start.database_name)
+            self.assertIsInstance(start.request_id, int)
+            self.assertEqual(await self.client.address, start.connection_id)
+            self.assertIsInstance(succeed, monitoring.CommandSucceededEvent)
+            self.assertIsInstance(succeed.duration_micros, int)
+            self.assertEqual(start.command_name, succeed.command_name)
+            self.assertEqual(start.request_id, succeed.request_id)
+            self.assertEqual(start.connection_id, succeed.connection_id)
+            self.assertEqual(start.operation_id, operation_id)
+            self.assertEqual(succeed.operation_id, operation_id)
+            if "writeErrors" in succeed.reply:
+                errors.extend(succeed.reply["writeErrors"])
+
+        self.assertEqual(2, len(errors))
+        fields = {"index", "code", "errmsg"}
+        for error in errors:
+            self.assertLessEqual(fields, set(error))
+
+    async def test_first_batch_helper(self):
+        # Regardless of server version and use of helpers._first_batch,
+        # this test should still pass.
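+        # (helpers._first_batch is an internal code path for commands, like
+        # listIndexes here, whose reply comes back as a single cursor batch.)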
+ self.listener.reset() + tuple(await (await self.client.pymongo_test.test.list_indexes()).to_list()) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + expected = SON([("listIndexes", "test"), ("cursor", {})]) + self.assertEqualCommand(expected, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("listIndexes", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await self.client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertIn("cursor", succeeded.reply) + self.assertIn("ok", succeeded.reply) + + self.listener.reset() + + @async_client_context.require_version_max(6, 1, 99) + async def test_sensitive_commands(self): + listener = EventListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + listeners = client._event_listeners + + listener.reset() + cmd = SON([("getnonce", 1)]) + listeners.publish_command_start(cmd, "pymongo_test", 12345, await client.address, None) # type: ignore[arg-type] + delta = datetime.timedelta(milliseconds=100) + listeners.publish_command_success( + delta, + {"nonce": "e474f4561c5eb40b", "ok": 1.0}, + "getnonce", + 12345, + await self.client.address, # type: ignore[arg-type] + None, + database_name="pymongo_test", + ) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqual({}, started.command) + self.assertEqual("pymongo_test", started.database_name) + self.assertEqual("getnonce", started.command_name) + self.assertIsInstance(started.request_id, int) + self.assertEqual(await client.address, started.connection_id) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertEqual(succeeded.duration_micros, 100000) + self.assertEqual(started.command_name, succeeded.command_name) + self.assertEqual(started.request_id, succeeded.request_id) + self.assertEqual(started.connection_id, succeeded.connection_id) + self.assertEqual({}, succeeded.reply) + + +class AsyncTestGlobalListener(AsyncIntegrationTest): + listener: EventListener + saved_listeners: Any + + @classmethod + def setUpClass(cls) -> None: + cls.listener = OvertCommandListener() + # We plan to call register(), which internally modifies _LISTENERS. + cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) + monitoring.register(cls.listener) + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener.reset() + self.client = await self.async_single_client() + # Get one (authenticated) socket in the pool. 
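+        # Priming the pool means later assertions observe events from the
+        # test's own commands, not from connection handshake/auth traffic.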
+ await self.client.pymongo_test.command("ping") + + @classmethod + def tearDownClass(cls): + monitoring._LISTENERS = cls.saved_listeners + + async def test_simple(self): + await self.client.pymongo_test.command("ping") + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertEqualCommand(SON([("ping", 1)]), started.command) + self.assertEqual("ping", started.command_name) + self.assertEqual(await self.client.address, started.connection_id) + self.assertEqual("pymongo_test", started.database_name) + self.assertIsInstance(started.request_id, int) + + +class AsyncTestEventClasses(unittest.IsolatedAsyncioTestCase): + def test_command_event_repr(self): + request_id, connection_id, operation_id, db_name = 1, ("localhost", 27017), 2, "admin" + event = monitoring.CommandStartedEvent( + {"ping": 1}, db_name, request_id, connection_id, operation_id + ) + self.assertEqual( + repr(event), + "", + ) + delta = datetime.timedelta(milliseconds=100) + event = monitoring.CommandSucceededEvent( + delta, {"ok": 1}, "ping", request_id, connection_id, operation_id, database_name=db_name + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.CommandFailedEvent( + delta, {"ok": 0}, "ping", request_id, connection_id, operation_id, database_name=db_name + ) + self.assertEqual( + repr(event), + "", + ) + + def test_server_heartbeat_event_repr(self): + connection_id = ("localhost", 27017) + event = monitoring.ServerHeartbeatStartedEvent(connection_id) + self.assertEqual( + repr(event), "" + ) + delta = 0.1 + event = monitoring.ServerHeartbeatSucceededEvent( + delta, + {"ok": 1}, # type: ignore[arg-type] + connection_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerHeartbeatFailedEvent( + delta, + "ERROR", # type: ignore[arg-type] + connection_id, + ) + self.assertEqual( + repr(event), + "", + ) + + def test_server_event_repr(self): + server_address = ("localhost", 27017) + topology_id = ObjectId("000000000000000000000001") + event = monitoring.ServerOpeningEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerDescriptionChangedEvent( + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + server_address, + topology_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.ServerClosedEvent(server_address, topology_id) + self.assertEqual( + repr(event), + "", + ) + + def test_topology_event_repr(self): + topology_id = ObjectId("000000000000000000000001") + event = monitoring.TopologyOpenedEvent(topology_id) + self.assertEqual(repr(event), "") + event = monitoring.TopologyDescriptionChangedEvent( + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + topology_id, + ) + self.assertEqual( + repr(event), + "", + ) + event = monitoring.TopologyClosedEvent(topology_id) + self.assertEqual(repr(event), "") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_on_demand_csfle.py b/test/asynchronous/test_on_demand_csfle.py new file mode 100644 index 0000000000..55394ddeb8 --- /dev/null +++ b/test/asynchronous/test_on_demand_csfle.py @@ -0,0 +1,115 @@ +# Copyright 2022-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test client side encryption with on demand credentials.""" +from __future__ import annotations + +import os +import sys +import unittest + +import pytest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context + +from bson.codec_options import CodecOptions +from pymongo.asynchronous.encryption import ( + _HAVE_PYMONGOCRYPT, + AsyncClientEncryption, + EncryptionError, +) + +_IS_SYNC = False + +pytestmark = pytest.mark.kms + + +class TestonDemandGCPCredentials(AsyncIntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + async def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = AsyncClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("gcp", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + async def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = AsyncClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + await self.client_encryption.create_data_key("gcp", self.master_key) + + +class TestonDemandAzureCredentials(AsyncIntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @async_client_context.require_version_min(4, 2, -1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.master_key = { + "keyVaultEndpoint": os.environ["KEY_VAULT_ENDPOINT"], + "keyName": os.environ["KEY_NAME"], + } + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + async def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = AsyncClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=async_client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + await self.client_encryption.create_data_key("azure", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + async def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + 
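+        # An empty {"azure": {}} provider map requests on-demand credentials,
+        # which libmongocrypt obtains from the Azure IMDS metadata endpoint;
+        # success therefore depends on the host's managed identity.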
+        self.client_encryption = AsyncClientEncryption(
+            kms_providers={"azure": {}},
+            key_vault_namespace="keyvault.datakeys",
+            key_vault_client=async_client_context.client,
+            codec_options=CodecOptions(),
+        )
+        await self.client_encryption.create_data_key("azure", self.master_key)
+
+
+if __name__ == "__main__":
+    unittest.main(verbosity=2)
diff --git a/test/asynchronous/test_pooling.py b/test/asynchronous/test_pooling.py
new file mode 100644
index 0000000000..3193d9e3d5
--- /dev/null
+++ b/test/asynchronous/test_pooling.py
@@ -0,0 +1,614 @@
+# Copyright 2009-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test built-in connection-pooling with threads."""
+from __future__ import annotations
+
+import asyncio
+import gc
+import random
+import socket
+import sys
+import time
+from test.asynchronous.utils import async_get_pool, async_joinall, flaky
+
+from bson.codec_options import DEFAULT_CODEC_OPTIONS
+from bson.son import SON
+from pymongo import AsyncMongoClient, message, timeout
+from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError
+from pymongo.hello import HelloCompat
+from pymongo.lock import _async_create_lock
+
+sys.path[0:0] = [""]
+
+from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest
+from test.asynchronous.helpers import ConcurrentRunner
+from test.utils_shared import delay
+
+from pymongo.asynchronous.pool import Pool, PoolOptions
+from pymongo.socket_checker import SocketChecker
+
+_IS_SYNC = False
+
+
+N = 10
+DB = "pymongo-pooling-tests"
+
+
+async def gc_collect_until_done(tasks, timeout=60):
+    start = time.time()
+    running = list(tasks)
+    while running:
+        assert (time.time() - start) < timeout, "Tasks timed out"
+        for t in running:
+            await t.join(0.1)
+            if not t.is_alive():
+                running.remove(t)
+        gc.collect()
+
+
+class MongoTask(ConcurrentRunner):
+    """A thread/Task that uses an AsyncMongoClient."""
+
+    def __init__(self, client):
+        super().__init__()
+        self.daemon = True  # Don't hang whole test if task hangs.
+        self.client = client
+        self.db = self.client[DB]
+        self.passed = False
+
+    async def run(self):
+        await self.run_mongo_thread()
+        self.passed = True
+
+    async def run_mongo_thread(self):
+        raise NotImplementedError
+
+
+class InsertOneAndFind(MongoTask):
+    async def run_mongo_thread(self):
+        for _ in range(N):
+            rand = random.randint(0, N)
+            _id = (await self.db.sf.insert_one({"x": rand})).inserted_id
+            assert rand == (await self.db.sf.find_one(_id))["x"]
+
+
+class Unique(MongoTask):
+    async def run_mongo_thread(self):
+        for _ in range(N):
+            await self.db.unique.insert_one({})  # no error
+
+
+class NonUnique(MongoTask):
+    async def run_mongo_thread(self):
+        for _ in range(N):
+            try:
+                await self.db.unique.insert_one({"_id": "jesse"})
+            except DuplicateKeyError:
+                pass
+            else:
+                raise AssertionError("Should have raised DuplicateKeyError")
+
+
+class SocketGetter(MongoTask):
+    """Utility for TestPooling.
+
+    Checks out a socket and holds it forever. Used in
+    test_no_wait_queue_timeout.
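+    Also used by test_checkout_more_than_max_pool_size.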
+ """ + + def __init__(self, client, pool): + super().__init__(client) + self.state = "init" + self.pool = pool + self.sock = None + + async def run_mongo_thread(self): + self.state = "get_socket" + + # Call 'pin_cursor' so we can hold the socket. + async with self.pool.checkout() as sock: + sock.pin_cursor() + self.sock = sock + + self.state = "connection" + + async def release_conn(self): + if self.sock: + await self.sock.unpin() + self.sock = None + return True + return False + + +async def run_cases(client, cases): + tasks = [] + n_runs = 5 + + for case in cases: + for _i in range(n_runs): + t = case(client) + await t.start() + tasks.append(t) + + for t in tasks: + await t.join() + + for t in tasks: + assert t.passed, "%s.run() threw an exception" % repr(t) + + +class _TestPoolingBase(AsyncIntegrationTest): + """Base class for all connection-pool tests.""" + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.c = await self.async_rs_or_single_client() + db = self.c[DB] + await db.unique.drop() + await db.test.drop() + await db.unique.insert_one({"_id": "jesse"}) + await db.test.insert_many([{} for _ in range(10)]) + + async def create_pool(self, pair=None, *args, **kwargs): + if pair is None: + pair = (await async_client_context.host, await async_client_context.port) + # Start the pool with the correct ssl options. + pool_options = async_client_context.client._topology_settings.pool_options + kwargs["ssl_context"] = pool_options._ssl_context + kwargs["tls_allow_invalid_hostnames"] = pool_options.tls_allow_invalid_hostnames + kwargs["server_api"] = pool_options.server_api + pool = Pool(pair, PoolOptions(*args, **kwargs)) + await pool.ready() + return pool + + +class TestPooling(_TestPoolingBase): + async def test_max_pool_size_validation(self): + host, port = await async_client_context.host, await async_client_context.port + self.assertRaises(ValueError, AsyncMongoClient, host=host, port=port, maxPoolSize=-1) + + self.assertRaises(ValueError, AsyncMongoClient, host=host, port=port, maxPoolSize="foo") + + c = AsyncMongoClient(host=host, port=port, maxPoolSize=100, connect=False) + self.assertEqual(c.options.pool_options.max_pool_size, 100) + + async def test_no_disconnect(self): + await run_cases(self.c, [NonUnique, Unique, InsertOneAndFind]) + + async def test_pool_reuses_open_socket(self): + # Test Pool's _check_closed() method doesn't close a healthy socket. + cx_pool = await self.create_pool(max_pool_size=10) + cx_pool._check_interval_seconds = 0 # Always check. + async with cx_pool.checkout() as conn: + pass + + async with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + async def test_get_socket_and_exception(self): + # get_socket() returns socket after a non-network error. + cx_pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=1) + with self.assertRaises(ZeroDivisionError): + async with cx_pool.checkout() as conn: + 1 / 0 + + # Socket was returned, not closed. + async with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + async def test_pool_removes_closed_socket(self): + # Test that Pool removes explicitly closed socket. + cx_pool = await self.create_pool() + + async with cx_pool.checkout() as conn: + # Use Connection's API to close the socket. 
+ await conn.close_conn(None) + + self.assertEqual(0, len(cx_pool.conns)) + + async def test_pool_removes_dead_socket(self): + # Test that Pool removes dead socket and the socket doesn't return + # itself PYTHON-344 + cx_pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=1) + cx_pool._check_interval_seconds = 0 # Always check. + + async with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's + # closed. + await conn.conn.close() + self.assertTrue(conn.conn_closed()) + + async with cx_pool.checkout() as new_connection: + self.assertEqual(0, len(cx_pool.conns)) + self.assertNotEqual(conn, new_connection) + + self.assertEqual(1, len(cx_pool.conns)) + + # Semaphore was released. + async with cx_pool.checkout(): + pass + + async def test_socket_closed(self): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((await async_client_context.host, await async_client_context.port)) + socket_checker = SocketChecker() + self.assertFalse(socket_checker.socket_closed(s)) + s.close() + self.assertTrue(socket_checker.socket_closed(s)) + + async def test_socket_checker(self): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect((await async_client_context.host, await async_client_context.port)) + socket_checker = SocketChecker() + # Socket has nothing to read. + self.assertFalse(socket_checker.select(s, read=True)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0)) + self.assertFalse(socket_checker.select(s, read=True, timeout=0.05)) + # Socket is writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) + # Make the socket readable + _, msg, _ = message._query( + 0, "admin.$cmd", 0, -1, SON([("ping", 1)]), None, DEFAULT_CODEC_OPTIONS + ) + s.sendall(msg) + # Block until the socket is readable. + self.assertTrue(socket_checker.select(s, read=True, timeout=None)) + self.assertTrue(socket_checker.select(s, read=True)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0)) + self.assertTrue(socket_checker.select(s, read=True, timeout=0.05)) + # Socket is still writable. + self.assertTrue(socket_checker.select(s, write=True, timeout=None)) + self.assertTrue(socket_checker.select(s, write=True)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0)) + self.assertTrue(socket_checker.select(s, write=True, timeout=0.05)) + s.close() + self.assertTrue(socket_checker.socket_closed(s)) + + async def test_return_socket_after_reset(self): + pool = await self.create_pool() + async with pool.checkout() as sock: + self.assertEqual(pool.active_sockets, 1) + self.assertEqual(pool.operation_count, 1) + await pool.reset() + + self.assertTrue(sock.closed) + self.assertEqual(0, len(pool.conns)) + self.assertEqual(pool.active_sockets, 0) + self.assertEqual(pool.operation_count, 0) + + async def test_pool_check(self): + # Test that Pool recovers from two connection failures in a row. + # This exercises code at the end of Pool._check(). + cx_pool = await self.create_pool(max_pool_size=1, connect_timeout=1, wait_queue_timeout=1) + cx_pool._check_interval_seconds = 0 # Always check. + self.addAsyncCleanup(cx_pool.close) + + async with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's + # closed. 
+ await conn.conn.close() + + # Swap pool's address with a bad one. + address, cx_pool.address = cx_pool.address, ("foo.com", 1234) + with self.assertRaises(AutoReconnect): + async with cx_pool.checkout(): + pass + + # Back to normal, semaphore was correctly released. + cx_pool.address = address + async with cx_pool.checkout(): + pass + + async def test_wait_queue_timeout(self): + wait_queue_timeout = 2 # Seconds + pool = await self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) + self.addAsyncCleanup(pool.close) + + async with pool.checkout(): + start = time.time() + with self.assertRaises(ConnectionFailure): + async with pool.checkout(): + pass + + duration = time.time() - start + self.assertLess( + abs(wait_queue_timeout - duration), + 1, + f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", + ) + + async def test_no_wait_queue_timeout(self): + # Verify get_socket() with no wait_queue_timeout blocks forever. + pool = await self.create_pool(max_pool_size=1) + self.addAsyncCleanup(pool.close) + + # Reach max_size. + async with pool.checkout() as s1: + t = SocketGetter(self.c, pool) + await t.start() + while t.state != "get_socket": + await asyncio.sleep(0.1) + + await asyncio.sleep(1) + self.assertEqual(t.state, "get_socket") + + while t.state != "connection": + await asyncio.sleep(0.1) + + self.assertEqual(t.state, "connection") + self.assertEqual(t.sock, s1) + # Cleanup + await t.release_conn() + await t.join() + await pool.close() + + async def test_checkout_more_than_max_pool_size(self): + pool = await self.create_pool(max_pool_size=2) + + socks = [] + for _ in range(2): + # Call 'pin_cursor' so we can hold the socket. + async with pool.checkout() as sock: + sock.pin_cursor() + socks.append(sock) + + tasks = [] + for _ in range(10): + t = SocketGetter(self.c, pool) + await t.start() + tasks.append(t) + await asyncio.sleep(1) + for t in tasks: + self.assertEqual(t.state, "get_socket") + # Cleanup + for socket_info in socks: + await socket_info.unpin() + while tasks: + to_remove = [] + for t in tasks: + if await t.release_conn(): + to_remove.append(t) + await t.join() + for t in to_remove: + tasks.remove(t) + await asyncio.sleep(0.05) + await pool.close() + + async def test_maxConnecting(self): + client = await self.async_rs_or_single_client() + await self.client.test.test.insert_one({}) + self.addAsyncCleanup(self.client.test.test.delete_many, {}) + pool = await async_get_pool(client) + docs = [] + + # Run 50 short running operations + async def find_one(): + docs.append(await client.test.test.find_one({})) + + tasks = [ConcurrentRunner(target=find_one) for _ in range(50)] + for task in tasks: + await task.start() + for task in tasks: + await task.join(10) + + self.assertEqual(len(docs), 50) + self.assertLessEqual(len(pool.conns), 50) + # TLS and auth make connection establishment more expensive than + # the query which leads to more threads hitting maxConnecting. + # The end result is fewer total connections and better latency. 
+ if async_client_context.tls and async_client_context.auth_enabled: + self.assertLessEqual(len(pool.conns), 30) + else: + self.assertLessEqual(len(pool.conns), 50) + # MongoDB 4.4.1 with auth + ssl: + # maxConnecting = 2: 6 connections in ~0.231+ seconds + # maxConnecting = unbounded: 50 connections in ~0.642+ seconds + # + # MongoDB 4.4.1 with no-auth no-ssl Python 3.8: + # maxConnecting = 2: 15-22 connections in ~0.108+ seconds + # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds + print(len(pool.conns)) + + @async_client_context.require_failCommand_appName + async def test_csot_timeout_message(self): + client = await self.async_rs_or_single_client(appName="connectionTimeoutApp") + # Mock an operation failing due to pymongo.timeout(). + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + await client.db.t.insert_one({"x": 1}) + + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + with timeout(0.5): + await client.db.t.find_one({"$where": delay(2)}) + + self.assertIn("(configured timeouts: timeoutMS: 500.0ms", str(error.exception)) + + @async_client_context.require_failCommand_appName + async def test_socket_timeout_message(self): + client = await self.async_rs_or_single_client( + socketTimeoutMS=500, appName="connectionTimeoutApp" + ) + # Mock an operation failing due to socketTimeoutMS. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + await client.db.t.insert_one({"x": 1}) + + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + await client.db.t.find_one({"$where": delay(2)}) + + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)", + str(error.exception), + ) + + @async_client_context.require_failCommand_appName + async def test_connection_timeout_message(self): + # Mock a connection creation failing due to timeout. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "appName": "connectionTimeoutApp", + }, + } + + client = await self.async_rs_or_single_client( + connectTimeoutMS=500, + socketTimeoutMS=500, + appName="connectionTimeoutApp", + heartbeatFrequencyMS=1000000, + ) + await client.admin.command("ping") + pool = await async_get_pool(client) + await pool.reset_without_pause() + async with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + await client.admin.command("ping") + + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)", + str(error.exception), + ) + + +class TestPoolMaxSize(_TestPoolingBase): + async def test_max_pool_size(self): + max_pool_size = 4 + c = await self.async_rs_or_single_client(maxPoolSize=max_pool_size) + collection = c[DB].test + + # Need one document. + await collection.drop() + await collection.insert_one({}) + + # ntasks had better be much larger than max_pool_size to ensure that + # max_pool_size connections are actually required at some point in this + # test's execution. 
+ cx_pool = await async_get_pool(c) + ntasks = 10 + tasks = [] + lock = _async_create_lock() + self.n_passed = 0 + + async def f(): + for _ in range(5): + await collection.find_one({"$where": delay(0.1)}) + assert len(cx_pool.conns) <= max_pool_size + + async with lock: + self.n_passed += 1 + + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) + await t.start() + + await async_joinall(tasks) + self.assertEqual(ntasks, self.n_passed) + self.assertGreater(len(cx_pool.conns), 1) + self.assertEqual(0, cx_pool.requests) + + async def test_max_pool_size_none(self): + c = await self.async_rs_or_single_client(maxPoolSize=None) + collection = c[DB].test + + # Need one document. + await collection.drop() + await collection.insert_one({}) + + cx_pool = await async_get_pool(c) + ntasks = 10 + tasks = [] + lock = _async_create_lock() + self.n_passed = 0 + + async def f(): + for _ in range(5): + await collection.find_one({"$where": delay(0.1)}) + + async with lock: + self.n_passed += 1 + + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) + await t.start() + + await async_joinall(tasks) + self.assertEqual(ntasks, self.n_passed) + self.assertGreater(len(cx_pool.conns), 1) + self.assertEqual(cx_pool.max_pool_size, float("inf")) + + async def test_max_pool_size_zero(self): + c = await self.async_rs_or_single_client(maxPoolSize=0) + pool = await async_get_pool(c) + self.assertEqual(pool.max_pool_size, float("inf")) + + async def test_max_pool_size_with_connection_failure(self): + # The pool acquires its semaphore before attempting to connect; ensure + # it releases the semaphore on connection failure. + test_pool = Pool( + ("somedomainthatdoesntexist.org", 27017), + PoolOptions(max_pool_size=1, connect_timeout=1, socket_timeout=1, wait_queue_timeout=1), + ) + await test_pool.ready() + + # First call to get_socket fails; if pool doesn't release its semaphore + # then the second call raises "ConnectionFailure: Timed out waiting for + # socket from pool" instead of AutoReconnect. + for _i in range(2): + with self.assertRaises(AutoReconnect) as context: + async with test_pool.checkout(): + pass + + # Testing for AutoReconnect instead of ConnectionFailure, above, + # is sufficient right *now* to catch a semaphore leak. But that + # seems error-prone, so check the message too. + self.assertNotIn("waiting for socket from pool", str(context.exception)) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_raw_bson.py b/test/asynchronous/test_raw_bson.py new file mode 100644 index 0000000000..70832ea668 --- /dev/null +++ b/test/asynchronous/test_raw_bson.py @@ -0,0 +1,219 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import datetime +import sys +import uuid + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest + +from bson import Code, DBRef, decode, encode +from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation +from bson.codec_options import CodecOptions +from bson.errors import InvalidBSON +from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument +from bson.son import SON + +_IS_SYNC = False + + +class TestRawBSONDocument(AsyncIntegrationTest): + # {'_id': ObjectId('556df68b6e32ab21a95e0785'), + # 'name': 'Sherlock', + # 'addresses': [{'street': 'Baker Street'}]} + bson_string = ( + b"Z\x00\x00\x00\x07_id\x00Um\xf6\x8bn2\xab!\xa9^\x07\x85\x02name\x00\t" + b"\x00\x00\x00Sherlock\x00\x04addresses\x00&\x00\x00\x00\x030\x00\x1e" + b"\x00\x00\x00\x02street\x00\r\x00\x00\x00Baker Street\x00\x00\x00\x00" + ) + document = RawBSONDocument(bson_string) + + async def asyncTearDown(self): + if async_client_context.connected: + await self.client.pymongo_test.test_raw.drop() + + def test_decode(self): + self.assertEqual("Sherlock", self.document["name"]) + first_address = self.document["addresses"][0] + self.assertIsInstance(first_address, RawBSONDocument) + self.assertEqual("Baker Street", first_address["street"]) + + def test_raw(self): + self.assertEqual(self.bson_string, self.document.raw) + + def test_empty_doc(self): + doc = RawBSONDocument(encode({})) + with self.assertRaises(KeyError): + doc["does-not-exist"] + + def test_invalid_bson_sequence(self): + bson_byte_sequence = encode({"a": 1}) + encode({}) + with self.assertRaisesRegex(InvalidBSON, "invalid object length"): + RawBSONDocument(bson_byte_sequence) + + def test_invalid_bson_eoo(self): + invalid_bson_eoo = encode({"a": 1})[:-1] + b"\x01" + with self.assertRaisesRegex(InvalidBSON, "bad eoo"): + RawBSONDocument(invalid_bson_eoo) + + @async_client_context.require_connection + async def test_round_trip(self): + db = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ) + await db.test_raw.insert_one(self.document) + result = await db.test_raw.find_one(self.document["_id"]) + assert result is not None + self.assertIsInstance(result, RawBSONDocument) + self.assertEqual(dict(self.document.items()), dict(result.items())) + + @async_client_context.require_connection + async def test_round_trip_raw_uuid(self): + coll = self.client.get_database("pymongo_test").test_raw + uid = uuid.uuid4() + doc = {"_id": 1, "bin4": Binary(uid.bytes, 4), "bin3": Binary(uid.bytes, 3)} + raw = RawBSONDocument(encode(doc)) + await coll.insert_one(raw) + self.assertEqual(await coll.find_one(), doc) + uuid_coll = coll.with_options( + codec_options=coll.codec_options.with_options( + uuid_representation=UuidRepresentation.STANDARD + ) + ) + self.assertEqual( + await uuid_coll.find_one(), {"_id": 1, "bin4": uid, "bin3": Binary(uid.bytes, 3)} + ) + + # Test that the raw bytes haven't changed. + raw_coll = coll.with_options(codec_options=DEFAULT_RAW_BSON_OPTIONS) + self.assertEqual(await raw_coll.find_one(), raw) + + def test_with_codec_options(self): + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # encoded with JAVA_LEGACY uuid representation. 
+ bson_string = ( + b"-\x00\x00\x00\x05_id\x00\x10\x00\x00\x00\x03eI_\x97\x8f\xabo\x02" + b"\xff`L\x87\xad\x85\xbf\x9f\tdate\x00\x8a\xd6\xb9\xbaM" + b"\x01\x00\x00\x00" + ) + document = RawBSONDocument( + bson_string, + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) + + self.assertEqual(uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), document["_id"]) + + @async_client_context.require_connection + async def test_round_trip_codec_options(self): + doc = { + "date": datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + "_id": uuid.UUID("026fab8f-975f-4965-9fbf-85ad874c60ff"), + } + db = self.client.pymongo_test + coll = db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ) + await coll.insert_one(doc) + raw_java_legacy = CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ) + coll = db.get_collection("test_raw", codec_options=raw_java_legacy) + self.assertEqual( + RawBSONDocument(encode(doc, codec_options=raw_java_legacy)), await coll.find_one() + ) + + @async_client_context.require_connection + async def test_raw_bson_document_embedded(self): + doc = {"embedded": self.document} + db = self.client.pymongo_test + await db.test_raw.insert_one(doc) + result = await db.test_raw.find_one() + assert result is not None + self.assertEqual(decode(self.document.raw), result["embedded"]) + + # Make sure that CodecOptions are preserved. + # {'embedded': [ + # {'date': datetime.datetime(2015, 6, 3, 18, 40, 50, 826000), + # '_id': UUID('026fab8f-975f-4965-9fbf-85ad874c60ff')} + # ]} + # encoded with JAVA_LEGACY uuid representation. + bson_string = ( + b"D\x00\x00\x00\x04embedded\x005\x00\x00\x00\x030\x00-\x00\x00\x00" + b"\tdate\x00\x8a\xd6\xb9\xbaM\x01\x00\x00\x05_id\x00\x10\x00\x00" + b"\x00\x03eI_\x97\x8f\xabo\x02\xff`L\x87\xad\x85\xbf\x9f\x00\x00" + b"\x00" + ) + rbd = RawBSONDocument( + bson_string, + codec_options=CodecOptions( + uuid_representation=JAVA_LEGACY, document_class=RawBSONDocument + ), + ) + + await db.test_raw.drop() + await db.test_raw.insert_one(rbd) + result = await db.get_collection( + "test_raw", codec_options=CodecOptions(uuid_representation=JAVA_LEGACY) + ).find_one() + assert result is not None + self.assertEqual(rbd["embedded"][0]["_id"], result["embedded"][0]["_id"]) + + @async_client_context.require_connection + async def test_write_response_raw_bson(self): + coll = self.client.get_database( + "pymongo_test", codec_options=CodecOptions(document_class=RawBSONDocument) + ).test_raw + + # No Exceptions raised while handling write response. 
+ await coll.insert_one(self.document) + await coll.delete_one(self.document) + await coll.insert_many([self.document]) + await coll.delete_many(self.document) + await coll.update_one(self.document, {"$set": {"a": "b"}}, upsert=True) + await coll.update_many(self.document, {"$set": {"b": "c"}}) + + def test_preserve_key_ordering(self): + keyvaluepairs = [ + ("a", 1), + ("b", 2), + ("c", 3), + ] + rawdoc = RawBSONDocument(encode(SON(keyvaluepairs))) + + for rkey, elt in zip(rawdoc, keyvaluepairs): + self.assertEqual(rkey, elt[0]) + + def test_contains_code_with_scope(self): + doc = RawBSONDocument(encode({"value": Code("x=1", scope={})})) + + self.assertEqual(decode(encode(doc)), {"value": Code("x=1", {})}) + self.assertEqual(doc["value"].scope, RawBSONDocument(encode({}))) + + def test_contains_dbref(self): + doc = RawBSONDocument(encode({"value": DBRef("test", "id")})) + raw = {"$ref": "test", "$id": "id"} + raw_encoded = encode(decode(encode(raw))) + + self.assertEqual(decode(encode(doc)), {"value": DBRef("test", "id")}) + self.assertEqual(doc["value"].raw, raw_encoded) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_read_concern.py b/test/asynchronous/test_read_concern.py new file mode 100644 index 0000000000..8659bf80b2 --- /dev/null +++ b/test/asynchronous/test_read_concern.py @@ -0,0 +1,122 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the read_concern module.""" +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context +from test.utils_shared import OvertCommandListener + +from bson.son import SON +from pymongo.errors import OperationFailure +from pymongo.read_concern import ReadConcern + +_IS_SYNC = False + + +class TestReadConcern(AsyncIntegrationTest): + listener: OvertCommandListener + + @async_client_context.require_connection + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + await async_client_context.client.pymongo_test.create_collection("coll") + + async def asyncTearDown(self): + await async_client_context.client.pymongo_test.drop_collection("coll") + + def test_read_concern(self): + rc = ReadConcern() + self.assertIsNone(rc.level) + self.assertTrue(rc.ok_for_legacy) + + rc = ReadConcern("majority") + self.assertEqual("majority", rc.level) + self.assertFalse(rc.ok_for_legacy) + + rc = ReadConcern("local") + self.assertEqual("local", rc.level) + self.assertTrue(rc.ok_for_legacy) + + self.assertRaises(TypeError, ReadConcern, 42) + + async def test_read_concern_uri(self): + uri = f"mongodb://{await async_client_context.pair}/?readConcernLevel=majority" + client = await self.async_rs_or_single_client(uri, connect=False) + self.assertEqual(ReadConcern("majority"), client.read_concern) + + async def test_invalid_read_concern(self): + coll = self.db.get_collection("coll", read_concern=ReadConcern("unknown")) + # We rely on the server to validate read concern. + with self.assertRaises(OperationFailure): + await coll.find_one() + + async def test_find_command(self): + # readConcern not sent in command if not specified. + coll = self.db.coll + await coll.find({"field": "value"}).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + self.listener.reset() + + # Explicitly set readConcern to 'local'. + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await coll.find({"field": "value"}).to_list() + self.assertEqualCommand( + SON( + [ + ("find", "coll"), + ("filter", {"field": "value"}), + ("readConcern", {"level": "local"}), + ] + ), + self.listener.started_events[0].command, + ) + + async def test_command_cursor(self): + # readConcern not sent in command if not specified. + coll = self.db.coll + await (await coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + self.listener.reset() + + # Explicitly set readConcern to 'local'. + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await (await coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) + + async def test_aggregate_out(self): + coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) + await ( + await coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}]) + ).to_list() + + # Aggregate with $out supports readConcern MongoDB 4.2 onwards. 
+ if async_client_context.version >= (4, 1): + self.assertIn("readConcern", self.listener.started_events[0].command) + else: + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_read_preferences.py b/test/asynchronous/test_read_preferences.py new file mode 100644 index 0000000000..d18887da40 --- /dev/null +++ b/test/asynchronous/test_read_preferences.py @@ -0,0 +1,742 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the replica_set_connection module.""" +from __future__ import annotations + +import contextlib +import copy +import pickle +import random +import sys +from typing import Any + +from pymongo.operations import _Op + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + SkipTest, + async_client_context, + connected, + unittest, +) +from test.utils_shared import ( + OvertCommandListener, + _ignore_deprecations, + async_wait_until, + one, +) +from test.version import Version + +from bson.son import SON +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ConfigurationError, OperationFailure +from pymongo.message import _maybe_add_read_preference +from pymongo.read_preferences import ( + MovingAverage, + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + Secondary, + SecondaryPreferred, +) +from pymongo.server_description import ServerDescription +from pymongo.server_selectors import Selection, readable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class TestSelections(AsyncIntegrationTest): + @async_client_context.require_connection + async def test_bool(self): + client = await self.async_single_client() + + async def predicate(): + return await client.address + + await async_wait_until(predicate, "discover primary") + selection = Selection.from_topology_description(client._topology.description) + + self.assertTrue(selection) + self.assertFalse(selection.with_server_descriptions([])) + + +class TestReadPreferenceObjects(unittest.TestCase): + prefs = [ + Primary(), + PrimaryPreferred(), + Secondary(), + Nearest(tag_sets=[{"a": 1}, {"b": 2}]), + SecondaryPreferred(max_staleness=30), + ] + + def test_pickle(self): + for pref in self.prefs: + self.assertEqual(pref, pickle.loads(pickle.dumps(pref))) + + def test_copy(self): + for pref in self.prefs: + self.assertEqual(pref, copy.copy(pref)) + + def test_deepcopy(self): + for pref in self.prefs: + self.assertEqual(pref, copy.deepcopy(pref)) + + +class TestReadPreferencesBase(AsyncIntegrationTest): + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + # Insert some data so we can use cursors in read_from_which_host + await self.client.pymongo_test.test.drop() + await self.client.get_database( + "pymongo_test", write_concern=WriteConcern(w=async_client_context.w) + 
).test.insert_many([{"_id": i} for i in range(10)]) + + self.addAsyncCleanup(self.client.pymongo_test.test.drop) + + async def read_from_which_host(self, client): + """Do a find() on the client and return which host was used""" + cursor = client.pymongo_test.test.find() + await anext(cursor) + return cursor.address + + async def read_from_which_kind(self, client): + """Do a find() on the client and return 'primary' or 'secondary' + depending on which the client used. + """ + address = await self.read_from_which_host(client) + if address == await client.primary: + return "primary" + elif address in await client.secondaries: + return "secondary" + else: + self.fail( + f"Cursor used address {address}, expected either primary " + f"{client.primary} or secondaries {client.secondaries}" + ) + + async def assertReadsFrom(self, expected, **kwargs): + c = await self.async_rs_client(**kwargs) + + async def predicate(): + return len(c.nodes - await c.arbiters) == async_client_context.w + + await async_wait_until(predicate, "discovered all nodes") + + used = await self.read_from_which_kind(c) + self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") + + +class TestSingleSecondaryOk(TestReadPreferencesBase): + async def test_reads_from_secondary(self): + host, port = next(iter(await self.client.secondaries)) + # Direct connection to a secondary. + client = await self.async_single_client(host, port) + self.assertFalse(await client.is_primary) + + # Regardless of read preference, we should be able to do + # "reads" with a direct connection to a secondary. + # See server-selection.rst#topology-type-single. + self.assertEqual(client.read_preference, ReadPreference.PRIMARY) + + db = client.pymongo_test + coll = db.test + + # Test find and find_one. + self.assertIsNotNone(await coll.find_one()) + self.assertEqual(10, len(await coll.find().to_list())) + + # Test some database helpers. + self.assertIsNotNone(await db.list_collection_names()) + self.assertIsNotNone(await db.validate_collection("test")) + self.assertIsNotNone(await db.command("ping")) + + # Test some collection helpers. 
+ self.assertEqual(10, await coll.count_documents({})) + self.assertEqual(10, len(await coll.distinct("_id"))) + self.assertIsNotNone(await coll.aggregate([])) + self.assertIsNotNone(await coll.index_information()) + + +class TestReadPreferences(TestReadPreferencesBase): + async def test_mode_validation(self): + for mode in ( + ReadPreference.PRIMARY, + ReadPreference.PRIMARY_PREFERRED, + ReadPreference.SECONDARY, + ReadPreference.SECONDARY_PREFERRED, + ReadPreference.NEAREST, + ): + self.assertEqual( + mode, (await self.async_rs_client(read_preference=mode)).read_preference + ) + + with self.assertRaises(TypeError): + await self.async_rs_client(read_preference="foo") + + async def test_tag_sets_validation(self): + S = Secondary(tag_sets=[{}]) + self.assertEqual( + [{}], (await self.async_rs_client(read_preference=S)).read_preference.tag_sets + ) + + S = Secondary(tag_sets=[{"k": "v"}]) + self.assertEqual( + [{"k": "v"}], (await self.async_rs_client(read_preference=S)).read_preference.tag_sets + ) + + S = Secondary(tag_sets=[{"k": "v"}, {}]) + self.assertEqual( + [{"k": "v"}, {}], + (await self.async_rs_client(read_preference=S)).read_preference.tag_sets, + ) + + self.assertRaises(ValueError, Secondary, tag_sets=[]) + + # One dict not ok, must be a list of dicts + self.assertRaises(TypeError, Secondary, tag_sets={"k": "v"}) + + self.assertRaises(TypeError, Secondary, tag_sets="foo") + + self.assertRaises(TypeError, Secondary, tag_sets=["foo"]) + + async def test_threshold_validation(self): + self.assertEqual( + 17, + ( + await self.async_rs_client(localThresholdMS=17, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 42, + ( + await self.async_rs_client(localThresholdMS=42, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 666, + ( + await self.async_rs_client(localThresholdMS=666, connect=False) + ).options.local_threshold_ms, + ) + + self.assertEqual( + 0, + ( + await self.async_rs_client(localThresholdMS=0, connect=False) + ).options.local_threshold_ms, + ) + + with self.assertRaises(ValueError): + await self.async_rs_client(localthresholdms=-1) + + async def test_zero_latency(self): + ping_times: set = set() + # Generate unique ping times. 
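+        # With localThresholdMS=0 only the single lowest-latency member is
+        # eligible, so distinct RTTs make "nearest" selection deterministic.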
+ while len(ping_times) < len(self.client.nodes): + ping_times.add(random.random()) + for ping_time, host in zip(ping_times, self.client.nodes): + ServerDescription._host_to_round_trip_time[host] = ping_time + try: + client = await connected( + await self.async_rs_client(readPreference="nearest", localThresholdMS=0) + ) + await async_wait_until( + lambda: client.nodes == self.client.nodes, "discovered all nodes" + ) + host = await self.read_from_which_host(client) + for _ in range(5): + self.assertEqual(host, await self.read_from_which_host(client)) + finally: + ServerDescription._host_to_round_trip_time.clear() + + async def test_primary(self): + await self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY) + + async def test_primary_with_tags(self): + # Tags not allowed with PRIMARY + with self.assertRaises(ConfigurationError): + await self.async_rs_client(tag_sets=[{"dc": "ny"}]) + + async def test_primary_preferred(self): + await self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) + + async def test_secondary(self): + await self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY) + + async def test_secondary_preferred(self): + await self.assertReadsFrom("secondary", read_preference=ReadPreference.SECONDARY_PREFERRED) + + async def test_nearest(self): + # With high localThresholdMS, expect to read from any + # member + c = await self.async_rs_client( + read_preference=ReadPreference.NEAREST, localThresholdMS=10000 + ) # 10 seconds + + data_members = {await self.client.primary} | await self.client.secondaries + + # This is a probabilistic test; track which members we've read from so + # far, and keep reading until we've used all the members or give up. + # Chance of using only 2 of 3 members 10k times if there's no bug = + # 3 * (2/3)**10000, very low. 
+ used: set = set() + i = 0 + while data_members.difference(used) and i < 10000: + address = await self.read_from_which_host(c) + used.add(address) + i += 1 + + not_used = data_members.difference(used) + latencies = ", ".join( + "%s: %sms" % (server.description.address, server.description.round_trip_time) + for server in await (await c._get_topology()).select_servers( + readable_server_selector, _Op.TEST + ) + ) + + self.assertFalse( + not_used, + "Expected to use primary and all secondaries for mode NEAREST," + f" but didn't use {not_used}\nlatencies: {latencies}", + ) + + +class ReadPrefTester(AsyncMongoClient): + def __init__(self, *args, **kwargs): + self.has_read_from = set() + client_options = async_client_context.client_options + client_options.update(kwargs) + super().__init__(*args, **client_options) + + async def _conn_for_reads(self, read_preference, session, operation): + context = await super()._conn_for_reads(read_preference, session, operation) + return context + + @contextlib.asynccontextmanager + async def _conn_from_server(self, read_preference, server, session): + context = super()._conn_from_server(read_preference, server, session) + async with context as (conn, read_preference): + await self.record_a_read(conn.address) + yield conn, read_preference + + async def record_a_read(self, address): + server = await (await self._get_topology()).select_server_by_address(address, _Op.TEST, 0) + self.has_read_from.add(server) + + +_PREF_MAP = [ + (Primary, SERVER_TYPE.RSPrimary), + (PrimaryPreferred, SERVER_TYPE.RSPrimary), + (Secondary, SERVER_TYPE.RSSecondary), + (SecondaryPreferred, SERVER_TYPE.RSSecondary), + (Nearest, "any"), +] + + +class TestCommandAndReadPreference(AsyncIntegrationTest): + c: ReadPrefTester + client_version: Version + + @async_client_context.require_secondaries_count(1) + async def asyncSetUp(self): + await super().asyncSetUp() + self.c = ReadPrefTester( + # Ignore round trip times, to test ReadPreference modes only. + localThresholdMS=1000 * 1000, + ) + self.client_version = await Version.async_from_client(self.c) + # mapReduce fails if the collection does not exist. 
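+        # (mapReduce is presumably no longer exercised here; the insert also
+        # gives the read helpers below a collection with data to query.)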
+ coll = self.c.pymongo_test.get_collection( + "test", write_concern=WriteConcern(w=async_client_context.w) + ) + await coll.insert_one({}) + + async def asyncTearDown(self): + await self.c.drop_database("pymongo_test") + await self.c.close() + + async def executed_on_which_server(self, client, fn, *args, **kwargs): + """Execute fn(*args, **kwargs) and return the Server instance used.""" + client.has_read_from.clear() + await fn(*args, **kwargs) + self.assertEqual(1, len(client.has_read_from)) + return one(client.has_read_from) + + async def assertExecutedOn(self, server_type, client, fn, *args, **kwargs): + server = await self.executed_on_which_server(client, fn, *args, **kwargs) + self.assertEqual( + SERVER_TYPE._fields[server_type], SERVER_TYPE._fields[server.description.server_type] + ) + + async def _test_fn(self, server_type, fn): + for _ in range(10): + if server_type == "any": + used = set() + for _ in range(1000): + server = await self.executed_on_which_server(self.c, fn) + used.add(server.description.address) + if len(used) == len(await self.c.secondaries) + 1: + # Success + break + + assert await self.c.primary is not None + unused = (await self.c.secondaries).union({await self.c.primary}).difference(used) + if unused: + self.fail("Some members not used for NEAREST: %s" % (unused)) + else: + await self.assertExecutedOn(server_type, self.c, fn) + + async def _test_primary_helper(self, func): + # Helpers that ignore read preference. + await self._test_fn(SERVER_TYPE.RSPrimary, func) + + async def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs): + for mode, server_type in _PREF_MAP: + new_coll = coll.with_options(read_preference=mode()) + + async def func(): + return await getattr(new_coll, meth)(*args, **kwargs) + + if secondary_ok: + await self._test_fn(server_type, func) + else: + await self._test_fn(SERVER_TYPE.RSPrimary, func) + + async def test_command(self): + # Test that the generic command helper obeys the read preference + # passed to it. + for mode, server_type in _PREF_MAP: + + async def func(): + return await self.c.pymongo_test.command("dbStats", read_preference=mode()) + + await self._test_fn(server_type, func) + + async def test_create_collection(self): + # create_collection runs listCollections on the primary to check if + # the collection already exists. + async def func(): + return await self.c.pymongo_test.create_collection( + "some_collection%s" % random.randint(0, sys.maxsize) + ) + + await self._test_primary_helper(func) + + async def test_count_documents(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) + + async def test_estimated_document_count(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "estimated_document_count") + + async def test_distinct(self): + await self._test_coll_helper(True, self.c.pymongo_test.test, "distinct", "a") + + async def test_aggregate(self): + await self._test_coll_helper( + True, self.c.pymongo_test.test, "aggregate", [{"$project": {"_id": 1}}] + ) + + async def test_aggregate_write(self): + # 5.0 servers support $out on secondaries. 
+ secondary_ok = async_client_context.version.at_least(5, 0) + await self._test_coll_helper( + secondary_ok, + self.c.pymongo_test.test, + "aggregate", + [{"$project": {"_id": 1}}, {"$out": "agg_write_test"}], + ) + + +class TestMovingAverage(unittest.TestCase): + def test_moving_average(self): + avg = MovingAverage() + self.assertIsNone(avg.get()) + avg.add_sample(10) + self.assertAlmostEqual(10, avg.get()) # type: ignore + avg.add_sample(20) + self.assertAlmostEqual(12, avg.get()) # type: ignore + avg.add_sample(30) + self.assertAlmostEqual(15.6, avg.get()) # type: ignore + + +class TestMongosAndReadPreference(AsyncIntegrationTest): + def test_read_preference_document(self): + pref = Primary() + self.assertEqual(pref.document, {"mode": "primary"}) + + pref = PrimaryPreferred() + self.assertEqual(pref.document, {"mode": "primaryPreferred"}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "primaryPreferred", "tags": [{"dc": "sf"}]}) + pref = PrimaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, + {"mode": "primaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) + + pref = Secondary() + self.assertEqual(pref.document, {"mode": "secondary"}) + pref = Secondary(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}]}) + pref = Secondary(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, {"mode": "secondary", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) + + pref = SecondaryPreferred() + self.assertEqual(pref.document, {"mode": "secondaryPreferred"}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}]}) + pref = SecondaryPreferred(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, + {"mode": "secondaryPreferred", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30}, + ) + + pref = Nearest() + self.assertEqual(pref.document, {"mode": "nearest"}) + pref = Nearest(tag_sets=[{"dc": "sf"}]) + self.assertEqual(pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}]}) + pref = Nearest(tag_sets=[{"dc": "sf"}], max_staleness=30) + self.assertEqual( + pref.document, {"mode": "nearest", "tags": [{"dc": "sf"}], "maxStalenessSeconds": 30} + ) + + with self.assertRaises(TypeError): + # Float is prohibited. + Nearest(max_staleness=1.5) # type: ignore + + with self.assertRaises(ValueError): + Nearest(max_staleness=0) + + with self.assertRaises(ValueError): + Nearest(max_staleness=-2) + + def test_read_preference_document_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for mode, cls in cases.items(): + with self.assertRaises(TypeError): + cls(hedge=[]) # type: ignore + with _ignore_deprecations(): + pref = cls(hedge={}) + self.assertEqual(pref.document, {"mode": mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. 
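+ # (the slaveOk wire flag on its own already signals secondaryPreferred
+ # to mongos, so there is nothing extra to send).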
+ self.assertEqual(out, {}) + else: + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge: dict[str, Any] = {"enabled": True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge = {"enabled": False, "extra": "option"} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + def test_read_preference_hedge_deprecated(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for _, cls in cases.items(): + with self.assertRaises(DeprecationWarning): + cls(hedge={"enabled": True}) + + async def test_send_hedge(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + if await async_client_context.supports_secondary_read_pref: + cases["secondary"] = Secondary + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + await client.admin.command("ping") + for _mode, cls in cases.items(): + with _ignore_deprecations(): + pref = cls(hedge={"enabled": True}) + coll = client.test.get_collection("test", read_preference=pref) + listener.reset() + await coll.find_one() + started = listener.started_events + self.assertEqual(len(started), 1, started) + cmd = started[0].command + if async_client_context.is_rs or async_client_context.is_mongos: + self.assertIn("$readPreference", cmd) + self.assertEqual(cmd["$readPreference"], pref.document) + else: + self.assertNotIn("$readPreference", cmd) + + def test_maybe_add_read_preference(self): + # Primary doesn't add $readPreference + out = _maybe_add_read_preference({}, Primary()) + self.assertEqual(out, {}) + + pref = PrimaryPreferred() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = PrimaryPreferred(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + pref = Secondary() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Secondary(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + # SecondaryPreferred without tag_sets or max_staleness doesn't add + # $readPreference + pref = SecondaryPreferred() + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, {}) + pref = SecondaryPreferred(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = SecondaryPreferred(max_staleness=120) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + pref = Nearest() + out = 
_maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + criteria = SON([("$query", {}), ("$orderby", SON([("_id", 1)]))]) + pref = Nearest() + out = _maybe_add_read_preference(criteria, pref) + self.assertEqual( + out, + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + pref = Nearest(tag_sets=[{"dc": "nyc"}]) + out = _maybe_add_read_preference(criteria, pref) + self.assertEqual( + out, + SON( + [ + ("$query", {}), + ("$orderby", SON([("_id", 1)])), + ("$readPreference", pref.document), + ] + ), + ) + + @async_client_context.require_mongos + async def test_mongos(self): + res = await async_client_context.client.config.shards.find_one() + assert res is not None + shard = res["host"] + num_members = shard.count(",") + 1 + if num_members == 1: + raise SkipTest("Need a replica set shard to test.") + coll = async_client_context.client.pymongo_test.get_collection( + "test", write_concern=WriteConcern(w=num_members) + ) + await coll.drop() + res = await coll.insert_many([{} for _ in range(5)]) + first_id = res.inserted_ids[0] + last_id = res.inserted_ids[-1] + + # Note - this isn't a perfect test since there's no way to + # tell what shard member a query ran on. + for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): + qcoll = coll.with_options(read_preference=pref) + results = await qcoll.find().sort([("_id", 1)]).to_list() + self.assertEqual(first_id, results[0]["_id"]) + self.assertEqual(last_id, results[-1]["_id"]) + results = await qcoll.find().sort([("_id", -1)]).to_list() + self.assertEqual(first_id, results[-1]["_id"]) + self.assertEqual(last_id, results[0]["_id"]) + + @async_client_context.require_mongos + async def test_mongos_max_staleness(self): + # Sanity check that we're sending maxStalenessSeconds + coll = async_client_context.client.pymongo_test.get_collection( + "test", read_preference=SecondaryPreferred(max_staleness=120) + ) + # No error + await coll.find_one() + + coll = async_client_context.client.pymongo_test.get_collection( + "test", read_preference=SecondaryPreferred(max_staleness=10) + ) + try: + await coll.find_one() + except OperationFailure as exc: + self.assertEqual(160, exc.code) + else: + self.fail("mongos accepted invalid staleness") + + coll = ( + await self.async_single_client( + readPreference="secondaryPreferred", maxStalenessSeconds=120 + ) + ).pymongo_test.test + # No error + await coll.find_one() + + coll = ( + await self.async_single_client( + readPreference="secondaryPreferred", maxStalenessSeconds=10 + ) + ).pymongo_test.test + try: + await coll.find_one() + except OperationFailure as exc: + self.assertEqual(160, exc.code) + else: + self.fail("mongos accepted invalid staleness") + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_read_write_concern_spec.py b/test/asynchronous/test_read_write_concern_spec.py new file mode 100644 index 0000000000..b5cb32932f --- /dev/null +++ b/test/asynchronous/test_read_write_concern_spec.py @@ -0,0 +1,348 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the read and write concern tests.""" +from __future__ import annotations + +import json +import os +import sys +import warnings +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.unified_format import generate_test_classes +from test.utils_shared import OvertCommandListener + +from pymongo import DESCENDING +from pymongo.asynchronous.mongo_client import AsyncMongoClient +from pymongo.errors import ( + BulkWriteError, + ConfigurationError, + WriteConcernError, + WriteError, + WTimeoutError, +) +from pymongo.operations import IndexModel, InsertOne +from pymongo.read_concern import ReadConcern +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "read_write_concern") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "read_write_concern") + + +class TestReadWriteConcernSpec(AsyncIntegrationTest): + async def test_omit_default_read_write_concern(self): + listener = OvertCommandListener() + # Client with default readConcern and writeConcern + client = await self.async_rs_or_single_client(event_listeners=[listener]) + collection = client.pymongo_test.collection + # Prepare for tests of find() and aggregate(). + await collection.insert_many([{} for _ in range(10)]) + self.addAsyncCleanup(collection.drop) + self.addAsyncCleanup(client.pymongo_test.collection2.drop) + # Commands MUST NOT send the default read/write concern to the server. + + async def rename_and_drop(): + # Ensure collection exists. 
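+ # (rename fails with a namespace error if the source collection is missing).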
+ await collection.insert_one({}) + await collection.rename("collection2") + await client.pymongo_test.collection2.drop() + + async def insert_command_default_write_concern(): + await collection.database.command( + "insert", "collection", documents=[{}], write_concern=WriteConcern() + ) + + async def aggregate_op(): + await (await collection.aggregate([])).to_list() + + ops = [ + ("aggregate", aggregate_op), + ("find", lambda: collection.find().to_list()), + ("insert_one", lambda: collection.insert_one({})), + ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: collection.delete_one({})), + ("delete_many", lambda: collection.delete_many({})), + ("bulk_write", lambda: collection.bulk_write([InsertOne({})])), + ("rename_and_drop", rename_and_drop), + ("command", insert_command_default_write_concern), + ] + + for name, f in ops: + listener.reset() + await f() + + self.assertGreaterEqual(len(listener.started_events), 1) + for _i, event in enumerate(listener.started_events): + self.assertNotIn( + "readConcern", + event.command, + f"{name} sent default readConcern with {event.command_name}", + ) + self.assertNotIn( + "writeConcern", + event.command, + f"{name} sent default writeConcern with {event.command_name}", + ) + + async def assertWriteOpsRaise(self, write_concern, expected_exception): + wc = write_concern.document + # Set socket timeout to avoid indefinite stalls + client = await self.async_rs_or_single_client( + w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000 + ) + db = client.get_database("pymongo_test") + coll = db.test + + async def insert_command(): + await coll.database.command( + "insert", + "new_collection", + documents=[{}], + writeConcern=write_concern.document, + parse_write_concern_error=True, + ) + + ops = [ + ("insert_one", lambda: coll.insert_one({})), + ("insert_many", lambda: coll.insert_many([{}, {}])), + ("update_one", lambda: coll.update_one({}, {"$set": {"x": 1}})), + ("update_many", lambda: coll.update_many({}, {"$set": {"x": 1}})), + ("delete_one", lambda: coll.delete_one({})), + ("delete_many", lambda: coll.delete_many({})), + ("bulk_write", lambda: coll.bulk_write([InsertOne({})])), + ("command", insert_command), + ("aggregate", lambda: coll.aggregate([{"$out": "out"}])), + # SERVER-46668 Delete all the documents in the collection to + # workaround a hang in createIndexes. + ("delete_many", lambda: coll.delete_many({})), + ("create_index", lambda: coll.create_index([("a", DESCENDING)])), + ("create_indexes", lambda: coll.create_indexes([IndexModel("b")])), + ("drop_index", lambda: coll.drop_index([("a", DESCENDING)])), + ("create", lambda: db.create_collection("new")), + ("rename", lambda: coll.rename("new")), + ("drop", lambda: db.new.drop()), + ] + # SERVER-47194: dropDatabase does not respect wtimeout in 3.6. + if async_client_context.version[:2] != (3, 6): + ops.append(("drop_database", lambda: client.drop_database(db))) + + for name, f in ops: + # Ensure insert_many and bulk_write still raise BulkWriteError. 
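+ # Bulk helpers wrap the failure in BulkWriteError and preserve the raw
+ # server responses under details["writeConcernErrors"].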
+ if name in ("insert_many", "bulk_write"): + expected = BulkWriteError + else: + expected = expected_exception + with self.assertRaises(expected, msg=name) as cm: + await f() + if expected == BulkWriteError: + bulk_result = cm.exception.details + assert bulk_result is not None + wc_errors = bulk_result["writeConcernErrors"] + self.assertTrue(wc_errors) + + @async_client_context.require_replica_set + async def test_raise_write_concern_error(self): + self.addAsyncCleanup(async_client_context.client.drop_database, "pymongo_test") + assert async_client_context.w is not None + await self.assertWriteOpsRaise( + WriteConcern(w=async_client_context.w + 1, wtimeout=1), WriteConcernError + ) + + @async_client_context.require_secondaries_count(1) + @async_client_context.require_test_commands + async def test_raise_wtimeout(self): + self.addAsyncCleanup(async_client_context.client.drop_database, "pymongo_test") + self.addAsyncCleanup(self.enable_replication, async_client_context.client) + # Disable replication to guarantee a wtimeout error. + await self.disable_replication(async_client_context.client) + await self.assertWriteOpsRaise( + WriteConcern(w=async_client_context.w, wtimeout=1), WTimeoutError + ) + + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 1 (included here instead of test_client_bulk_write.py) + @async_client_context.require_failCommand_fail_point + async def test_error_includes_errInfo(self): + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + "errInfo": {"writeConcern": {"w": 2, "wtimeout": 0, "provenance": "clientSupplied"}}, + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + async with self.fail_point(cause_wce): + # Write concern error on insert includes errInfo. + with self.assertRaises(WriteConcernError) as ctx: + await self.db.test.insert_one({}) + self.assertEqual(ctx.exception.details, expected_wce) + + # Test bulk_write as well. 
+ with self.assertRaises(BulkWriteError) as ctx: + await self.db.test.bulk_write([InsertOne({})]) + expected_details = { + "writeErrors": [], + "writeConcernErrors": [expected_wce], + "nInserted": 1, + "nUpserted": 0, + "nMatched": 0, + "nModified": 0, + "nRemoved": 0, + "upserted": [], + } + self.assertEqual(ctx.exception.details, expected_details) + + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 2 (included here instead of test_client_bulk_write.py) + @async_client_context.require_version_min(4, 9) + async def test_write_error_details_exposes_errinfo(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + db = client.errinfotest + self.addAsyncCleanup(client.drop_database, "errinfotest") + validator = {"x": {"$type": "string"}} + await db.create_collection("test", validator=validator) + with self.assertRaises(WriteError) as ctx: + await db.test.insert_one({"x": 1}) + self.assertEqual(ctx.exception.code, 121) + self.assertIsNotNone(ctx.exception.details) + assert ctx.exception.details is not None + self.assertIsNotNone(ctx.exception.details.get("errInfo")) + for event in listener.succeeded_events: + if event.command_name == "insert": + self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) + break + else: + self.fail("Couldn't find insert event.") + + +def normalize_write_concern(concern): + result = {} + for key in concern: + if key.lower() == "wtimeoutms": + result["wtimeout"] = concern[key] + elif key == "journal": + result["j"] = concern[key] + else: + result[key] = concern[key] + return result + + +def create_connection_string_test(test_case): + def run_test(self): + uri = test_case["uri"] + valid = test_case["valid"] + warning = test_case["warning"] + + if not valid: + if warning is False: + self.assertRaises( + (ConfigurationError, ValueError), AsyncMongoClient, uri, connect=False + ) + else: + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + self.assertRaises(UserWarning, AsyncMongoClient, uri, connect=False) + else: + client = AsyncMongoClient(uri, connect=False) + if "writeConcern" in test_case: + document = client.write_concern.document + self.assertEqual(document, normalize_write_concern(test_case["writeConcern"])) + if "readConcern" in test_case: + document = client.read_concern.document + self.assertEqual(document, test_case["readConcern"]) + + return run_test + + +def create_document_test(test_case): + def run_test(self): + valid = test_case["valid"] + + if "writeConcern" in test_case: + normalized = normalize_write_concern(test_case["writeConcern"]) + if not valid: + self.assertRaises((ConfigurationError, ValueError), WriteConcern, **normalized) + else: + write_concern = WriteConcern(**normalized) + self.assertEqual(write_concern.document, test_case["writeConcernDocument"]) + self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) + self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) + if "readConcern" in test_case: + # Any string for 'level' is equally valid + read_concern = ReadConcern(**test_case["readConcern"]) + self.assertEqual(read_concern.document, test_case["readConcernDocument"]) + self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) + + return run_test + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + dirname = os.path.split(dirpath)[-1] + + if dirname == "operation": + # This directory is tested by 
TestOperations. + continue + elif dirname == "connection-string": + create_test = create_connection_string_test + else: + create_test = create_document_test + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as test_stream: + test_cases = json.load(test_stream)["tests"] + + fname = os.path.splitext(filename)[0] + for test_case in test_cases: + new_test = create_test(test_case) + test_name = "test_{}_{}_{}".format( + dirname.replace("-", "_"), + fname.replace("-", "_"), + str(test_case["description"].lower().replace(" ", "_")), + ) + + new_test.__name__ = test_name + setattr(TestReadWriteConcernSpec, new_test.__name__, new_test) + + +create_tests() + + +# Generate unified tests. +# PyMongo does not support MapReduce. +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "operation"), + module=__name__, + expected_failures=["MapReduce .*"], + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_retryable_reads.py b/test/asynchronous/test_retryable_reads.py new file mode 100644 index 0000000000..47ac91b0f5 --- /dev/null +++ b/test/asynchronous/test_retryable_reads.py @@ -0,0 +1,266 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test retryable reads spec.""" +from __future__ import annotations + +import os +import pprint +import sys +import threading +from test.asynchronous.utils import async_set_fail_point + +from pymongo.errors import OperationFailure + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncPyMongoTestCase, + async_client_context, + client_knobs, + unittest, +) +from test.utils_shared import ( + CMAPListener, + OvertCommandListener, +) + +from pymongo.monitoring import ( + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) + +_IS_SYNC = False + + +class TestClientOptions(AsyncPyMongoTestCase): + async def test_default(self): + client = self.simple_client(connect=False) + self.assertEqual(client.options.retry_reads, True) + + async def test_kwargs(self): + client = self.simple_client(retryReads=True, connect=False) + self.assertEqual(client.options.retry_reads, True) + client = self.simple_client(retryReads=False, connect=False) + self.assertEqual(client.options.retry_reads, False) + + async def test_uri(self): + client = self.simple_client("mongodb://h/?retryReads=true", connect=False) + self.assertEqual(client.options.retry_reads, True) + client = self.simple_client("mongodb://h/?retryReads=false", connect=False) + self.assertEqual(client.options.retry_reads, False) + + +class FindThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + async def run(self): + await self.collection.find_one({}) + self.passed = True + + +class TestPoolPausedError(AsyncIntegrationTest): + # Pools don't get paused in load balanced mode. 
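+ # In load-balanced mode connection errors only clear connections for one
+ # serviceId without pausing the pool, so PoolClearedError never occurs.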
+ RUN_ON_LOAD_BALANCER = False + + @async_client_context.require_sync + @async_client_context.require_failCommand_blockConnection + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_pool_paused_error_is_retryable(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3519 + self.skipTest("Test is flaky on PyPy") + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [FindThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + }, + } + async with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. + cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. 
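+ # Hence only three started events below: the find that failed with code
+ # 91, its retry, and the lone attempt from the thread whose checkout
+ # failed before any command was sent.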
+ started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + +class TestRetryableReads(AsyncIntegrationTest): + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_are_retried_on_a_different_mongos_when_one_is_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + mongos_clients = [] + + for mongos in async_client_context.mongos_seeds().split(","): + client = await self.async_rs_or_single_client(mongos) + await async_set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + async_client_context.mongos_seeds(), + event_listeners=[listener], + retryReads=True, + ) + + with self.assertRaises(OperationFailure): + await client.t.t.find_one({}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + await async_set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + # Assert that both events occurred on different mongos. + assert listener.failed_events[0].connection_id != listener.failed_events[1].connection_id + + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_are_retried_on_the_same_mongos_when_no_others_are_available( + self + ): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + host = async_client_context.mongos_seeds().split(",")[0] + mongos_client = await self.async_rs_or_single_client(host) + await async_set_fail_point(mongos_client, fail_command) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + host, + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + await client.t.t.find_one({}) + + # Disable failpoint. + fail_command["mode"] = "off" + await async_set_fail_point(mongos_client, fail_command) + + # Assert that exactly one failed command event and one succeeded command event occurred. + self.assertEqual(len(listener.failed_events), 1) + self.assertEqual(len(listener.succeeded_events), 1) + + # Assert that both events occurred on the same mongos. 
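+ # (connection_id on a command event is the (host, port) address of the
+ # server that executed it).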
+ assert listener.succeeded_events[0].connection_id == listener.failed_events[0].connection_id + + @async_client_context.require_failCommand_fail_point + async def test_retryable_reads_are_retried_on_the_same_implicit_session(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + await client.t.t.insert_one({"x": 1}) + + commands = [ + ("aggregate", lambda: client.t.t.count_documents({})), + ("aggregate", lambda: client.t.t.aggregate([{"$match": {}}])), + ("count", lambda: client.t.t.estimated_document_count()), + ("distinct", lambda: client.t.t.distinct("x")), + ("find", lambda: client.t.t.find_one({})), + ("listDatabases", lambda: client.list_databases()), + ("listCollections", lambda: client.t.list_collections()), + ("listIndexes", lambda: client.t.t.list_indexes()), + ] + + for command_name, operation in commands: + listener.reset() + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": [command_name], "errorCode": 6}, + } + + async with self.fail_point(fail_command): + await operation() + + # Assert that both events occurred on the same session. + command_docs = [ + event.command + for event in listener.started_events + if event.command_name == command_name + ] + self.assertEqual(len(command_docs), 2) + self.assertEqual(command_docs[0]["lsid"], command_docs[1]["lsid"]) + self.assertIsNot(command_docs[0], command_docs[1]) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_retryable_reads_unified.py b/test/asynchronous/test_retryable_reads_unified.py new file mode 100644 index 0000000000..e62d606810 --- /dev/null +++ b/test/asynchronous/test_retryable_reads_unified.py @@ -0,0 +1,46 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Reads unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_reads/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_reads/unified") + +# Generate unified tests. +# PyMongo does not support MapReduce, ListDatabaseObjects or ListCollectionObjects. +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + expected_failures=["ListDatabaseObjects .*", "ListCollectionObjects .*", "MapReduce .*"], + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_retryable_writes.py b/test/asynchronous/test_retryable_writes.py new file mode 100644 index 0000000000..ddb1d39eb7 --- /dev/null +++ b/test/asynchronous/test_retryable_writes.py @@ -0,0 +1,605 @@ +# Copyright 2017-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test retryable writes.""" +from __future__ import annotations + +import asyncio +import copy +import pprint +import sys +import threading +from test.asynchronous.utils import async_set_fail_point, flaky + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + SkipTest, + async_client_context, + unittest, +) +from test.asynchronous.helpers import client_knobs +from test.utils_shared import ( + CMAPListener, + DeprecationFilter, + EventListener, + OvertCommandListener, +) +from test.version import Version + +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.int64 import Int64 +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from pymongo.errors import ( + AutoReconnect, + ConnectionFailure, + OperationFailure, + ServerSelectionTimeoutError, + WriteConcernError, +) +from pymongo.monitoring import ( + CommandSucceededEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + PoolClearedEvent, +) +from pymongo.operations import ( + DeleteMany, + DeleteOne, + InsertOne, + ReplaceOne, + UpdateMany, + UpdateOne, +) +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class InsertEventListener(EventListener): + def succeeded(self, event: CommandSucceededEvent) -> None: + super().succeeded(event) + if ( + event.command_name == "insert" + and event.reply.get("writeConcernError", {}).get("code", None) == 91 + ): + async_client_context.client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError", "NoWritesPerformed"], + "failCommands": ["insert"], + }, + } + ) + + +def retryable_single_statement_ops(coll): + return [ + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), + (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), + (coll.bulk_write, [[ReplaceOne({}, {"a1": 1})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a2": 1}), ReplaceOne({}, {"a3": 1})]], {}), + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a4": 1}}), UpdateOne({}, {"$set": {"a5": 1}})]], + {}, + ), + (coll.bulk_write, [[DeleteOne({})]], {}), + (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), + (coll.insert_one, [{}], {}), + (coll.insert_many, [[{}, {}]], {}), + (coll.replace_one, [{}, {"a6": 1}], {}), + (coll.update_one, [{}, {"$set": {"a7": 1}}], {}), + (coll.delete_one, [{}], {}), + (coll.find_one_and_replace, [{}, {"a8": 1}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a9": 1}}], {}), + (coll.find_one_and_delete, [{}, {"a10": 1}], {}), + ] + + +def non_retryable_single_statement_ops(coll): + return [ + ( + coll.bulk_write, + [[UpdateOne({}, {"$set": {"a": 1}}), UpdateMany({}, {"$set": {"a": 1}})]], + {}, + ), + (coll.bulk_write, [[DeleteOne({}), DeleteMany({})]], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), + (coll.delete_many, [{}], {}), + ] + + +class IgnoreDeprecationsTest(AsyncIntegrationTest): + 
RUN_ON_LOAD_BALANCER = True + deprecation_filter: DeprecationFilter + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.deprecation_filter = DeprecationFilter() + + async def asyncTearDown(self) -> None: + await super().asyncTearDown() + self.deprecation_filter.stop() + + +class TestRetryableWrites(IgnoreDeprecationsTest): + listener: OvertCommandListener + knobs: client_knobs + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + # Speed up the tests by decreasing the heartbeat frequency. + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.listener = OvertCommandListener() + self.client = await self.async_rs_or_single_client( + retryWrites=True, event_listeners=[self.listener] + ) + self.db = self.client.pymongo_test + + if async_client_context.is_rs and async_client_context.test_commands_enabled: + await self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) + ) + + async def asyncTearDown(self): + if async_client_context.is_rs and async_client_context.test_commands_enabled: + await self.client.admin.command( + SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) + ) + self.knobs.disable() + await super().asyncTearDown() + + async def test_supported_single_statement_no_retry(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=False, event_listeners=[listener]) + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + await method(*args, **kwargs) + for event in listener.started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + async def test_supported_single_statement_unsupported_cluster(self): + if async_client_context.is_rs or async_client_context.is_mongos: + raise SkipTest("This cluster supports retryable writes") + + for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + await method(*args, **kwargs) + + for event in self.listener.started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + async def test_unsupported_single_statement(self): + coll = self.db.retryable_write_test + await coll.insert_many([{}, {}]) + coll_w0 = coll.with_options(write_concern=WriteConcern(w=0)) + for method, args, kwargs in non_retryable_single_statement_ops( + coll + ) + retryable_single_statement_ops(coll_w0): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() + await method(*args, **kwargs) + started_events = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), len(started_events), msg) + self.assertEqual(len(self.listener.failed_events), 0, msg) + for event in started_events: + self.assertNotIn( + "txnNumber", + event.command, + f"{msg} sent txnNumber with {event.command_name}", + ) + + async def test_server_selection_timeout_not_retried(self): + """A ServerSelectionTimeoutError is not retried.""" + listener = OvertCommandListener() + client = self.simple_client( + "somedomainthatdoesntexist.org", + serverSelectionTimeoutMS=1, + retryWrites=True, + event_listeners=[listener], + ) + for method, args, kwargs in 
retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + with self.assertRaises(ServerSelectionTimeoutError, msg=msg): + await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 0, msg) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_retry_timeout_raises_original_error(self): + """A ServerSelectionTimeoutError on the retry attempt raises the + original error. + """ + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + topology = client._topology + select_server = topology.select_server + + def mock_select_server(*args, **kwargs): + server = select_server(*args, **kwargs) + + def raise_error(*args, **kwargs): + raise ServerSelectionTimeoutError("No primary available for writes") + + # Raise ServerSelectionTimeout on the retry attempt. + topology.select_server = raise_error + return server + + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() + topology.select_server = mock_select_server + with self.assertRaises(ConnectionFailure, msg=msg): + await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 1, msg) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_batch_splitting(self): + """Test retry succeeds after failures during batch splitting.""" + large = "s" * 1024 * 1024 * 15 + coll = self.db.retryable_write_test + await coll.delete_many({}) + self.listener.reset() + bulk_result = await coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + UpdateOne({"_id": 1, "l": large}, {"$unset": {"l": 1}, "$inc": {"count": 1}}), + UpdateOne({"_id": 2, "l": large}, {"$set": {"foo": "bar"}}), + DeleteOne({"l": large}), + DeleteOne({"l": large}), + ] + ) + # Each command should fail and be retried. + # With OP_MSG 3 inserts are one batch. 2 updates another. + # 2 deletes a third. + self.assertEqual(len(self.listener.started_events), 6) + self.assertEqual(await coll.find_one(), {"_id": 1, "count": 1}) + # Assert the final result + expected_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 3, + "nUpserted": 0, + "nMatched": 2, + "nModified": 2, + "nRemoved": 2, + "upserted": [], + } + self.assertEqual(bulk_result.bulk_api_result, expected_result) + + @async_client_context.require_replica_set + @async_client_context.require_test_commands + async def test_batch_splitting_retry_fails(self): + """Test retry fails during batch splitting.""" + large = "s" * 1024 * 1024 * 15 + coll = self.db.retryable_write_test + await coll.delete_many({}) + await self.client.admin.command( + SON( + [ + ("configureFailPoint", "onPrimaryTransactionalWrite"), + ("mode", {"skip": 3}), # The number of _documents_ to skip. 
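+ # With {"skip": 3}, the first three documents are written normally and
+ # the batch containing the fourth fails before commit.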
+ ("data", {"failBeforeCommitExceptionCode": 1}), + ] + ) + ) + self.listener.reset() + async with self.client.start_session() as session: + initial_txn = session._transaction_id + try: + await coll.bulk_write( + [ + InsertOne({"_id": 1, "l": large}), + InsertOne({"_id": 2, "l": large}), + InsertOne({"_id": 3, "l": large}), + InsertOne({"_id": 4, "l": large}), + ], + session=session, + ) + except ConnectionFailure: + pass + else: + self.fail("bulk_write should have failed") + + started = self.listener.started_events + self.assertEqual(len(started), 3) + self.assertEqual(len(self.listener.succeeded_events), 1) + expected_txn = Int64(initial_txn + 1) + self.assertEqual(started[0].command["txnNumber"], expected_txn) + self.assertEqual(started[0].command["lsid"], session.session_id) + expected_txn = Int64(initial_txn + 2) + self.assertEqual(started[1].command["txnNumber"], expected_txn) + self.assertEqual(started[1].command["lsid"], session.session_id) + started[1].command.pop("$clusterTime") + started[2].command.pop("$clusterTime") + self.assertEqual(started[1].command, started[2].command) + final_txn = session._transaction_id + self.assertEqual(final_txn, expected_txn) + self.assertEqual(await coll.find_one(projection={"_id": True}), {"_id": 1}) + + @async_client_context.require_multiple_mongoses + @async_client_context.require_failCommand_fail_point + async def test_retryable_writes_in_sharded_cluster_multiple_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + "appName": "retryableWriteTest", + }, + } + + mongos_clients = [] + + for mongos in async_client_context.mongos_seeds().split(","): + client = await self.async_rs_or_single_client(mongos) + await async_set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + async_client_context.mongos_seeds(), + appName="retryableWriteTest", + event_listeners=[listener], + retryWrites=True, + ) + + with self.assertRaises(AutoReconnect): + await client.t.t.insert_one({"x": 1}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + await async_set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + +class TestWriteConcernError(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + fail_insert: dict + + @async_client_context.require_replica_set + @async_client_context.require_failCommand_fail_point + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": { + "failCommands": ["insert"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}, + }, + } + + @async_client_context.require_version_min(4, 0) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_RetryableWriteError_error_label(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + + # Ensure collection exists. 
+ await client.pymongo_test.testcoll.insert_one({}) + + async with self.fail_point(self.fail_insert): + with self.assertRaises(WriteConcernError) as cm: + await client.pymongo_test.testcoll.insert_one({}) + self.assertTrue(cm.exception.has_error_label("RetryableWriteError")) + + if async_client_context.version >= Version(4, 4): + # In MongoDB 4.4+ we rely on the server returning the error label. + self.assertIn("RetryableWriteError", listener.succeeded_events[-1].reply["errorLabels"]) + + @async_client_context.require_version_min(4, 4) + async def test_RetryableWriteError_error_label_RawBSONDocument(self): + # using RawBSONDocument should not cause errorLabel parsing to fail + async with self.fail_point(self.fail_insert): + async with self.client.start_session() as s: + s._start_retryable_write() + result = await self.client.pymongo_test.command( + "insert", + "testcoll", + documents=[{"_id": 1}], + txnNumber=s._transaction_id, + session=s, + codec_options=DEFAULT_CODEC_OPTIONS.with_options( + document_class=RawBSONDocument + ), + ) + + self.assertIn("writeConcernError", result) + self.assertIn("RetryableWriteError", result["errorLabels"]) + + +class InsertThread(threading.Thread): + def __init__(self, collection): + super().__init__() + self.daemon = True + self.collection = collection + self.passed = False + + async def run(self): + await self.collection.insert_one({}) + self.passed = True + + +class TestPoolPausedError(AsyncIntegrationTest): + # Pools don't get paused in load balanced mode. + RUN_ON_LOAD_BALANCER = False + + @async_client_context.require_sync + @async_client_context.require_failCommand_blockConnection + @async_client_context.require_retryable_writes + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + @flaky(reason="PYTHON-5291") + async def test_pool_paused_error_is_retryable(self): + cmap_listener = CMAPListener() + cmd_listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) + for _ in range(10): + cmap_listener.reset() + cmd_listener.reset() + threads = [InsertThread(client.pymongo_test.test) for _ in range(2)] + fail_command = { + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "blockConnection": True, + "blockTimeMS": 1000, + "errorCode": 91, + "errorLabels": ["RetryableWriteError"], + }, + } + async with self.fail_point(fail_command): + for thread in threads: + thread.start() + for thread in threads: + thread.join() + for thread in threads: + self.assertTrue(thread.passed) + # It's possible that SDAM can rediscover the server and mark the + # pool ready before the thread in the wait queue has a chance + # to run. Repeat the test until the thread actually encounters + # a PoolClearedError. + if cmap_listener.event_count(ConnectionCheckOutFailedEvent): + break + + # Via CMAP monitoring, assert that the first check out succeeds. 
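+ # Expected order: checkout succeeds, pool is cleared, the queued checkout
+ # fails with CONN_ERROR, and the retried checkout succeeds.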
+ cmap_events = cmap_listener.events_by_type( + (ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, PoolClearedEvent) + ) + msg = pprint.pformat(cmap_listener.events) + self.assertIsInstance(cmap_events[0], ConnectionCheckedOutEvent, msg) + self.assertIsInstance(cmap_events[1], PoolClearedEvent, msg) + self.assertIsInstance(cmap_events[2], ConnectionCheckOutFailedEvent, msg) + self.assertEqual(cmap_events[2].reason, ConnectionCheckOutFailedReason.CONN_ERROR, msg) + self.assertIsInstance(cmap_events[3], ConnectionCheckedOutEvent, msg) + + # Connection check out failures are not reflected in command + # monitoring because we only publish command events _after_ checking + # out a connection. + started = cmd_listener.started_events + msg = pprint.pformat(cmd_listener.results) + self.assertEqual(3, len(started), msg) + succeeded = cmd_listener.succeeded_events + self.assertEqual(2, len(succeeded), msg) + failed = cmd_listener.failed_events + self.assertEqual(1, len(failed), msg) + + @async_client_context.require_sync + @async_client_context.require_failCommand_fail_point + @async_client_context.require_replica_set + @async_client_context.require_version_min( + 6, 0, 0 + ) # the spec requires that this prose test only be run on 6.0+ + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + async def test_returns_original_error_code( + self, + ): + cmd_listener = InsertEventListener() + client = await self.async_rs_or_single_client( + retryWrites=True, event_listeners=[cmd_listener] + ) + await client.test.test.drop() + cmd_listener.reset() + await client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "writeConcernError": { + "code": 91, + "errorLabels": ["RetryableWriteError"], + }, + "failCommands": ["insert"], + }, + } + ) + with self.assertRaises(WriteConcernError) as exc: + await client.test.test.insert_one({"_id": 1}) + self.assertEqual(exc.exception.code, 91) + await client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": "off", + } + ) + + +# TODO: Make this a real integration test where we stepdown the primary. +class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): + @async_client_context.require_replica_set + async def test_increment_transaction_id_without_sending_command(self): + """Test that the txnNumber field is properly incremented, even when + the first attempt fails before sending the command. + """ + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(retryWrites=True, event_listeners=[listener]) + topology = client._topology + select_server = topology.select_server + + def raise_connection_err_select_server(*args, **kwargs): + # Raise ConnectionFailure on the first attempt and perform + # normal selection on the retry attempt. + topology.select_server = select_server + raise ConnectionFailure("Connection refused") + + for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): + listener.reset() + topology.select_server = raise_connection_err_select_server + async with client.start_session() as session: + kwargs = copy.deepcopy(kwargs) + kwargs["session"] = session + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + initial_txn_id = session._transaction_id + + # Each operation should fail on the first attempt and succeed + # on the second. 
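+ # The aborted first attempt still reserves a txnNumber, so the one
+ # command actually sent must carry txnNumber == initial + 1.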
+ await method(*args, **kwargs) + self.assertEqual(len(listener.started_events), 1, msg) + retry_cmd = listener.started_events[0].command + sent_txn_id = retry_cmd["txnNumber"] + final_txn_id = session._transaction_id + self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) + self.assertEqual(sent_txn_id, final_txn_id, msg) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_retryable_writes_unified.py b/test/asynchronous/test_retryable_writes_unified.py new file mode 100644 index 0000000000..bb493e6010 --- /dev/null +++ b/test/asynchronous/test_retryable_writes_unified.py @@ -0,0 +1,39 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Retryable Writes unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_writes/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_writes/unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_run_command.py b/test/asynchronous/test_run_command.py new file mode 100644 index 0000000000..3ac8c32706 --- /dev/null +++ b/test/asynchronous/test_run_command.py @@ -0,0 +1,41 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run Command unified tests.""" +from __future__ import annotations + +import os +import unittest +from pathlib import Path +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. 
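+# Both branches resolve to the same test/run_command directory; the async
+# file simply lives one level deeper in the tree.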
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "run_command") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "run_command") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_sdam_monitoring_spec.py b/test/asynchronous/test_sdam_monitoring_spec.py new file mode 100644 index 0000000000..71ec6c6b46 --- /dev/null +++ b/test/asynchronous/test_sdam_monitoring_spec.py @@ -0,0 +1,374 @@ +# Copyright 2016 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the sdam monitoring spec tests.""" +from __future__ import annotations + +import asyncio +import json +import os +import sys +import time +from pathlib import Path + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs, unittest +from test.utils_shared import ( + ServerAndTopologyEventListener, + async_wait_until, + server_name_to_type, +) + +from bson.json_util import object_hook +from pymongo import AsyncMongoClient, monitoring +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.monitor import Monitor +from pymongo.common import clean_node +from pymongo.errors import ConnectionFailure, NotPrimaryError +from pymongo.hello import Hello +from pymongo.server_description import ServerDescription +from pymongo.topology_description import TOPOLOGY_TYPE + +_IS_SYNC = False + +# Location of JSON test specifications. 
+if _IS_SYNC:
+    TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sdam_monitoring")
+else:
+    TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sdam_monitoring")
+
+
+def compare_server_descriptions(expected, actual):
+    if (expected["address"] != "{}:{}".format(*actual.address)) or (
+        server_name_to_type(expected["type"]) != actual.server_type
+    ):
+        return False
+    expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"])
+    return expected_hosts == {"{}:{}".format(*s) for s in actual.all_hosts}
+
+
+def compare_topology_descriptions(expected, actual):
+    if TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) != actual.topology_type:
+        return False
+    expected = expected["servers"]
+    actual = actual.server_descriptions()
+    if len(expected) != len(actual):
+        return False
+    for exp_server in expected:
+        for _address, actual_server in actual.items():
+            if compare_server_descriptions(exp_server, actual_server):
+                break
+        else:
+            return False
+    return True
+
+
+def compare_events(expected_dict, actual):
+    if not expected_dict:
+        return False, "Error: Bad expected value in YAML test"
+    if not actual:
+        return False, "Error: Event published was None"
+
+    expected_type, expected = list(expected_dict.items())[0]
+
+    if expected_type == "server_opening_event":
+        if not isinstance(actual, monitoring.ServerOpeningEvent):
+            return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__)
+        if expected["address"] != "{}:{}".format(*actual.server_address):
+            return (
+                False,
+                "ServerOpeningEvent published with wrong address (expected {}, got {})".format(
+                    expected["address"], actual.server_address
+                ),
+            )
+
+    elif expected_type == "server_description_changed_event":
+        if not isinstance(actual, monitoring.ServerDescriptionChangedEvent):
+            return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__))
+        if expected["address"] != "{}:{}".format(*actual.server_address):
+            return (
+                False,
+                "ServerDescriptionChangedEvent has wrong address (expected {}, got {})".format(
+                    expected["address"], actual.server_address
+                ),
+            )
+
+        if not compare_server_descriptions(expected["newDescription"], actual.new_description):
+            return (False, "New ServerDescription incorrect in ServerDescriptionChangedEvent")
+        if not compare_server_descriptions(
+            expected["previousDescription"], actual.previous_description
+        ):
+            return (
+                False,
+                "Previous ServerDescription incorrect in ServerDescriptionChangedEvent",
+            )
+
+    elif expected_type == "server_closed_event":
+        if not isinstance(actual, monitoring.ServerClosedEvent):
+            return False, "Expected ServerClosedEvent, got %s" % (actual.__class__)
+        if expected["address"] != "{}:{}".format(*actual.server_address):
+            return (
+                False,
+                "ServerClosedEvent published with wrong address (expected {}, got {})".format(
+                    expected["address"], actual.server_address
+                ),
+            )
+
+    elif expected_type == "topology_opening_event":
+        if not isinstance(actual, monitoring.TopologyOpenedEvent):
+            return False, "Expected TopologyOpenedEvent, got %s" % (actual.__class__)
+
+    elif expected_type == "topology_description_changed_event":
+        if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent):
+            return (
+                False,
+                "Expected TopologyDescriptionChangedEvent, got %s" % (actual.__class__),
+            )
+        if not compare_topology_descriptions(expected["newDescription"], actual.new_description):
+            return (
+                False,
+                "New TopologyDescription incorrect in TopologyDescriptionChangedEvent",
+            )
+        if not compare_topology_descriptions(
+            expected["previousDescription"], actual.previous_description
+        ):
+            return (
+                False,
+                "Previous TopologyDescription incorrect in TopologyDescriptionChangedEvent",
+            )
+
+    elif expected_type == "topology_closed_event":
+        if not isinstance(actual, monitoring.TopologyClosedEvent):
+            return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__)
+
+    else:
+        return False, f"Incorrect event: expected {expected_type}, actual {actual}"
+
+    return True, ""
+
+
+def compare_multiple_events(i, expected_results, actual_results):
+    events_in_a_row = []
+    j = i
+    while j < len(expected_results) and isinstance(actual_results[j], actual_results[i].__class__):
+        events_in_a_row.append(actual_results[j])
+        j += 1
+    message = ""
+    for event in events_in_a_row:
+        for k in range(i, j):
+            passed, message = compare_events(expected_results[k], event)
+            if passed:
+                expected_results[k] = None
+                break
+        else:
+            return i, False, message
+    return j, True, ""
+
+
+class TestAllScenarios(AsyncIntegrationTest):
+    async def asyncSetUp(self):
+        await super().asyncSetUp()
+        self.all_listener = ServerAndTopologyEventListener()
+
+
+def create_test(scenario_def):
+    async def run_scenario(self):
+        with client_knobs(events_queue_frequency=0.05, min_heartbeat_interval=0.05):
+            await _run_scenario(self)
+
+    async def _run_scenario(self):
+        class NoopMonitor(Monitor):
+            """Override the _run method to do nothing."""
+
+            async def _run(self):
+                await asyncio.sleep(0.05)
+
+        m = AsyncMongoClient(
+            host=scenario_def["uri"],
+            port=27017,
+            event_listeners=[self.all_listener],
+            _monitor_class=NoopMonitor,
+        )
+        topology = await m._get_topology()
+
+        try:
+            for phase in scenario_def["phases"]:
+                for source, response in phase.get("responses", []):
+                    source_address = clean_node(source)
+                    await topology.on_change(
+                        ServerDescription(
+                            address=source_address, hello=Hello(response), round_trip_time=0
+                        )
+                    )
+
+                expected_results = phase["outcome"]["events"]
+                expected_len = len(expected_results)
+                await async_wait_until(
+                    lambda: len(self.all_listener.results) >= expected_len,
+                    "publish all events",
+                    timeout=15,
+                )
+
+                # Wait some time to catch possible lagging extra events.
+                await async_wait_until(lambda: topology._events.empty(), "publish lagging events")
+
+                i = 0
+                while i < expected_len:
+                    result = (
+                        self.all_listener.results[i] if len(self.all_listener.results) > i else None
+                    )
+                    # The order of ServerOpening/ClosedEvents doesn't matter
+                    if isinstance(
+                        result, (monitoring.ServerOpeningEvent, monitoring.ServerClosedEvent)
+                    ):
+                        i, passed, message = compare_multiple_events(
+                            i, expected_results, self.all_listener.results
+                        )
+                        self.assertTrue(passed, message)
+                    else:
+                        self.assertTrue(*compare_events(expected_results[i], result))
+                        i += 1
+
+                # Assert no extra events.
+                extra_events = self.all_listener.results[expected_len:]
+                if extra_events:
+                    self.fail(f"Extra events {extra_events!r}")
+
+                self.all_listener.reset()
+        finally:
+            await m.close()
+
+    return run_scenario
+
+
+def create_tests():
+    for dirpath, _, filenames in os.walk(TEST_PATH):
+        for filename in filenames:
+            with open(os.path.join(dirpath, filename)) as scenario_stream:
+                scenario_def = json.load(scenario_stream, object_hook=object_hook)
+
+            # Construct test from scenario.
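+            # Each JSON file becomes a single test_<filename> method on
+            # TestAllScenarios.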
+ new_test = create_test(scenario_def) + test_name = f"test_{os.path.splitext(filename)[0]}" + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + + +class TestSdamMonitoring(AsyncIntegrationTest): + knobs: client_knobs + listener: ServerAndTopologyEventListener + test_client: AsyncMongoClient + coll: AsyncCollection + + @classmethod + def setUpClass(cls): + # Speed up the tests by decreasing the event publish frequency. + cls.knobs = client_knobs( + events_queue_frequency=0.1, heartbeat_frequency=0.1, min_heartbeat_interval=0.1 + ) + cls.knobs.enable() + cls.listener = ServerAndTopologyEventListener() + + @classmethod + def tearDownClass(cls): + cls.knobs.disable() + + @async_client_context.require_failCommand_fail_point + async def asyncSetUp(self): + await super().asyncSetUp() + + retry_writes = async_client_context.supports_transactions() + self.test_client = await self.async_rs_or_single_client( + event_listeners=[self.listener], retryWrites=retry_writes + ) + self.coll = self.test_client[self.client.db.name].test + await self.coll.insert_one({}) + self.listener.reset() + + async def asyncTearDown(self): + await super().asyncTearDown() + + async def _test_app_error(self, fail_command_opts, expected_error): + address = await self.test_client.address + + # Test that an application error causes a ServerDescriptionChangedEvent + # to be published. + data = {"failCommands": ["insert"]} + data.update(fail_command_opts) + fail_insert = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": data, + } + async with self.fail_point(fail_insert): + if self.test_client.options.retry_writes: + await self.coll.insert_one({}) + else: + with self.assertRaises(expected_error): + await self.coll.insert_one({}) + await self.coll.insert_one({}) + + def marked_unknown(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.new_description.is_server_type_known + ) + + def discovered_node(event): + return ( + isinstance(event, monitoring.ServerDescriptionChangedEvent) + and event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown_and_rediscovered(): + return ( + len(self.listener.matching(marked_unknown)) >= 1 + and len(self.listener.matching(discovered_node)) >= 1 + ) + + # Topology events are not published synchronously + await async_wait_until(marked_unknown_and_rediscovered, "rediscover node") + + # Expect a single ServerDescriptionChangedEvent for the network error. + marked_unknown_events = self.listener.matching(marked_unknown) + self.assertEqual(len(marked_unknown_events), 1, marked_unknown_events) + self.assertIsInstance(marked_unknown_events[0].new_description.error, expected_error) + + async def test_network_error_publishes_events(self): + await self._test_app_error({"closeConnection": True}, ConnectionFailure) + + # In 4.4+, not primary errors from failCommand don't cause SDAM state + # changes because topologyVersion is not incremented. 
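+    # Hence the require_version_max(4, 3) cap below: on 4.3 and earlier a
+    # not primary error still marks the server Unknown.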
+ @async_client_context.require_version_max(4, 3) + async def test_not_primary_error_publishes_events(self): + await self._test_app_error( + {"errorCode": 10107, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + async def test_shutdown_error_publishes_events(self): + await self._test_app_error( + {"errorCode": 91, "closeConnection": False, "errorLabels": ["RetryableWriteError"]}, + NotPrimaryError, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_server_selection.py b/test/asynchronous/test_server_selection.py new file mode 100644 index 0000000000..f570662b85 --- /dev/null +++ b/test/asynchronous/test_server_selection.py @@ -0,0 +1,212 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +from pymongo import AsyncMongoClient, ReadPreference +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology +from pymongo.errors import ServerSelectionTimeoutError +from pymongo.hello import HelloCompat +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector +from pymongo.typings import strip_optional + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.utils import async_wait_until +from test.asynchronous.utils_selection_tests import ( + create_selection_tests, + get_topology_settings_dict, +) +from test.utils_selection_tests_shared import ( + get_addresses, + make_server_description, +) +from test.utils_shared import ( + FunctionCallRecorder, + OvertCommandListener, +) + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent, "server_selection", "server_selection" + ) +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "server_selection" + ) + + +class SelectionStoreSelector: + """No-op selector that keeps track of what was passed to it.""" + + def __init__(self): + self.selection = None + + def __call__(self, selection): + self.selection = selection + return selection + + +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore + pass + + +class TestCustomServerSelectorFunction(AsyncIntegrationTest): + @async_client_context.require_replica_set + async def test_functional_select_max_port_number_host(self): + # Selector that returns server with highest port number. + def custom_selector(servers): + ports = [s.address[1] for s in servers] + idx = ports.index(max(ports)) + return [servers[idx]] + + # Initialize client with appropriate listeners. 
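+        # The command listener records the connection each find is sent on,
+        # which is checked against the expected (highest) port below.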
+        listener = OvertCommandListener()
+        client = await self.async_rs_or_single_client(
+            server_selector=custom_selector, event_listeners=[listener]
+        )
+        coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll
+        self.addAsyncCleanup(client.drop_database, "testdb")
+
+        # Wait for the node list to be fully populated.
+        async def all_hosts_started():
+            return len((await client.admin.command(HelloCompat.LEGACY_CMD))["hosts"]) == len(
+                client._topology._description.readable_servers
+            )
+
+        await async_wait_until(all_hosts_started, "receive heartbeat from all hosts")
+
+        expected_port = max(
+            [strip_optional(n.address[1]) for n in client._topology._description.readable_servers]
+        )
+
+        # Insert 1 record and access it 10 times.
+        await coll.insert_one({"name": "John Doe"})
+        for _ in range(10):
+            await coll.find_one({"name": "John Doe"})
+
+        # Confirm all find commands are run against the appropriate host.
+        for command in listener.started_events:
+            if command.command_name == "find":
+                self.assertEqual(command.connection_id[1], expected_port)
+
+    async def test_invalid_server_selector(self):
+        # Client initialization must fail if server_selector is not callable.
+        for selector_candidate in [[], 10, "string", {}]:
+            with self.assertRaisesRegex(ValueError, "must be a callable"):
+                AsyncMongoClient(connect=False, server_selector=selector_candidate)
+
+        # A None value for server_selector is OK.
+        AsyncMongoClient(connect=False, server_selector=None)
+
+    @async_client_context.require_replica_set
+    async def test_selector_called(self):
+        selector = FunctionCallRecorder(lambda x: x)
+
+        # Client setup.
+        mongo_client = await self.async_rs_or_single_client(server_selector=selector)
+        test_collection = mongo_client.testdb.test_collection
+        self.addAsyncCleanup(mongo_client.drop_database, "testdb")
+
+        # Do N operations and test that the selector is called at least N-1
+        # times, due to the server selection fast path.
+        await test_collection.insert_one({"age": 20, "name": "John"})
+        await test_collection.insert_one({"age": 31, "name": "Jane"})
+        await test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}})
+        await test_collection.find_one({"name": "Roe"})
+        self.assertGreaterEqual(selector.call_count, 3)
+
+    @async_client_context.require_replica_set
+    async def test_latency_threshold_application(self):
+        selector = SelectionStoreSelector()
+
+        scenario_def: dict = {
+            "topology_description": {
+                "type": "ReplicaSetWithPrimary",
+                "servers": [
+                    {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}},
+                    {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}},
+                    {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSPrimary", "tag": {}},
+                ],
+            }
+        }
+
+        # Create & populate Topology such that all but one server is too slow.
+        rtt_times = [srv["avg_rtt_ms"] for srv in scenario_def["topology_description"]["servers"]]
+        min_rtt_idx = rtt_times.index(min(rtt_times))
+        seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"])
+        settings = get_topology_settings_dict(
+            heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector
+        )
+        topology = Topology(TopologySettings(**settings))
+        await topology.open()
+        for server in scenario_def["topology_description"]["servers"]:
+            server_description = make_server_description(server, hosts)
+            await topology.on_change(server_description)
+
+        # Invoke server selection and assert no filtering based on latency
+        # prior to custom server selection logic kicking in.
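+        # SelectionStoreSelector recorded the full candidate list it was
+        # handed, so its length should match the entire topology even though
+        # local_threshold_ms is only 1.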
+ server = await topology.select_server(ReadPreference.NEAREST, _Op.TEST) + assert selector.selection is not None + self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) + + # Ensure proper filtering based on latency after custom selection. + self.assertEqual(server.description.address, seeds[min_rtt_idx]) + + @async_client_context.require_replica_set + async def test_server_selector_bypassed(self): + selector = FunctionCallRecorder(lambda x: x) + + scenario_def = { + "topology_description": { + "type": "ReplicaSetNoPrimary", + "servers": [ + {"address": "b:27017", "avg_rtt_ms": 10000, "type": "RSSecondary", "tag": {}}, + {"address": "c:27017", "avg_rtt_ms": 20000, "type": "RSSecondary", "tag": {}}, + {"address": "a:27017", "avg_rtt_ms": 30000, "type": "RSSecondary", "tag": {}}, + ], + } + } + + # Create & populate Topology such that no server is writeable. + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + settings = get_topology_settings_dict( + heartbeat_frequency=1, local_threshold_ms=1, seeds=seeds, server_selector=selector + ) + topology = Topology(TopologySettings(**settings)) + await topology.open() + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + await topology.on_change(server_description) + + # Invoke server selection and assert no calls to our custom selector. + with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): + await topology.select_server( + writable_server_selector, _Op.TEST, server_selection_timeout=0.1 + ) + self.assertEqual(selector.call_count, 0) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_server_selection_in_window.py b/test/asynchronous/test_server_selection_in_window.py new file mode 100644 index 0000000000..dd0ff734f7 --- /dev/null +++ b/test/asynchronous/test_server_selection_in_window.py @@ -0,0 +1,180 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations + +import asyncio +import os +import threading +from pathlib import Path +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.asynchronous.helpers import ConcurrentRunner +from test.asynchronous.utils import flaky +from test.asynchronous.utils_selection_tests import create_topology +from test.asynchronous.utils_spec_runner import AsyncSpecTestCreator +from test.utils_shared import ( + CMAPListener, + OvertCommandListener, + async_wait_until, +) + +from pymongo.common import clean_node +from pymongo.monitoring import ConnectionReadyEvent +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False +# Location of JSON test specifications. 
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection", "in_window") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "in_window" + ) + + +class TestAllScenarios(unittest.IsolatedAsyncioTestCase): + async def run_scenario(self, scenario_def): + topology = await create_topology(scenario_def) + + # Update mock operation_count state: + for mock in scenario_def["mocked_topology_state"]: + address = clean_node(mock["address"]) + server = topology.get_server_by_address(address) + server.pool.operation_count = mock["operation_count"] + + pref = ReadPreference.NEAREST + counts = {address: 0 for address in topology._description.server_descriptions()} + + # Number of times to repeat server selection + iterations = scenario_def["iterations"] + for _ in range(iterations): + server = await topology.select_server(pref, _Op.TEST, server_selection_timeout=0) + counts[server.description.address] += 1 + + # Verify expected_frequencies + outcome = scenario_def["outcome"] + tolerance = outcome["tolerance"] + expected_frequencies = outcome["expected_frequencies"] + for host_str, freq in expected_frequencies.items(): + address = clean_node(host_str) + actual_freq = float(counts[address]) / iterations + if freq == 0: + # Should be exactly 0. + self.assertEqual(actual_freq, 0) + else: + # Should be within 'tolerance'. + self.assertAlmostEqual(actual_freq, freq, delta=tolerance) + + +def create_test(scenario_def, test, name): + async def run_scenario(self): + await self.run_scenario(scenario_def) + + return run_scenario + + +class CustomSpecTestCreator(AsyncSpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + Server selection in_window tests do not have a 'tests' field. + The whole file represents a single test case. + """ + return [scenario_def] + + +CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() + + +class FinderTask(ConcurrentRunner): + def __init__(self, collection, iterations): + super().__init__() + self.daemon = True + self.collection = collection + self.iterations = iterations + self.passed = False + + async def run(self): + for _ in range(self.iterations): + await self.collection.find_one({}) + self.passed = True + + +class TestProse(AsyncIntegrationTest): + async def frequencies(self, client, listener, n_finds=10): + coll = client.test.test + N_TASKS = 10 + tasks = [FinderTask(coll, n_finds) for _ in range(N_TASKS)] + for task in tasks: + await task.start() + for task in tasks: + await task.join() + for task in tasks: + self.assertTrue(task.passed) + + events = listener.started_events + self.assertEqual(len(events), n_finds * N_TASKS) + nodes = client.nodes + self.assertEqual(len(nodes), 2) + freqs = {address: 0.0 for address in nodes} + for event in events: + freqs[event.connection_id] += 1 + for address in freqs: + freqs[address] = freqs[address] / float(len(events)) + return freqs + + @async_client_context.require_failCommand_appName + @async_client_context.require_multiple_mongoses + @flaky(reason="PYTHON-3689") + async def test_load_balancing(self): + listener = OvertCommandListener() + cmap_listener = CMAPListener() + # PYTHON-2584: Use a large localThresholdMS to avoid the impact of + # varying RTTs. 
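+        # With a 30 second threshold both mongoses remain inside the latency
+        # window, so selection is driven by in-window operation counts instead.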
+ client = await self.async_rs_client( + async_client_context.mongos_seeds(), + appName="loadBalancingTest", + event_listeners=[listener, cmap_listener], + localThresholdMS=30000, + minPoolSize=10, + ) + await async_wait_until(lambda: len(client.nodes) == 2, "discover both nodes") + # Wait for both pools to be populated. + await cmap_listener.async_wait_for_event(ConnectionReadyEvent, 20) + # Delay find commands on only one mongos. + delay_finds = { + "configureFailPoint": "failCommand", + "mode": {"times": 10000}, + "data": { + "failCommands": ["find"], + "blockConnection": True, + "blockTimeMS": 500, + "appName": "loadBalancingTest", + }, + } + async with self.fail_point(delay_finds): + nodes = async_client_context.client.nodes + self.assertEqual(len(nodes), 1) + delayed_server = next(iter(nodes)) + freqs = await self.frequencies(client, listener) + self.assertLessEqual(freqs[delayed_server], 0.25) + listener.reset() + freqs = await self.frequencies(client, listener, n_finds=150) + self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_server_selection_logging.py b/test/asynchronous/test_server_selection_logging.py new file mode 100644 index 0000000000..6b0975318a --- /dev/null +++ b/test/asynchronous/test_server_selection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the server selection logging unified format spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection_logging") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection_logging") + + +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_server_selection_rtt.py b/test/asynchronous/test_server_selection_rtt.py new file mode 100644 index 0000000000..1f8f6bc7df --- /dev/null +++ b/test/asynchronous/test_server_selection_rtt.py @@ -0,0 +1,77 @@ +# Copyright 2015 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the topology module.""" +from __future__ import annotations + +import json +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous import AsyncPyMongoTestCase + +from pymongo.read_preferences import MovingAverage + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection/rtt") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection/rtt") + + +class TestAllScenarios(AsyncPyMongoTestCase): + pass + + +def create_test(scenario_def): + def run_scenario(self): + moving_average = MovingAverage() + + if scenario_def["avg_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["avg_rtt_ms"]) + + if scenario_def["new_rtt_ms"] != "NULL": + moving_average.add_sample(scenario_def["new_rtt_ms"]) + + self.assertAlmostEqual(moving_average.get(), scenario_def["new_avg_rtt"]) + + return run_scenario + + +def create_tests(): + for dirpath, _, filenames in os.walk(TEST_PATH): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json.load(scenario_stream) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + +create_tests() + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_session.py b/test/asynchronous/test_session.py new file mode 100644 index 0000000000..ff0feebafc --- /dev/null +++ b/test/asynchronous/test_session.py @@ -0,0 +1,1251 @@ +# Copyright 2017 MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test the client_session module.""" +from __future__ import annotations + +import asyncio +import copy +import sys +import time +from inspect import iscoroutinefunction +from io import BytesIO +from test.asynchronous.helpers import ExceptionCatchingTask +from typing import Any, Callable, List, Set, Tuple + +from pymongo.synchronous.mongo_client import MongoClient + +sys.path[0:0] = [""] + +from test.asynchronous import ( + AsyncIntegrationTest, + AsyncUnitTest, + SkipTest, + async_client_context, + unittest, +) +from test.asynchronous.helpers import client_knobs +from test.utils_shared import ( + EventListener, + HeartbeatEventListener, + OvertCommandListener, + async_wait_until, +) + +from bson import DBRef +from gridfs.asynchronous.grid_file import AsyncGridFS, AsyncGridFSBucket +from pymongo import ASCENDING, AsyncMongoClient, _csot, monitoring +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.common import _MAX_END_SESSIONS +from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure +from pymongo.operations import IndexModel, InsertOne, UpdateOne +from pymongo.read_concern import ReadConcern + +_IS_SYNC = False + + +# Ignore auth commands like saslStart, so we can assert lsid is in all commands. +class SessionTestListener(EventListener): + def started(self, event): + if not event.command_name.startswith("sasl"): + super().started(event) + + def succeeded(self, event): + if not event.command_name.startswith("sasl"): + super().succeeded(event) + + def failed(self, event): + if not event.command_name.startswith("sasl"): + super().failed(event) + + def first_command_started(self): + assert len(self.started_events) >= 1, "No command-started events" + + return self.started_events[0] + + +def session_ids(client): + return [s.session_id for s in copy.copy(client._topology._session_pool)] + + +class TestSession(AsyncIntegrationTest): + client2: AsyncMongoClient + sensitive_commands: Set[str] + + @async_client_context.require_sessions + async def asyncSetUp(self): + await super().asyncSetUp() + # Create a second client so we can make sure clients cannot share + # sessions. + self.client2 = await self.async_rs_or_single_client() + + # Redact no commands, so we can test user-admin commands have "lsid". 
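+        # _SENSITIVE_COMMANDS is module-level state; asyncTearDown restores
+        # the copy saved here.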
+ self.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() + monitoring._SENSITIVE_COMMANDS.clear() + + self.listener = SessionTestListener() + self.session_checker_listener = SessionTestListener() + self.client = await self.async_rs_or_single_client( + event_listeners=[self.listener, self.session_checker_listener] + ) + self.db = self.client.pymongo_test + self.initial_lsids = {s["id"] for s in session_ids(self.client)} + + async def asyncTearDown(self): + monitoring._SENSITIVE_COMMANDS.update(self.sensitive_commands) + await self.client.drop_database("pymongo_test") + used_lsids = self.initial_lsids.copy() + for event in self.session_checker_listener.started_events: + if "lsid" in event.command: + used_lsids.add(event.command["lsid"]["id"]) + + current_lsids = {s["id"] for s in session_ids(self.client)} + self.assertLessEqual(used_lsids, current_lsids) + + await super().asyncTearDown() + + async def _test_ops(self, client, *ops): + listener = client.options.event_listeners[0] + + for f, args, kw in ops: + async with client.start_session() as s: + listener.reset() + s._materialize() + last_use = s._server_session.last_use + start = time.monotonic() + self.assertLessEqual(last_use, start) + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + await f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + self.assertEqual( + s.session_id, + event.command["lsid"], + f"{f.__name__} sent wrong lsid with {event.command_name}", + ) + + self.assertFalse(s.has_ended) + + self.assertTrue(s.has_ended) + with self.assertRaisesRegex(InvalidOperation, "ended session"): + await f(*args, **kw) + + # Test a session cannot be used on another client. + async with self.client2.start_session() as s: + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + with self.assertRaisesRegex( + InvalidOperation, + "Can only use session with the AsyncMongoClient that started it", + ): + await f(*args, **kw) + + # No explicit session. + for f, args, kw in ops: + listener.reset() + await f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + lsids = [] + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + lsids.append(event.command["lsid"]) + + if not (sys.platform.startswith("java") or "PyPy" in sys.version): + # Server session was returned to pool. Ignore interpreters with + # non-deterministic GC. + for lsid in lsids: + self.assertIn( + lsid, + session_ids(client), + f"{f.__name__} did not return implicit session to pool", + ) + + async def test_implicit_sessions_checkout(self): + # "To confirm that implicit sessions only allocate their server session after a + # successful connection checkout" test from Driver Sessions Spec. 
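+        # maxPoolSize=1 forces the concurrent operations below to contend for
+        # a single connection, so a conforming driver should reuse one
+        # implicit session (a single unique lsid across all commands).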
+ succeeded = False + lsid_set = set() + listener = OvertCommandListener() + client = await self.async_rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + # Retry up to 10 times because there is a known race condition that can cause multiple + # sessions to be used: connection check in happens before session check in + for _ in range(10): + cursor = client.db.test.find({}) + ops: List[Tuple[Callable, List[Any]]] = [ + (client.db.test.find_one, [{"_id": 1}]), + (client.db.test.delete_one, [{}]), + (client.db.test.update_one, [{}, {"$set": {"x": 2}}]), + (client.db.test.bulk_write, [[UpdateOne({}, {"$set": {"x": 2}})]]), + (client.db.test.find_one_and_delete, [{}]), + (client.db.test.find_one_and_update, [{}, {"$set": {"x": 1}}]), + (client.db.test.find_one_and_replace, [{}, {}]), + (client.db.test.aggregate, [[{"$limit": 1}]]), + (client.db.test.find, []), + (client.server_info, []), + (client.db.aggregate, [[{"$listLocalSessions": {}}, {"$limit": 1}]]), + (cursor.distinct, ["_id"]), + (client.db.list_collections, []), + ] + tasks = [] + listener.reset() + + async def target(op, *args): + if iscoroutinefunction(op): + res = await op(*args) + else: + res = op(*args) + if isinstance(res, (AsyncCursor, AsyncCommandCursor)): + await res.to_list() + + for op, args in ops: + tasks.append( + ExceptionCatchingTask(target=target, args=[op, *args], name=op.__name__) + ) + await tasks[-1].start() + self.assertEqual(len(tasks), len(ops)) + for t in tasks: + await t.join() + self.assertIsNone(t.exc) + lsid_set.clear() + for i in listener.started_events: + if i.command.get("lsid"): + lsid_set.add(i.command.get("lsid")["id"]) + if len(lsid_set) == 1: + # Break on first success. + succeeded = True + break + self.assertTrue(succeeded, lsid_set) + + async def test_pool_lifo(self): + # "Pool is LIFO" test from Driver Sessions Spec. + a = self.client.start_session() + b = self.client.start_session() + a_id = a.session_id + b_id = b.session_id + await a.end_session() + await b.end_session() + + s = self.client.start_session() + self.assertEqual(b_id, s.session_id) + self.assertNotEqual(a_id, s.session_id) + + s2 = self.client.start_session() + self.assertEqual(a_id, s2.session_id) + self.assertNotEqual(b_id, s2.session_id) + + await s.end_session() + await s2.end_session() + + async def test_end_session(self): + # We test elsewhere that using an ended session throws InvalidOperation. + client = self.client + s = client.start_session() + self.assertFalse(s.has_ended) + self.assertIsNotNone(s.session_id) + + await s.end_session() + self.assertTrue(s.has_ended) + + with self.assertRaisesRegex(InvalidOperation, "ended session"): + s.session_id + + async def test_end_sessions(self): + # Use a new client so that the tearDown hook does not error. + listener = SessionTestListener() + client = await self.async_rs_or_single_client(event_listeners=[listener]) + # Start many sessions. + sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] + for s in sessions: + s._materialize() + for s in sessions: + await s.end_session() + + # Closing the client should end all sessions and clear the pool. + self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) + await client.close() + self.assertEqual(len(client._topology._session_pool), 0) + end_sessions = [e for e in listener.started_events if e.command_name == "endSessions"] + self.assertEqual(len(end_sessions), 2) + + # Closing again should not send any commands. 
+ listener.reset() + await client.close() + self.assertEqual(len(listener.started_events), 0) + + async def test_client(self): + client = self.client + ops: list = [ + (client.server_info, [], {}), + (client.list_database_names, [], {}), + (client.drop_database, ["pymongo_test"], {}), + ] + + await self._test_ops(client, *ops) + + async def test_database(self): + client = self.client + db = client.pymongo_test + ops: list = [ + (db.command, ["ping"], {}), + (db.create_collection, ["collection"], {}), + (db.list_collection_names, [], {}), + (db.validate_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + (db.dereference, [DBRef("collection", 1)], {}), + ] + await self._test_ops(client, *ops) + + @staticmethod + def collection_write_ops(coll): + """Generate database write ops for tests.""" + return [ + (coll.drop, [], {}), + (coll.bulk_write, [[InsertOne({})]], {}), + (coll.insert_one, [{}], {}), + (coll.insert_many, [[{}, {}]], {}), + (coll.replace_one, [{}, {}], {}), + (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.update_many, [{}, {"$set": {"a": 1}}], {}), + (coll.delete_one, [{}], {}), + (coll.delete_many, [{}], {}), + (coll.find_one_and_replace, [{}, {}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), + (coll.find_one_and_delete, [{}, {}], {}), + (coll.rename, ["collection2"], {}), + # Drop collection2 between tests of "rename", above. + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), + (coll.drop_indexes, [], {}), + (coll.aggregate, [[{"$out": "aggout"}]], {}), + ] + + async def test_collection(self): + client = self.client + coll = client.pymongo_test.collection + + # Test some collection methods - the rest are in test_cursor. + ops = self.collection_write_ops(coll) + ops.extend( + [ + (coll.distinct, ["a"], {}), + (coll.find_one, [], {}), + (coll.count_documents, [{}], {}), + (coll.list_indexes, [], {}), + (coll.index_information, [], {}), + (coll.options, [], {}), + (coll.aggregate, [[]], {}), + ] + ) + + await self._test_ops(client, *ops) + + async def test_cursor_clone(self): + coll = self.client.pymongo_test.collection + # Ensure some batches. + await coll.insert_many({} for _ in range(10)) + self.addAsyncCleanup(coll.drop) + + async with self.client.start_session() as s: + cursor = coll.find(session=s) + self.assertIs(cursor.session, s) + clone = cursor.clone() + self.assertIs(clone.session, s) + + # No explicit session. + cursor = coll.find(batch_size=2) + await anext(cursor) + # Session is "owned" by cursor. + self.assertIsNone(cursor.session) + self.assertIsNotNone(cursor._session) + clone = cursor.clone() + await anext(clone) + self.assertIsNone(clone.session) + self.assertIsNotNone(clone._session) + self.assertIsNot(cursor._session, clone._session) + await cursor.close() + await clone.close() + + async def test_cursor(self): + listener = self.listener + client = self.client + coll = client.pymongo_test.collection + await coll.insert_many([{} for _ in range(1000)]) + + # Test all cursor methods. 
+ if _IS_SYNC: + # getitem is only supported in the synchronous API + ops = [ + ("find", lambda session: coll.find(session=session).to_list()), + ("getitem", lambda session: coll.find(session=session)[0]), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), + ] + else: + ops = [ + ("find", lambda session: coll.find(session=session).to_list()), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), + ] + + for name, f in ops: + async with client.start_session() as s: + listener.reset() + await f(session=s) + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{name} sent no lsid with {event.command_name}", + ) + + self.assertEqual( + s.session_id, + event.command["lsid"], + f"{name} sent wrong lsid with {event.command_name}", + ) + + with self.assertRaisesRegex(InvalidOperation, "ended session"): + await f(session=s) + + # No explicit session. + for name, f in ops: + listener.reset() + await f(session=None) + event0 = listener.first_command_started() + self.assertIn("lsid", event0.command, f"{name} sent no lsid with {event0.command_name}") + + lsid = event0.command["lsid"] + + for event in listener.started_events[1:]: + self.assertIn( + "lsid", event.command, f"{name} sent no lsid with {event.command_name}" + ) + + self.assertEqual( + lsid, + event.command["lsid"], + f"{name} sent wrong lsid with {event.command_name}", + ) + + async def test_gridfs(self): + client = self.client + fs = AsyncGridFS(client.pymongo_test) + + async def new_file(session=None): + grid_file = fs.new_file(_id=1, filename="f", session=session) + # 1 MB, 5 chunks, to test that each chunk is fetched with same lsid. 
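+            # (The default GridFS chunk size is 255 KB, so 1 MB spans 5 chunks.)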
+ await grid_file.write(b"a" * 1048576) + await grid_file.close() + + async def find(session=None): + files = await fs.find({"_id": 1}, session=session).to_list() + for f in files: + await f.read() + + async def get(session=None): + await (await fs.get(1, session=session)).read() + + async def get_version(session=None): + await (await fs.get_version("f", session=session)).read() + + async def get_last_version(session=None): + await (await fs.get_last_version("f", session=session)).read() + + async def find_list(session=None): + await fs.find(session=session).to_list() + + await self._test_ops( + client, + (new_file, [], {}), + (fs.put, [b"data"], {}), + (get, [], {}), + (get_version, [], {}), + (get_last_version, [], {}), + (fs.list, [], {}), + (fs.find_one, [1], {}), + (find_list, [], {}), + (fs.exists, [1], {}), + (find, [], {}), + (fs.delete, [1], {}), + ) + + async def test_gridfs_bucket(self): + client = self.client + bucket = AsyncGridFSBucket(client.pymongo_test) + + async def upload(session=None): + stream = bucket.open_upload_stream("f", session=session) + await stream.write(b"a" * 1048576) + await stream.close() + + async def upload_with_id(session=None): + stream = bucket.open_upload_stream_with_id(1, "f1", session=session) + await stream.write(b"a" * 1048576) + await stream.close() + + async def open_download_stream(session=None): + stream = await bucket.open_download_stream(1, session=session) + await stream.read() + + async def open_download_stream_by_name(session=None): + stream = await bucket.open_download_stream_by_name("f", session=session) + await stream.read() + + async def find(session=None): + files = await bucket.find({"_id": 1}, session=session).to_list() + for f in files: + await f.read() + + sio = BytesIO() + + await self._test_ops( + client, + (upload, [], {}), + (upload_with_id, [], {}), + (bucket.upload_from_stream, ["f", b"data"], {}), + (bucket.upload_from_stream_with_id, [2, "f", b"data"], {}), + (open_download_stream, [], {}), + (open_download_stream_by_name, [], {}), + (bucket.download_to_stream, [1, sio], {}), + (bucket.download_to_stream_by_name, ["f", sio], {}), + (find, [], {}), + (bucket.rename, [1, "f2"], {}), + (bucket.rename_by_name, ["f2", "f3"], {}), + # Delete both files so _test_ops can run these operations twice. + (bucket.delete, [1], {}), + (bucket.delete_by_name, ["f"], {}), + ) + + async def test_gridfsbucket_cursor(self): + client = self.client + bucket = AsyncGridFSBucket(client.pymongo_test) + + for file_id in 1, 2: + stream = bucket.open_upload_stream_with_id(file_id, str(file_id)) + await stream.write(b"a" * 1048576) + await stream.close() + + async with client.start_session() as s: + cursor = bucket.find(session=s) + async for f in cursor: + await f.read() + + self.assertFalse(s.has_ended) + + self.assertTrue(s.has_ended) + + # No explicit session. + cursor = bucket.find(batch_size=1) + files = [await cursor.next()] + + s = cursor._session + self.assertFalse(s.has_ended) + cursor.__del__() + + self.assertTrue(s.has_ended) + self.assertIsNone(cursor._session) + + # Files are still valid, they use their own sessions. + for f in files: + await f.read() + + # Explicit session. + async with client.start_session() as s: + cursor = bucket.find(session=s) + assert cursor.session is not None + s = cursor.session + files = await cursor.to_list() + cursor.__del__() + self.assertFalse(s.has_ended) + + for f in files: + await f.read() + + for f in files: + # Attempt to read the file again. 
+ await f.seek(0) + with self.assertRaisesRegex(InvalidOperation, "ended session"): + await f.read() + + async def test_aggregate(self): + client = self.client + coll = client.pymongo_test.collection + + async def agg(session=None): + await (await coll.aggregate([], batchSize=2, session=session)).to_list() + + # With empty collection. + await self._test_ops(client, (agg, [], {})) + + # Now with documents. + await coll.insert_many([{} for _ in range(10)]) + self.addAsyncCleanup(coll.drop) + await self._test_ops(client, (agg, [], {})) + + async def test_killcursors(self): + client = self.client + coll = client.pymongo_test.collection + await coll.insert_many([{} for _ in range(10)]) + + async def explicit_close(session=None): + cursor = coll.find(batch_size=2, session=session) + await anext(cursor) + await cursor.close() + + await self._test_ops(client, (explicit_close, [], {})) + + async def test_aggregate_error(self): + listener = self.listener + client = self.client + coll = client.pymongo_test.collection + # 3.6.0 mongos only validates the aggregate pipeline when the + # database exists. + await coll.insert_one({}) + listener.reset() + + with self.assertRaises(OperationFailure): + await coll.aggregate([{"$badOperation": {"bar": 1}}]) + + event = listener.first_command_started() + self.assertEqual(event.command_name, "aggregate") + lsid = event.command["lsid"] + # Session was returned to pool despite error. + self.assertIn(lsid, session_ids(client)) + + async def _test_cursor_helper(self, create_cursor, close_cursor): + coll = self.client.pymongo_test.collection + await coll.insert_many([{} for _ in range(1000)]) + + cursor = await create_cursor(coll, None) + await anext(cursor) + # Session is "owned" by cursor. + session = cursor._session + self.assertIsNotNone(session) + lsid = session.session_id + await anext(cursor) + + # Cursor owns its session unto death. + self.assertNotIn(lsid, session_ids(self.client)) + await close_cursor(cursor) + self.assertIn(lsid, session_ids(self.client)) + + # An explicit session is not ended by cursor.close() or list(cursor). 
+ async with self.client.start_session() as s: + cursor = await create_cursor(coll, s) + await anext(cursor) + await close_cursor(cursor) + self.assertFalse(s.has_ended) + lsid = s.session_id + + self.assertTrue(s.has_ended) + self.assertIn(lsid, session_ids(self.client)) + + async def test_cursor_close(self): + async def find(coll, session): + return coll.find(session=session) + + await self._test_cursor_helper(find, lambda cursor: cursor.close()) + + async def test_command_cursor_close(self): + async def aggregate(coll, session): + return await coll.aggregate([], session=session) + + await self._test_cursor_helper(aggregate, lambda cursor: cursor.close()) + + async def test_cursor_del(self): + async def find(coll, session): + return coll.find(session=session) + + async def delete(cursor): + return cursor.__del__() + + await self._test_cursor_helper(find, delete) + + async def test_command_cursor_del(self): + async def aggregate(coll, session): + return await coll.aggregate([], session=session) + + async def delete(cursor): + return cursor.__del__() + + await self._test_cursor_helper(aggregate, delete) + + async def test_cursor_exhaust(self): + async def find(coll, session): + return coll.find(session=session) + + await self._test_cursor_helper(find, lambda cursor: cursor.to_list()) + + async def test_command_cursor_exhaust(self): + async def aggregate(coll, session): + return await coll.aggregate([], session=session) + + await self._test_cursor_helper(aggregate, lambda cursor: cursor.to_list()) + + async def test_cursor_limit_reached(self): + async def find(coll, session): + return coll.find(limit=4, batch_size=2, session=session) + + await self._test_cursor_helper( + find, + lambda cursor: cursor.to_list(), + ) + + async def test_command_cursor_limit_reached(self): + async def aggregate(coll, session): + return await coll.aggregate([], batchSize=900, session=session) + + await self._test_cursor_helper( + aggregate, + lambda cursor: cursor.to_list(), + ) + + async def _test_unacknowledged_ops(self, client, *ops): + listener = client.options.event_listeners[0] + + for f, args, kw in ops: + async with client.start_session() as s: + listener.reset() + # In case "f" modifies its inputs. + args = copy.copy(args) + kw = copy.copy(kw) + kw["session"] = s + with self.assertRaises( + ConfigurationError, msg=f"{f.__name__} did not raise ConfigurationError" + ): + await f(*args, **kw) + if f.__name__ == "create_collection": + # create_collection runs listCollections first. + event = listener.started_events.pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + # Should not run any command before raising an error. + self.assertFalse(listener.started_events, f"{f.__name__} sent command") + + self.assertTrue(s.has_ended) + + # Unacknowledged write without a session does not send an lsid. + for f, args, kw in ops: + listener.reset() + await f(*args, **kw) + self.assertGreaterEqual(len(listener.started_events), 1) + + if f.__name__ == "create_collection": + # create_collection runs listCollections first. 
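+                # Pop it so only the write's own events are checked for an
+                # lsid below.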
+ event = listener.started_events.pop(0) + self.assertEqual("listCollections", event.command_name) + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", + ) + + for event in listener.started_events: + self.assertNotIn( + "lsid", event.command, f"{f.__name__} sent lsid with {event.command_name}" + ) + + async def test_unacknowledged_writes(self): + # Ensure the collection exists. + await self.client.pymongo_test.test_unacked_writes.insert_one({}) + client = await self.async_rs_or_single_client(w=0, event_listeners=[self.listener]) + db = client.pymongo_test + coll = db.test_unacked_writes + ops: list = [ + (client.drop_database, [db.name], {}), + (db.create_collection, ["collection"], {}), + (db.drop_collection, ["collection"], {}), + ] + ops.extend(self.collection_write_ops(coll)) + await self._test_unacknowledged_ops(client, *ops) + + async def drop_db(): + try: + await self.client.drop_database(db.name) + return True + except OperationFailure as exc: + # Try again on BackgroundOperationInProgressForDatabase and + # BackgroundOperationInProgressForNamespace. + if exc.code in (12586, 12587): + return False + raise + + await async_wait_until(drop_db, "dropped database after w=0 writes") + + async def test_snapshot_incompatible_with_causal_consistency(self): + async with self.client.start_session(causal_consistency=False, snapshot=False): + pass + async with self.client.start_session(causal_consistency=False, snapshot=True): + pass + async with self.client.start_session(causal_consistency=True, snapshot=False): + pass + with self.assertRaises(ConfigurationError): + async with self.client.start_session(causal_consistency=True, snapshot=True): + pass + + async def test_session_not_copyable(self): + client = self.client + async with client.start_session() as s: + self.assertRaises(TypeError, lambda: copy.copy(s)) + + +class TestCausalConsistency(AsyncUnitTest): + listener: SessionTestListener + client: AsyncMongoClient + + @async_client_context.require_sessions + async def asyncSetUp(self): + await super().asyncSetUp() + self.listener = SessionTestListener() + self.client = await self.async_rs_or_single_client(event_listeners=[self.listener]) + + @async_client_context.require_no_standalone + async def test_core(self): + async with self.client.start_session() as sess: + self.assertIsNone(sess.cluster_time) + self.assertIsNone(sess.operation_time) + self.listener.reset() + await self.client.pymongo_test.test.find_one(session=sess) + started = self.listener.started_events[0] + cmd = started.command + self.assertIsNone(cmd.get("readConcern")) + op_time = sess.operation_time + self.assertIsNotNone(op_time) + succeeded = self.listener.succeeded_events[0] + reply = succeeded.reply + self.assertEqual(op_time, reply.get("operationTime")) + + # No explicit session + await self.client.pymongo_test.test.insert_one({}) + self.assertEqual(sess.operation_time, op_time) + self.listener.reset() + try: + await self.client.pymongo_test.command("doesntexist", session=sess) + except: + pass + failed = self.listener.failed_events[0] + failed_op_time = failed.failure.get("operationTime") + # Some older builds of MongoDB 3.5 / 3.6 return None for + # operationTime when a command fails. Make sure we don't + # change operation_time to None. 
+ if failed_op_time is None: + self.assertIsNotNone(sess.operation_time) + else: + self.assertEqual(sess.operation_time, failed_op_time) + + async with self.client.start_session() as sess2: + self.assertIsNone(sess2.cluster_time) + self.assertIsNone(sess2.operation_time) + self.assertRaises(TypeError, sess2.advance_cluster_time, 1) + self.assertRaises(ValueError, sess2.advance_cluster_time, {}) + self.assertRaises(TypeError, sess2.advance_operation_time, 1) + # No error + assert sess.cluster_time is not None + assert sess.operation_time is not None + sess2.advance_cluster_time(sess.cluster_time) + sess2.advance_operation_time(sess.operation_time) + self.assertEqual(sess.cluster_time, sess2.cluster_time) + self.assertEqual(sess.operation_time, sess2.operation_time) + + async def _test_reads(self, op, exception=None): + coll = self.client.pymongo_test.test + async with self.client.start_session() as sess: + await coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + if exception: + with self.assertRaises(exception): + await op(coll, sess) + else: + await op(coll, sess) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertEqual(operation_time, act) + + @async_client_context.require_no_standalone + async def test_reads(self): + # Make sure the collection exists. + await self.client.pymongo_test.test.insert_one({}) + + async def aggregate(coll, session): + return await (await coll.aggregate([], session=session)).to_list() + + async def aggregate_raw(coll, session): + return await (await coll.aggregate_raw_batches([], session=session)).to_list() + + async def find_raw(coll, session): + return await coll.find_raw_batches({}, session=session).to_list() + + await self._test_reads(aggregate) + await self._test_reads(lambda coll, session: coll.find({}, session=session).to_list()) + await self._test_reads(lambda coll, session: coll.find_one({}, session=session)) + await self._test_reads(lambda coll, session: coll.count_documents({}, session=session)) + await self._test_reads(lambda coll, session: coll.distinct("foo", session=session)) + await self._test_reads(aggregate_raw) + await self._test_reads(find_raw) + + with self.assertRaises(ConfigurationError): + await self._test_reads( + lambda coll, session: coll.estimated_document_count(session=session) + ) + + async def _test_writes(self, op): + coll = self.client.pymongo_test.test + async with self.client.start_session() as sess: + await op(coll, sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + await coll.find_one({}, session=sess) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertEqual(operation_time, act) + + @async_client_context.require_no_standalone + async def test_writes(self): + await self._test_writes( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) + await self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) + await self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) + await self._test_writes( + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) + await self._test_writes( + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) + await self._test_writes( + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, 
session=session) + ) + await self._test_writes(lambda coll, session: coll.delete_one({}, session=session)) + await self._test_writes(lambda coll, session: coll.delete_many({}, session=session)) + await self._test_writes( + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) + await self._test_writes( + lambda coll, session: coll.find_one_and_update( + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + await self._test_writes( + lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session) + ) + await self._test_writes(lambda coll, session: coll.create_index("foo", session=session)) + await self._test_writes( + lambda coll, session: coll.create_indexes( + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + await self._test_writes(lambda coll, session: coll.drop_index("foo_1", session=session)) + await self._test_writes(lambda coll, session: coll.drop_indexes(session=session)) + + async def _test_no_read_concern(self, op): + coll = self.client.pymongo_test.test + async with self.client.start_session() as sess: + await coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + self.listener.reset() + await op(coll, sess) + rc = self.listener.started_events[0].command.get("readConcern") + self.assertIsNone(rc) + + @async_client_context.require_no_standalone + async def test_writes_do_not_include_read_concern(self): + await self._test_no_read_concern( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) + await self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) + await self._test_no_read_concern( + lambda coll, session: coll.insert_many([{}], session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.replace_one({"_id": 1}, {"x": 1}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.update_one({}, {"$set": {"X": 1}}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.update_many({}, {"$set": {"x": 1}}, session=session) + ) + await self._test_no_read_concern(lambda coll, session: coll.delete_one({}, session=session)) + await self._test_no_read_concern( + lambda coll, session: coll.delete_many({}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.find_one_and_replace({"x": 1}, {"y": 1}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.find_one_and_update( + {"y": 1}, {"$set": {"x": 1}}, session=session + ) + ) + await self._test_no_read_concern( + lambda coll, session: coll.find_one_and_delete({"x": 1}, session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.create_index("foo", session=session) + ) + await self._test_no_read_concern( + lambda coll, session: coll.create_indexes( + [IndexModel([("bar", ASCENDING)])], session=session + ) + ) + await self._test_no_read_concern( + lambda coll, session: coll.drop_index("foo_1", session=session) + ) + await self._test_no_read_concern(lambda coll, session: coll.drop_indexes(session=session)) + + # Not a write, but explain also doesn't support readConcern. 
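Taken together, the helpers above pin down one invariant: a causally consistent session stamps its next read with the `operation_time` of its last acknowledged operation, while writes never carry a `readConcern`. A minimal sketch of the same hand-off across two sessions, mirroring the `advance_*` calls tested earlier; the `db.coll` namespace is illustrative, not part of the suite:

```python
async def causal_handoff(client):
    # Write in one session, then read causally-after it in another.
    async with client.start_session() as s1:
        await client.db.coll.insert_one({"x": 1}, session=s1)
        async with client.start_session(causal_consistency=True) as s2:
            # Copy s1's causal state; s2's next read sends afterClusterTime.
            s2.advance_cluster_time(s1.cluster_time)
            s2.advance_operation_time(s1.operation_time)
            return await client.db.coll.find_one({"x": 1}, session=s2)
```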
+ await self._test_no_read_concern( + lambda coll, session: coll.find({}, session=session).explain() + ) + + @async_client_context.require_no_standalone + async def test_get_more_does_not_include_read_concern(self): + coll = self.client.pymongo_test.test + async with self.client.start_session() as sess: + await coll.find_one({}, session=sess) + operation_time = sess.operation_time + self.assertIsNotNone(operation_time) + await coll.insert_many([{}, {}]) + cursor = coll.find({}).batch_size(1) + await anext(cursor) + self.listener.reset() + await cursor.to_list() + started = self.listener.started_events[0] + self.assertEqual(started.command_name, "getMore") + self.assertIsNone(started.command.get("readConcern")) + + async def test_session_not_causal(self): + async with self.client.start_session(causal_consistency=False) as s: + await self.client.pymongo_test.test.insert_one({}, session=s) + self.listener.reset() + await self.client.pymongo_test.test.find_one({}, session=s) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertIsNone(act) + + @async_client_context.require_standalone + async def test_server_not_causal(self): + async with self.client.start_session(causal_consistency=True) as s: + await self.client.pymongo_test.test.insert_one({}, session=s) + self.listener.reset() + await self.client.pymongo_test.test.find_one({}, session=s) + act = ( + self.listener.started_events[0] + .command.get("readConcern", {}) + .get("afterClusterTime") + ) + self.assertIsNone(act) + + @async_client_context.require_no_standalone + async def test_read_concern(self): + async with self.client.start_session(causal_consistency=True) as s: + coll = self.client.pymongo_test.test + await coll.insert_one({}, session=s) + self.listener.reset() + await coll.find_one({}, session=s) + read_concern = self.listener.started_events[0].command.get("readConcern") + self.assertIsNotNone(read_concern) + self.assertIsNone(read_concern.get("level")) + self.assertIsNotNone(read_concern.get("afterClusterTime")) + + coll = coll.with_options(read_concern=ReadConcern("majority")) + self.listener.reset() + await coll.find_one({}, session=s) + read_concern = self.listener.started_events[0].command.get("readConcern") + self.assertIsNotNone(read_concern) + self.assertEqual(read_concern.get("level"), "majority") + self.assertIsNotNone(read_concern.get("afterClusterTime")) + + @async_client_context.require_no_standalone + async def test_cluster_time_with_server_support(self): + await self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + await self.client.pymongo_test.test.find_one({}) + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") + self.assertIsNotNone(after_cluster_time) + + @async_client_context.require_standalone + async def test_cluster_time_no_server_support(self): + await self.client.pymongo_test.test.insert_one({}) + self.listener.reset() + await self.client.pymongo_test.test.find_one({}) + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") + self.assertIsNone(after_cluster_time) + + +class TestClusterTime(AsyncIntegrationTest): + async def asyncSetUp(self): + await super().asyncSetUp() + if "$clusterTime" not in (await async_client_context.hello): + raise SkipTest("$clusterTime not supported") + + # Sessions prose test: 3) $clusterTime in commands + async def test_cluster_time(self): + listener = SessionTestListener() + client = await 
self.async_rs_or_single_client(event_listeners=[listener])
+        collection = client.pymongo_test.collection
+        # Prepare for tests of find() and aggregate().
+        await collection.insert_many([{} for _ in range(10)])
+        self.addAsyncCleanup(collection.drop)
+        self.addAsyncCleanup(client.pymongo_test.collection2.drop)
+
+        async def rename_and_drop():
+            # Ensure collection exists.
+            await collection.insert_one({})
+            await collection.rename("collection2")
+            await client.pymongo_test.collection2.drop()
+
+        async def insert_and_find():
+            cursor = collection.find().batch_size(1)
+            for _ in range(10):
+                # Advance the cluster time.
+                await collection.insert_one({})
+                await anext(cursor)
+
+            await cursor.close()
+
+        async def insert_and_aggregate():
+            cursor = (await collection.aggregate([], batchSize=1)).batch_size(1)
+            for _ in range(5):
+                # Advance the cluster time.
+                await collection.insert_one({})
+                await anext(cursor)
+
+            await cursor.close()
+
+        async def aggregate():
+            await (await collection.aggregate([])).to_list()
+
+        ops = [
+            # Tests from Driver Sessions Spec.
+            ("ping", lambda: client.admin.command("ping")),
+            ("aggregate", lambda: aggregate()),
+            ("find", lambda: collection.find().to_list()),
+            ("insert_one", lambda: collection.insert_one({})),
+            # Additional PyMongo tests.
+            ("insert_and_find", insert_and_find),
+            ("insert_and_aggregate", insert_and_aggregate),
+            ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})),
+            ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})),
+            ("delete_one", lambda: collection.delete_one({})),
+            ("delete_many", lambda: collection.delete_many({})),
+            ("bulk_write", lambda: collection.bulk_write([InsertOne({})])),
+            ("rename_and_drop", rename_and_drop),
+        ]
+
+        for name, f in ops:
+            listener.reset()
+            # Call f() twice, insert to advance clusterTime, call f() again.
+            await f()
+            await f()
+            await collection.insert_one({})
+            await f()
+
+            self.assertGreaterEqual(len(listener.started_events), 1)
+            for i, event in enumerate(listener.started_events):
+                self.assertIn(
+                    "$clusterTime",
+                    event.command,
+                    f"{name} sent no $clusterTime with {event.command_name}",
+                )
+
+                if i > 0:
+                    succeeded = listener.succeeded_events[i - 1]
+                    self.assertIn(
+                        "$clusterTime",
+                        succeeded.reply,
+                        f"{name} received no $clusterTime with {succeeded.command_name}",
+                    )
+
+                    self.assertGreaterEqual(
+                        event.command["$clusterTime"]["clusterTime"],
+                        succeeded.reply["$clusterTime"]["clusterTime"],
+                        f"{name} sent wrong $clusterTime with {event.command_name}",
+                    )
+
+    # Sessions prose test: 20) Drivers do not gossip `$clusterTime` on SDAM commands
+    async def test_cluster_time_not_used_by_sdam(self):
+        heartbeat_listener = HeartbeatEventListener()
+        cmd_listener = OvertCommandListener()
+        with client_knobs(min_heartbeat_interval=0.01):
+            c1 = await self.async_single_client(
+                event_listeners=[heartbeat_listener, cmd_listener], heartbeatFrequencyMS=10
+            )
+            cluster_time = (await c1.admin.command({"ping": 1}))["$clusterTime"]
+            self.assertEqual(c1._topology.max_cluster_time(), cluster_time)
+
+            # Advance the server's $clusterTime by performing an insert via another client.
+            await self.db.test.insert_one({"advance": "$clusterTime"})
+            # Wait until the client C1 processes the next pair of SDAM heartbeat
+            # started + succeeded events.
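Both tests above lean on how `$clusterTime` documents are ordered: by their `"clusterTime"` Timestamp, which compares as a `(time, inc)` pair. A standalone sketch of the merge rule the assertions encode; the helper is illustrative (the driver tracks this inside its topology), not driver code:

```python
from bson.timestamp import Timestamp

def merge_cluster_time(current, reply):
    # Keep whichever $clusterTime document carries the larger Timestamp.
    if current is None or reply["clusterTime"] > current["clusterTime"]:
        return reply
    return current

older = {"clusterTime": Timestamp(100, 1)}
newer = {"clusterTime": Timestamp(100, 2)}
assert merge_cluster_time(older, newer) is newer
assert merge_cluster_time(newer, older) is newer
```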
+ heartbeat_listener.reset() + + async def next_heartbeat(): + events = heartbeat_listener.events + for i in range(len(events) - 1): + if isinstance(events[i], monitoring.ServerHeartbeatStartedEvent): + if isinstance(events[i + 1], monitoring.ServerHeartbeatSucceededEvent): + return True + return False + + await async_wait_until( + next_heartbeat, "never found pair of heartbeat started + succeeded events" + ) + # Assert that C1's max $clusterTime is still the same and has not been updated by SDAM. + cmd_listener.reset() + await c1.admin.command({"ping": 1}) + started = cmd_listener.started_events[0] + self.assertEqual(started.command_name, "ping") + self.assertEqual(started.command["$clusterTime"], cluster_time) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_sessions_unified.py b/test/asynchronous/test_sessions_unified.py new file mode 100644 index 0000000000..b4cbac5704 --- /dev/null +++ b/test/asynchronous/test_sessions_unified.py @@ -0,0 +1,40 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Sessions unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sessions") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sessions") + + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_srv_polling.py b/test/asynchronous/test_srv_polling.py new file mode 100644 index 0000000000..3d4aed1bc1 --- /dev/null +++ b/test/asynchronous/test_srv_polling.py @@ -0,0 +1,387 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Run the SRV support tests.""" +from __future__ import annotations + +import asyncio +import sys +import time +from test.asynchronous.utils import flaky +from test.utils_shared import FunctionCallRecorder +from typing import Any + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncPyMongoTestCase, client_knobs, unittest +from test.asynchronous.utils import async_wait_until + +import pymongo +from pymongo import common +from pymongo.asynchronous.srv_resolver import _have_dnspython +from pymongo.errors import ConfigurationError + +_IS_SYNC = False + +WAIT_TIME = 0.1 + + +class SrvPollingKnobs: + def __init__( + self, + ttl_time=None, + min_srv_rescan_interval=None, + nodelist_callback=None, + count_resolver_calls=False, + ): + self.ttl_time = ttl_time + self.min_srv_rescan_interval = min_srv_rescan_interval + self.nodelist_callback = nodelist_callback + self.count_resolver_calls = count_resolver_calls + + self.old_min_srv_rescan_interval = None + self.old_dns_resolver_response = None + + def enable(self): + self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL + self.old_dns_resolver_response = ( + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl + ) + + if self.min_srv_rescan_interval is not None: + common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval + + async def mock_get_hosts_and_min_ttl(resolver, *args): + assert self.old_dns_resolver_response is not None + nodes, ttl = await self.old_dns_resolver_response(resolver) + if self.nodelist_callback is not None: + nodes = self.nodelist_callback() + if self.ttl_time is not None: + ttl = self.ttl_time + return nodes, ttl + + patch_func: Any + if self.count_resolver_calls: + patch_func = FunctionCallRecorder(mock_get_hosts_and_min_ttl) + else: + patch_func = mock_get_hosts_and_min_ttl + + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore + + def __enter__(self): + self.enable() + + def disable(self): + common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore + pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + self.old_dns_resolver_response + ) + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + +class TestSrvPolling(AsyncPyMongoTestCase): + BASE_SRV_RESPONSE = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27018), + ] + + CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" + + async def asyncSetUp(self): + # Patch timeouts to ensure short rescan SRV interval. + self.client_knobs = client_knobs( + heartbeat_frequency=WAIT_TIME, + min_heartbeat_interval=WAIT_TIME, + events_queue_frequency=WAIT_TIME, + ) + self.client_knobs.enable() + + async def asyncTearDown(self): + self.client_knobs.disable() + + def get_nodelist(self, client): + return client._topology.description.server_descriptions().keys() + + async def assert_nodelist_change(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): + """Check if the client._topology eventually sees all nodes in the + expected_nodelist. + """ + + def predicate(): + nodelist = self.get_nodelist(client) + if set(expected_nodelist) == set(nodelist): + return True + return False + + await async_wait_until(predicate, "see expected nodelist", timeout=timeout) + + async def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): + """Check if the client._topology ever deviates from seeing all nodes + in the expected_nodelist. 
Consistency is checked while waiting up to the given
+        timeout. Also check that the resolver is called at least once.
+        """
+
+        def predicate():
+            if set(expected_nodelist) == set(self.get_nodelist(client)):
+                return (
+                    pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count
+                    >= 1
+                )
+            return False
+
+        await async_wait_until(predicate, "Node list equals expected nodelist", timeout=timeout)
+
+        nodelist = self.get_nodelist(client)
+        if set(expected_nodelist) != set(nodelist):
+            msg = "Client nodelist %s changed unexpectedly (expected %s)"
+            self.fail(msg % (nodelist, expected_nodelist))
+        self.assertGreaterEqual(
+            pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count,  # type: ignore
+            1,
+            "resolver was never called",
+        )
+        return True
+
+    async def run_scenario(self, dns_response, expect_change):
+        self.assertEqual(_have_dnspython(), True)
+        if callable(dns_response):
+            dns_resolver_response = dns_response
+        else:
+
+            def dns_resolver_response():
+                return dns_response
+
+        if expect_change:
+            assertion_method = self.assert_nodelist_change
+            count_resolver_calls = False
+            expected_response = dns_response
+        else:
+            assertion_method = self.assert_nodelist_nochange
+            count_resolver_calls = True
+            expected_response = self.BASE_SRV_RESPONSE
+
+        # Patch timeouts to ensure short test running times.
+        with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME):
+            client = self.simple_client(self.CONNECTION_STRING)
+            await client.aconnect()
+            await self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client)
+            # Patch list of hosts returned by DNS query.
+            with SrvPollingKnobs(
+                nodelist_callback=dns_resolver_response, count_resolver_calls=count_resolver_calls
+            ):
+                await assertion_method(expected_response, client)
+
+            # Close the client early to avoid affecting the next scenario run.
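`SrvPollingKnobs` is a plain save/patch/restore monkeypatch: `enable()` stores the bound `_SrvResolver.get_hosts_and_min_ttl`, installs a wrapper, and `disable()` puts the original back. The same shape on a toy resolver, as a self-contained sketch:

```python
class ToyResolver:
    def lookup(self):
        return [("a.example.com", 27017)], 60  # (nodes, ttl in seconds)

original = ToyResolver.lookup

def patched(self):
    nodes, _ttl = original(self)
    return nodes, 0.1  # shorten the TTL, as ttl_time does above

ToyResolver.lookup = patched
try:
    assert ToyResolver().lookup() == ([("a.example.com", 27017)], 0.1)
finally:
    ToyResolver.lookup = original  # mirrors SrvPollingKnobs.disable()
```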
+ await client.close() + + async def test_addition(self): + response = self.BASE_SRV_RESPONSE[:] + response.append(("localhost.test.build.10gen.cc", 27019)) + await self.run_scenario(response, True) + + async def test_removal(self): + response = self.BASE_SRV_RESPONSE[:] + response.remove(("localhost.test.build.10gen.cc", 27018)) + await self.run_scenario(response, True) + + async def test_replace_one(self): + response = self.BASE_SRV_RESPONSE[:] + response.remove(("localhost.test.build.10gen.cc", 27018)) + response.append(("localhost.test.build.10gen.cc", 27019)) + await self.run_scenario(response, True) + + async def test_replace_both_with_one(self): + response = [("localhost.test.build.10gen.cc", 27019)] + await self.run_scenario(response, True) + + async def test_replace_both_with_two(self): + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + await self.run_scenario(response, True) + + async def test_dns_failures(self): + from dns import exception + + for exc in (exception.FormError, exception.TooBig, exception.Timeout): + + def response_callback(*args): + raise exc("DNS Failure!") + + await self.run_scenario(response_callback, False) + + @flaky(reason="PYTHON-5500", max_runs=3) + async def test_dns_failures_logging(self): + from dns import exception + + with self.assertLogs("pymongo.topology", level="DEBUG") as cm: + + def response_callback(*args): + raise exception.Timeout("DNS Failure!") + + await self.run_scenario(response_callback, False) + + srv_failure_logs = [r for r in cm.records if "SRV monitor check failed" in r.getMessage()] + self.assertEqual(len(srv_failure_logs), 1) + + async def test_dns_record_lookup_empty(self): + response: list = [] + await self.run_scenario(response, False) + + async def _test_recover_from_initial(self, initial_callback): + # Construct a valid final response callback distinct from base. + response_final = self.BASE_SRV_RESPONSE[:] + response_final.pop() + + def final_callback(): + return response_final + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=initial_callback, + count_resolver_calls=True, + ): + # Client uses unpatched method to get initial nodelist + client = self.simple_client(self.CONNECTION_STRING) + await client.aconnect() + # Invalid DNS resolver response should not change nodelist. + await self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME, nodelist_callback=final_callback + ): + # Nodelist should reflect new valid DNS resolver response. 
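The failure-mode tests above (`test_dns_failures`, `test_dns_record_lookup_empty`) all assert one error-tolerance property: a failed or empty SRV poll must leave the last known-good nodelist untouched. The same property distilled into a few lines, independent of the driver:

```python
def apply_poll(current_nodes, lookup):
    # Keep the last known-good nodes when the poll fails or returns nothing.
    try:
        nodes = lookup()
    except Exception:
        return current_nodes
    return nodes or current_nodes

nodes = [("localhost.test.build.10gen.cc", 27017)]

def failing_lookup():
    raise TimeoutError("DNS Failure!")

assert apply_poll(nodes, failing_lookup) == nodes
assert apply_poll(nodes, lambda: []) == nodes
```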
+ await self.assert_nodelist_change(response_final, client) + + @flaky(reason="PYTHON-5315") + async def test_recover_from_initially_empty_seedlist(self): + def empty_seedlist(): + return [] + + await self._test_recover_from_initial(empty_seedlist) + + @flaky(reason="PYTHON-5315") + async def test_recover_from_initially_erroring_seedlist(self): + def erroring_seedlist(): + raise ConfigurationError + + await self._test_recover_from_initial(erroring_seedlist) + + async def test_10_all_dns_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27017), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=0) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_11_all_dns_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_12_new_dns_randomly_selected(self): + response = [ + ("localhost.test.build.10gen.cc", 27020), + ("localhost.test.build.10gen.cc", 27019), + ("localhost.test.build.10gen.cc", 27017), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await asyncio.sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) + final_topology = set(client.topology_description.server_descriptions()) + self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology) + self.assertEqual(len(final_topology), 2) + + async def test_does_not_flipflop(self): + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=1) + await client.aconnect() + old = set(client.topology_description.server_descriptions()) + await asyncio.sleep(4 * WAIT_TIME) + new = set(client.topology_description.server_descriptions()) + self.assertSetEqual(old, new) + + async def test_srv_service_name(self): + # Construct a valid final response callback distinct from base. 
+ response = [ + ("localhost.test.build.10gen.cc.", 27019), + ("localhost.test.build.10gen.cc.", 27020), + ] + + def nodelist_callback(): + return response + + with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): + client = self.simple_client( + "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" + ) + await client.aconnect() + with SrvPollingKnobs(nodelist_callback=nodelist_callback): + await self.assert_nodelist_change(response, client) + + async def test_srv_waits_to_poll(self): + modified = [("localhost.test.build.10gen.cc", 27019)] + + def resolver_response(): + return modified + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=resolver_response, + ): + client = self.simple_client(self.CONNECTION_STRING) + await client.aconnect() + with self.assertRaises(AssertionError): + await self.assert_nodelist_change(modified, client, timeout=WAIT_TIME / 2) + + def test_import_dns_resolver(self): + # Regression test for PYTHON-4407 + import dns.resolver + + self.assertTrue(hasattr(dns.resolver, "resolve") or hasattr(dns.resolver, "query")) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_ssl.py b/test/asynchronous/test_ssl.py new file mode 100644 index 0000000000..0ce3e8bbac --- /dev/null +++ b/test/asynchronous/test_ssl.py @@ -0,0 +1,691 @@ +# Copyright 2011-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for SSL support.""" +from __future__ import annotations + +import os +import pathlib +import socket +import sys + +sys.path[0:0] = [""] + +from test.asynchronous import ( + HAVE_IPADDRESS, + AsyncIntegrationTest, + AsyncPyMongoTestCase, + SkipTest, + async_client_context, + connected, + remove_all_users, + unittest, +) +from test.utils_shared import ( + EventListener, + OvertCommandListener, + cat_files, + ignore_deprecations, +) +from urllib.parse import quote_plus + +from pymongo import AsyncMongoClient, ssl_support +from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.ssl_support import HAVE_PYSSL, HAVE_SSL, _ssl, get_ssl_context +from pymongo.write_concern import WriteConcern + +_HAVE_PYOPENSSL = False +try: + # All of these must be available to use PyOpenSSL + import OpenSSL + import requests + import service_identity + + # Ensure service_identity>=18.1 is installed + from service_identity.pyopenssl import verify_ip_address + + from pymongo.ocsp_support import _load_trusted_ca_certs + + _HAVE_PYOPENSSL = True +except ImportError: + _load_trusted_ca_certs = None # type: ignore + + +if HAVE_SSL: + import ssl + +_IS_SYNC = False + +if _IS_SYNC: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "certificates") +else: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "certificates") + +CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") +CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") +CA_PEM = os.path.join(CERT_PATH, "ca.pem") +CA_BUNDLE_PEM = os.path.join(CERT_PATH, "trusted-ca.pem") +CRL_PEM = os.path.join(CERT_PATH, "crl.pem") +MONGODB_X509_USERNAME = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client" + +# To fully test this start a mongod instance (built with SSL support) like so: +# mongod --dbpath /path/to/data/directory --sslOnNormalPorts \ +# --sslPEMKeyFile /path/to/pymongo/test/certificates/server.pem \ +# --sslCAFile /path/to/pymongo/test/certificates/ca.pem \ +# --sslWeakCertificateValidation +# Also, make sure you have 'server' as an alias for localhost in /etc/hosts +# +# Note: For all replica set tests to pass, the replica set configuration must +# use 'localhost' for the hostname of all hosts. 
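For orientation before the test classes: a rough `ssl`-module analogue of the client TLS options these tests exercise. PyMongo builds its context internally through `get_ssl_context`, so this mapping is an approximation, not the driver's code path:

```python
import ssl

def approx_client_context(allow_invalid_certs=False):
    # tlsCAFile=CA_PEM and tlsCertificateKeyFile=CLIENT_PEM, roughly:
    ctx = ssl.create_default_context(cafile=CA_PEM)
    ctx.load_cert_chain(CLIENT_PEM)
    if allow_invalid_certs:
        # Approximately what tlsAllowInvalidCertificates=True relaxes.
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
    return ctx
```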
+ + +class TestClientSSL(AsyncPyMongoTestCase): + @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") + def test_no_ssl_module(self): + # Explicit + self.assertRaises(ConfigurationError, self.simple_client, ssl=True) + + # Implied + self.assertRaises(ConfigurationError, self.simple_client, tlsCertificateKeyFile=CLIENT_PEM) + + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + @ignore_deprecations + def test_config_ssl(self): + # Tests various ssl configurations + self.assertRaises(ValueError, self.simple_client, ssl="foo") + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(TypeError, self.simple_client, ssl=0) + self.assertRaises(TypeError, self.simple_client, ssl=5.5) + self.assertRaises(TypeError, self.simple_client, ssl=[]) + + self.assertRaises(IOError, self.simple_client, tlsCertificateKeyFile="NoSuchFile") + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=True) + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=[]) + + # Test invalid combinations + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidCertificates=False + ) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidHostnames=False + ) + self.assertRaises( + ConfigurationError, self.simple_client, tls=False, tlsDisableOCSPEndpointCheck=False + ) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + def test_use_pyopenssl_when_available(self): + self.assertTrue(HAVE_PYSSL) + + @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") + def test_load_trusted_ca_certs(self): + trusted_ca_certs = _load_trusted_ca_certs(CA_BUNDLE_PEM) + self.assertEqual(2, len(trusted_ca_certs)) + + +class TestSSL(AsyncIntegrationTest): + saved_port: int + + async def assertClientWorks(self, client): + coll = client.pymongo_test.ssl_test.with_options( + write_concern=WriteConcern(w=async_client_context.w) + ) + await coll.drop() + await coll.insert_one({"ssl": True}) + self.assertTrue((await coll.find_one())["ssl"]) + await coll.drop() + + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + async def asyncSetUp(self): + await super().asyncSetUp() + # MongoClient should connect to the primary by default. 
+ self.saved_port = AsyncMongoClient.PORT + AsyncMongoClient.PORT = await async_client_context.port + + async def asyncTearDown(self): + AsyncMongoClient.PORT = self.saved_port + + @async_client_context.require_tls + async def test_simple_ssl(self): + if "PyPy" in sys.version: + self.skipTest("Test is flaky on PyPy") + # Expects the server to be running with ssl and with + # no --sslPEMKeyFile or with --sslWeakCertificateValidation + await self.assertClientWorks(self.client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_tlsCertificateKeyFilePassword(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + if not hasattr(ssl, "SSLContext") and not HAVE_PYSSL: + self.assertRaises( + ConfigurationError, + self.simple_client, + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + else: + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, + tlsCertificateKeyFilePassword="qwerty", + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=5000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = ( + "mongodb://localhost/?ssl=true" + "&tlsCertificateKeyFile=%s&tlsCertificateKeyFilePassword=qwerty" + "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" + ) + await connected( + self.simple_client(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_implicitly_set(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + + # test that setting tlsCertificateKeyFile causes ssl to be set to True + client = self.simple_client( + await async_client_context.host, + await async_client_context.port, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + response = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + client = self.simple_client( + await async_client_context.pair, + replicaSet=response["setName"], + w=len(response["hosts"]), + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + await self.assertClientWorks(client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_validation(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + client = self.simple_client( + "localhost", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + response = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" in response: + if response["primary"].split(":")[0] != "localhost": + raise SkipTest( + "No hosts in the replicaset for 'localhost'. 
" + "Cannot validate hostname in the certificate" + ) + + client = self.simple_client( + "localhost", + replicaSet=response["setName"], + w=len(response["hosts"]), + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + + await self.assertClientWorks(client) + + if HAVE_IPADDRESS: + client = self.simple_client( + "127.0.0.1", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + ) + await self.assertClientWorks(client) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_auth + @ignore_deprecations + async def test_cert_ssl_uri_support(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" + "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" + ) + client = self.simple_client(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) + await self.assertClientWorks(client) + + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_server_resolvable + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_cert_ssl_validation_hostname_matching(self): + # Expects the server to be running with server.pem and ca.pem + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, True, False, False, _IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, True, False, _IS_SYNC) + self.assertFalse(ctx.check_hostname) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + self.assertTrue(ctx.check_hostname) + + response = await self.client.admin.command(HelloCompat.LEGACY_CMD) + + with self.assertRaises(ConnectionFailure) as cm: + await connected( + self.simple_client( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + # PYTHON-5414 Check for "module service_identity has no attribute SICertificateError" + self.assertNotIn("has no attribute", str(cm.exception)) + + await connected( + self.simple_client( + "server", + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + if "setName" in response: + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + await connected( + self.simple_client( + "server", + replicaSet=response["setName"], + ssl=True, + tlsCertificateKeyFile=CLIENT_PEM, + tlsAllowInvalidCertificates=False, + 
tlsCAFile=CA_PEM, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=500, + **self.credentials, # type: ignore[arg-type] + ) + ) + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_sync + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_tlsCRLFile_support(self): + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or HAVE_PYSSL: + self.assertRaises( + ConfigurationError, + self.simple_client, + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + ) + else: + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client( + "localhost", + ssl=True, + tlsCAFile=CA_PEM, + tlsCRLFile=CRL_PEM, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + await connected(self.simple_client(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + + uri_fmt = ( + "mongodb://localhost/?ssl=true&tlsCRLFile=%s" + "&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + ) + with self.assertRaises(ConnectionFailure): + await connected( + self.simple_client(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + ) + + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_server_resolvable + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_validation_with_system_ca_certs(self): + # Expects the server to be running with server.pem and ca.pem. + # + # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem + # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem + # --sslWeakCertificateValidation + # + self.patch_system_certs(CA_PEM) + with self.assertRaises(ConnectionFailure): + # Server cert is verified but hostname matching fails + await connected( + self.simple_client( + "server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] + ) + + # Server cert is verified. Disable hostname matching. + await connected( + self.simple_client( + "server", + ssl=True, + tlsAllowInvalidHostnames=True, + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] + ) + ) + + # Server cert and hostname are verified. + await connected( + self.simple_client( + "localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] + ) + + # Server cert and hostname are verified. + await connected( + self.simple_client( + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", + **self.credentials, # type: ignore[arg-type] + ) + ) + + def test_system_certs_config_error(self): + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) + if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( + ctx, "load_default_certs" + ): + raise SkipTest("Can't test when system CA certificates are loadable.") + + have_certifi = ssl_support.HAVE_CERTIFI + have_wincertstore = ssl_support.HAVE_WINCERTSTORE + # Force the test regardless of environment. 
+ ssl_support.HAVE_CERTIFI = False + ssl_support.HAVE_WINCERTSTORE = False + try: + with self.assertRaises(ConfigurationError): + self.simple_client("mongodb://localhost/?ssl=true") + finally: + ssl_support.HAVE_CERTIFI = have_certifi + ssl_support.HAVE_WINCERTSTORE = have_wincertstore + + def test_certifi_support(self): + if hasattr(ssl, "SSLContext"): + # SSLSocket doesn't provide ca_certs attribute on pythons + # with SSLContext and SSLContext provides no information + # about ca_certs. + raise SkipTest("Can't test when SSLContext available.") + if not ssl_support.HAVE_CERTIFI: + raise SkipTest("Need certifi to test certifi support.") + + have_wincertstore = ssl_support.HAVE_WINCERTSTORE + # Force the test on Windows, regardless of environment. + ssl_support.HAVE_WINCERTSTORE = False + try: + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, CA_PEM) + + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) + finally: + ssl_support.HAVE_WINCERTSTORE = have_wincertstore + + def test_wincertstore(self): + if sys.platform != "win32": + raise SkipTest("Only valid on Windows.") + if hasattr(ssl, "SSLContext"): + # SSLSocket doesn't provide ca_certs attribute on pythons + # with SSLContext and SSLContext provides no information + # about ca_certs. + raise SkipTest("Can't test when SSLContext available.") + if not ssl_support.HAVE_WINCERTSTORE: + raise SkipTest("Need wincertstore to test wincertstore.") + + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, CA_PEM) + + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) + ssl_sock = ctx.wrap_socket(socket.socket()) + self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) + + @async_client_context.require_auth + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_mongodb_x509_auth(self): + host, port = await async_client_context.host, await async_client_context.port + self.addAsyncCleanup(remove_all_users, async_client_context.client["$external"]) + + # Give x509 user all necessary privileges. + await async_client_context.create_user( + "$external", + MONGODB_X509_USERNAME, + roles=[ + {"role": "readWriteAnyDatabase", "db": "admin"}, + {"role": "userAdminAnyDatabase", "db": "admin"}, + ], + ) + + noauth = self.simple_client( + await async_client_context.pair, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + with self.assertRaises(OperationFailure): + await noauth.pymongo_test.test.find_one() + + listener = EventListener() + auth = self.simple_client( + await async_client_context.pair, + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + event_listeners=[listener], + ) + + # No error + await auth.pymongo_test.test.find_one() + names = listener.started_command_names() + if async_client_context.version.at_least(4, 4, -1): + # Speculative auth skips the authenticate command. 
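The URIs this test builds next hinge on one detail: the certificate subject used as the MONGODB-X509 username contains `=` and `,`, which may not appear raw in the userinfo section, hence `quote_plus`. In isolation:

```python
from urllib.parse import quote_plus

subject = "C=US,ST=New York,L=New York City,O=MDB,OU=Drivers,CN=client"
uri = "mongodb://%s@localhost:27017/?authMechanism=MONGODB-X509" % quote_plus(subject)
assert "C%3DUS%2CST%3DNew+York" in uri  # '=' -> %3D, ',' -> %2C, ' ' -> '+'
```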
+ self.assertEqual(names, ["find"]) + else: + self.assertEqual(names, ["authenticate", "find"]) + + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + # No error + await client.pymongo_test.test.find_one() + + uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) + client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + # No error + await client.pymongo_test.test.find_one() + # Auth should fail if username and certificate do not match + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus("not the username"), + host, + port, + ) + + bad_client = self.simple_client( + uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM + ) + + with self.assertRaises(OperationFailure): + await bad_client.pymongo_test.test.find_one() + + bad_client = self.simple_client( + await async_client_context.pair, + username="not the username", + authMechanism="MONGODB-X509", + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CLIENT_PEM, + ) + + with self.assertRaises(OperationFailure): + await bad_client.pymongo_test.test.find_one() + + # Invalid certificate (using CA certificate as client certificate) + uri = "mongodb://%s@%s:%d/?authMechanism=MONGODB-X509" % ( + quote_plus(MONGODB_X509_USERNAME), + host, + port, + ) + try: + await connected( + self.simple_client( + uri, + ssl=True, + tlsAllowInvalidCertificates=True, + tlsCertificateKeyFile=CA_PEM, + serverSelectionTimeoutMS=1000, + ) + ) + except (ConnectionFailure, ConfigurationError): + pass + else: + self.fail("Invalid certificate accepted.") + + @async_client_context.require_tlsCertificateKeyFile + @async_client_context.require_no_api_version + @ignore_deprecations + async def test_connect_with_ca_bundle(self): + def remove(path): + try: + os.remove(path) + except OSError: + pass + + temp_ca_bundle = os.path.join(CERT_PATH, "trusted-ca-bundle.pem") + self.addCleanup(remove, temp_ca_bundle) + # Add the CA cert file to the bundle. + cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) + async with self.simple_client( + "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle + ) as client: + self.assertTrue(await client.admin.command("ping")) + + @async_client_context.require_async + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + async def test_pyopenssl_ignored_in_async(self): + client = AsyncMongoClient( + "mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true" + ) + await client.admin.command("ping") # command doesn't matter, just needs it to connect + await client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_streaming_protocol.py b/test/asynchronous/test_streaming_protocol.py new file mode 100644 index 0000000000..70ec49de80 --- /dev/null +++ b/test/asynchronous/test_streaming_protocol.py @@ -0,0 +1,228 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the database module.""" +from __future__ import annotations + +import sys +import time + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import ( + HeartbeatEventListener, + ServerEventListener, + async_wait_until, +) + +from pymongo import monitoring +from pymongo.hello import HelloCompat + +_IS_SYNC = False + + +class TestStreamingProtocol(AsyncIntegrationTest): + @async_client_context.require_failCommand_appName + async def test_failCommand_streaming(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + client = await self.async_rs_or_single_client( + event_listeners=[listener, hb_listener], + heartbeatFrequencyMS=500, + appName="failingHeartbeatTest", + ) + # Force a connection. + await client.admin.command("ping") + address = await client.address + listener.reset() + + fail_hello = { + "configureFailPoint": "failCommand", + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": False, + "errorCode": 10107, + "appName": "failingHeartbeatTest", + }, + } + async with self.fail_point(fail_hello): + + def _marked_unknown(event): + return ( + event.server_address == address + and not event.new_description.is_server_type_known + ) + + def _discovered_node(event): + return ( + event.server_address == address + and not event.previous_description.is_server_type_known + and event.new_description.is_server_type_known + ) + + def marked_unknown(): + return len(listener.matching(_marked_unknown)) >= 1 + + def rediscovered(): + return len(listener.matching(_discovered_node)) >= 1 + + # Topology events are not published synchronously + await async_wait_until(marked_unknown, "mark node unknown") + await async_wait_until(rediscovered, "rediscover node") + + # Server should be selectable. + await client.admin.command("ping") + + @async_client_context.require_failCommand_appName + async def test_streaming_rtt(self): + listener = ServerEventListener() + hb_listener = HeartbeatEventListener() + # On Windows, RTT can actually be 0.0 because time.time() only has + # 1-15 millisecond resolution. We need to delay the initial hello + # to ensure that RTT is never zero. + name = "streamingRttTest" + delay_hello: dict = { + "configureFailPoint": "failCommand", + "mode": {"times": 1000}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "blockConnection": True, + "blockTimeMS": 20, + # This can be uncommented after SERVER-49220 is fixed. + # 'appName': name, + }, + } + async with self.fail_point(delay_hello): + client = await self.async_rs_or_single_client( + event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name + ) + # Force a connection. + await client.admin.command("ping") + address = await client.address + + delay_hello["data"]["blockTimeMS"] = 500 + delay_hello["data"]["appName"] = name + async with self.fail_point(delay_hello): + + def rtt_exceeds_250_ms(): + # XXX: Add a public TopologyDescription getter to MongoClient? 
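The 250ms predicate below works because the driver maintains round-trip time as a moving average over hello exchanges, not as the latest sample, so the blocked 500ms hellos pull it up gradually. A generic exponentially weighted sketch; the smoothing factor 0.2 is an assumption for illustration, not a documented constant:

```python
def update_rtt(average, sample, alpha=0.2):
    # One exponentially-weighted moving-average step.
    return sample if average is None else alpha * sample + (1 - alpha) * average

rtt = None
for sample in (0.02, 0.02, 0.5, 0.5, 0.5):  # blockTimeMS=500 takes effect
    rtt = update_rtt(rtt, sample)
assert rtt > 0.250
```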
+ topology = client._topology + sd = topology.description.server_descriptions()[address] + assert sd.round_trip_time is not None + return sd.round_trip_time > 0.250 + + await async_wait_until(rtt_exceeds_250_ms, "exceed 250ms RTT") + + # Server should be selectable. + await client.admin.command("ping") + + def changed_event(event): + return event.server_address == address and isinstance( + event, monitoring.ServerDescriptionChangedEvent + ) + + # There should only be one event published, for the initial discovery. + events = listener.matching(changed_event) + self.assertEqual(1, len(events)) + self.assertGreater(events[0].new_description.round_trip_time, 0) + + @async_client_context.require_failCommand_appName + async def test_monitor_waits_after_server_check_error(self): + # This test implements: + # https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.md#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks + fail_hello = { + "mode": {"times": 5}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 1234, + "appName": "SDAMMinHeartbeatFrequencyTest", + }, + } + async with self.fail_point(fail_hello): + start = time.time() + client = await self.async_single_client( + appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 + ) + # Force a connection. + await client.admin.command("ping") + duration = time.time() - start + # Explanation of the expected events: + # 0ms: run configureFailPoint + # 1ms: create MongoClient + # 2ms: failed monitor handshake, 1 + # 502ms: failed monitor handshake, 2 + # 1002ms: failed monitor handshake, 3 + # 1502ms: failed monitor handshake, 4 + # 2002ms: failed monitor handshake, 5 + # 2502ms: monitor handshake succeeds + # 2503ms: run awaitable hello + # 2504ms: application handshake succeeds + # 2505ms: ping command succeeds + self.assertGreaterEqual(duration, 2) + self.assertLessEqual(duration, 4.0) + + @async_client_context.require_failCommand_appName + async def test_heartbeat_awaited_flag(self): + hb_listener = HeartbeatEventListener() + client = await self.async_single_client( + event_listeners=[hb_listener], + heartbeatFrequencyMS=500, + appName="heartbeatEventAwaitedFlag", + ) + # Force a connection. + await client.admin.command("ping") + + def hb_succeeded(event): + return isinstance(event, monitoring.ServerHeartbeatSucceededEvent) + + def hb_failed(event): + return isinstance(event, monitoring.ServerHeartbeatFailedEvent) + + fail_heartbeat = { + "mode": {"times": 2}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "closeConnection": True, + "appName": "heartbeatEventAwaitedFlag", + }, + } + async with self.fail_point(fail_heartbeat): + await async_wait_until( + lambda: hb_listener.matching(hb_failed), "published failed event" + ) + # Reconnect. + await client.admin.command("ping") + + hb_succeeded_events = hb_listener.matching(hb_succeeded) + hb_failed_events = hb_listener.matching(hb_failed) + self.assertFalse(hb_succeeded_events[0].awaited) + self.assertTrue(hb_failed_events[0].awaited) + # Depending on thread scheduling, the failed heartbeat could occur on + # the second or third check. 
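As an aside on the bounds asserted in `test_monitor_waits_after_server_check_error` above: per the timeline comment, the first failed check happens immediately and each of the four retries waits at least minHeartbeatFrequencyMS (500ms), which is where the 2-second floor comes from:

```python
MIN_HEARTBEAT_FREQUENCY = 0.5  # 500ms between failed checks, per the spec
failed_checks = 5
lower_bound = (failed_checks - 1) * MIN_HEARTBEAT_FREQUENCY
assert lower_bound == 2.0  # matches assertGreaterEqual(duration, 2)
```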
+ events = [type(e) for e in hb_listener.events[:4]] + if events == [ + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatSucceededEvent, + monitoring.ServerHeartbeatStartedEvent, + monitoring.ServerHeartbeatFailedEvent, + ]: + self.assertFalse(hb_succeeded_events[1].awaited) + else: + self.assertTrue(hb_succeeded_events[1].awaited) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_transactions.py b/test/asynchronous/test_transactions.py new file mode 100644 index 0000000000..478710362e --- /dev/null +++ b/test/asynchronous/test_transactions.py @@ -0,0 +1,630 @@ +# Copyright 2018-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Execute Transactions Spec tests.""" +from __future__ import annotations + +import sys +from io import BytesIO +from test.asynchronous.utils_spec_runner import AsyncSpecRunner + +from gridfs.asynchronous.grid_file import AsyncGridFS, AsyncGridFSBucket +from pymongo.asynchronous.pool import PoolState +from pymongo.server_selectors import writable_server_selector + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import ( + OvertCommandListener, + async_wait_until, +) +from typing import List + +from bson import encode +from bson.raw_bson import RawBSONDocument +from pymongo import WriteConcern, _csot +from pymongo.asynchronous import client_session +from pymongo.asynchronous.client_session import TransactionOptions +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.errors import ( + AutoReconnect, + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + InvalidOperation, + OperationFailure, +) +from pymongo.operations import IndexModel, InsertOne +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = False + +# Max number of operations to perform after a transaction to prove unpinning +# occurs. Chosen so that there's a low false positive rate. With 2 mongoses, +# 50 attempts yields a one in a quadrillion chance of a false positive +# (1/(0.5^50)). +UNPIN_TEST_MAX_ATTEMPTS = 50 + + +class AsyncTransactionsBase(AsyncSpecRunner): + def maybe_skip_scenario(self, test): + super().maybe_skip_scenario(test) + if ( + "secondary" in self.id() + and not async_client_context.is_mongos + and not async_client_context.has_secondaries + ): + raise unittest.SkipTest("No secondaries") + + +class TestTransactions(AsyncTransactionsBase): + @async_client_context.require_transactions + def test_transaction_options_validation(self): + default_options = TransactionOptions() + self.assertIsNone(default_options.read_concern) + self.assertIsNone(default_options.write_concern) + self.assertIsNone(default_options.read_preference) + self.assertIsNone(default_options.max_commit_time_ms) + # No error when valid options are provided. 
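Spelling out the arithmetic behind `UNPIN_TEST_MAX_ATTEMPTS` above: with two mongoses chosen uniformly at random, the chance that 50 unpinned attempts all land on the same one is 0.5**50:

```python
mongoses = 2
attempts = 50  # UNPIN_TEST_MAX_ATTEMPTS
p_false_positive = (1 / mongoses) ** attempts
assert p_false_positive < 1e-15  # roughly one in a quadrillion
```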
+ TransactionOptions( + read_concern=ReadConcern(), + write_concern=WriteConcern(), + read_preference=ReadPreference.PRIMARY, + max_commit_time_ms=10000, + ) + with self.assertRaisesRegex(TypeError, "read_concern must be "): + TransactionOptions(read_concern={}) # type: ignore + with self.assertRaisesRegex(TypeError, "write_concern must be "): + TransactionOptions(write_concern={}) # type: ignore + with self.assertRaisesRegex( + ConfigurationError, "transactions do not support unacknowledged write concern" + ): + TransactionOptions(write_concern=WriteConcern(w=0)) + with self.assertRaisesRegex(TypeError, "is not valid for read_preference"): + TransactionOptions(read_preference={}) # type: ignore + with self.assertRaisesRegex(TypeError, "max_commit_time_ms must be an integer or None"): + TransactionOptions(max_commit_time_ms="10000") # type: ignore + + @async_client_context.require_transactions + async def test_transaction_write_concern_override(self): + """Test txn overrides Client/Database/Collection write_concern.""" + client = await self.async_rs_client(w=0) + db = client.test + coll = db.test + await coll.insert_one({}) + async with client.start_session() as s: + async with await s.start_transaction(write_concern=WriteConcern(w=1)): + self.assertTrue((await coll.insert_one({}, session=s)).acknowledged) + self.assertTrue((await coll.insert_many([{}, {}], session=s)).acknowledged) + self.assertTrue((await coll.bulk_write([InsertOne({})], session=s)).acknowledged) + self.assertTrue((await coll.replace_one({}, {}, session=s)).acknowledged) + self.assertTrue( + (await coll.update_one({}, {"$set": {"a": 1}}, session=s)).acknowledged + ) + self.assertTrue( + (await coll.update_many({}, {"$set": {"a": 1}}, session=s)).acknowledged + ) + self.assertTrue((await coll.delete_one({}, session=s)).acknowledged) + self.assertTrue((await coll.delete_many({}, session=s)).acknowledged) + await coll.find_one_and_delete({}, session=s) + await coll.find_one_and_replace({}, {}, session=s) + await coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s) + + unsupported_txn_writes: list = [ + (client.drop_database, [db.name], {}), + (db.drop_collection, ["collection"], {}), + (coll.drop, [], {}), + (coll.rename, ["collection2"], {}), + # Drop collection2 between tests of "rename", above. + (coll.database.drop_collection, ["collection2"], {}), + (coll.create_indexes, [[IndexModel("a")]], {}), + (coll.create_index, ["a"], {}), + (coll.drop_index, ["a_1"], {}), + (coll.drop_indexes, [], {}), + (coll.aggregate, [[{"$out": "aggout"}]], {}), + ] + # Creating a collection in a transaction requires MongoDB 4.4+. + if async_client_context.version < (4, 3, 4): + unsupported_txn_writes.extend( + [ + (db.create_collection, ["collection"], {}), + ] + ) + + for op in unsupported_txn_writes: + op, args, kwargs = op + async with client.start_session() as s: + kwargs["session"] = s + await s.start_transaction(write_concern=WriteConcern(w=1)) + with self.assertRaises(OperationFailure): + await op(*args, **kwargs) + await s.abort_transaction() + + @async_client_context.require_transactions + @async_client_context.require_multiple_mongoses + async def test_unpin_for_next_transaction(self): + # Increase localThresholdMS and wait until both nodes are discovered + # to avoid false positives. + client = await self.async_rs_client( + async_client_context.mongos_seeds(), localThresholdMS=1000 + ) + await async_wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") + coll = client.test.test + # Create the collection. 
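+        # (Editor's note.) The collection is created ahead of the transaction
+        # because implicit collection creation inside a transaction requires
+        # MongoDB 4.4+ (see the version gate earlier in this file).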
+ await coll.insert_one({}) + async with client.start_session() as s: + # Session is pinned to Mongos. + async with await s.start_transaction(): + await coll.insert_one({}, session=s) + + addresses = set() + for _ in range(UNPIN_TEST_MAX_ATTEMPTS): + async with await s.start_transaction(): + cursor = coll.find({}, session=s) + self.assertTrue(await anext(cursor)) + addresses.add(cursor.address) + # Break early if we can. + if len(addresses) > 1: + break + + self.assertGreater(len(addresses), 1) + + @async_client_context.require_transactions + @async_client_context.require_multiple_mongoses + async def test_unpin_for_non_transaction_operation(self): + # Increase localThresholdMS and wait until both nodes are discovered + # to avoid false positives. + client = await self.async_rs_client( + async_client_context.mongos_seeds(), localThresholdMS=1000 + ) + await async_wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") + coll = client.test.test + # Create the collection. + await coll.insert_one({}) + async with client.start_session() as s: + # Session is pinned to Mongos. + async with await s.start_transaction(): + await coll.insert_one({}, session=s) + + addresses = set() + for _ in range(UNPIN_TEST_MAX_ATTEMPTS): + cursor = coll.find({}, session=s) + self.assertTrue(await anext(cursor)) + addresses.add(cursor.address) + # Break early if we can. + if len(addresses) > 1: + break + + self.assertGreater(len(addresses), 1) + + @async_client_context.require_transactions + @async_client_context.require_version_min(4, 3, 4) + async def test_create_collection(self): + client = async_client_context.client + db = client.pymongo_test + coll = db.test_create_collection + self.addAsyncCleanup(coll.drop) + + # Use with_transaction to avoid StaleConfig errors on sharded clusters. + async def create_and_insert(session): + coll2 = await db.create_collection(coll.name, session=session) + self.assertEqual(coll, coll2) + await coll.insert_one({}, session=session) + + async with client.start_session() as s: + await s.with_transaction(create_and_insert) + + # Outside a transaction we raise CollectionInvalid on existing colls. + with self.assertRaises(CollectionInvalid): + await db.create_collection(coll.name) + + # Inside a transaction we raise the OperationFailure from create. 
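+        # (Editor's note.) Unlike with_transaction above, an explicit
+        # transaction performs no automatic retry, so the server's
+        # NamespaceExists error surfaces directly as an OperationFailure.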
+ async with client.start_session() as s: + await s.start_transaction() + with self.assertRaises(OperationFailure) as ctx: + await db.create_collection(coll.name, session=s) + self.assertEqual(ctx.exception.code, 48) # NamespaceExists + + @async_client_context.require_transactions + async def test_gridfs_does_not_support_transactions(self): + client = async_client_context.client + db = client.pymongo_test + gfs = AsyncGridFS(db) + bucket = AsyncGridFSBucket(db) + + async def gridfs_find(*args, **kwargs): + return await gfs.find(*args, **kwargs).next() + + async def gridfs_open_upload_stream(*args, **kwargs): + await (await bucket.open_upload_stream(*args, **kwargs)).write(b"1") + + gridfs_ops = [ + (gfs.put, (b"123",)), + (gfs.get, (1,)), + (gfs.get_version, ("name",)), + (gfs.get_last_version, ("name",)), + (gfs.delete, (1,)), + (gfs.list, ()), + (gfs.find_one, ()), + (gridfs_find, ()), + (gfs.exists, ()), + (gridfs_open_upload_stream, ("name",)), + ( + bucket.upload_from_stream, + ( + "name", + b"data", + ), + ), + ( + bucket.download_to_stream, + ( + 1, + BytesIO(), + ), + ), + ( + bucket.download_to_stream_by_name, + ( + "name", + BytesIO(), + ), + ), + (bucket.delete, (1,)), + (bucket.find, ()), + (bucket.open_download_stream, (1,)), + (bucket.open_download_stream_by_name, ("name",)), + ( + bucket.rename, + ( + 1, + "new-name", + ), + ), + ( + bucket.rename_by_name, + ( + "new-name", + "new-name2", + ), + ), + (bucket.delete_by_name, ("new-name2",)), + ] + + async with client.start_session() as s, await s.start_transaction(): + for op, args in gridfs_ops: + with self.assertRaisesRegex( + InvalidOperation, + "GridFS does not support multi-document transactions", + ): + await op(*args, session=s) # type: ignore + + # Require 4.2+ for large (16MB+) transactions. + @async_client_context.require_version_min(4, 2) + @async_client_context.require_transactions + @unittest.skipIf(sys.platform == "win32", "Our Windows machines are too slow to pass this test") + async def test_transaction_starts_with_batched_write(self): + if "PyPy" in sys.version and async_client_context.tls: + self.skipTest( + "PYTHON-2937 PyPy is so slow sending large " + "messages over TLS that this test fails" + ) + # Start a transaction with a batch of operations that needs to be + # split. + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + await coll.delete_many({}) + listener.reset() + self.addAsyncCleanup(coll.drop) + large_str = "\0" * (1 * 1024 * 1024) + ops: List[InsertOne[RawBSONDocument]] = [ + InsertOne(RawBSONDocument(encode({"a": large_str}))) for _ in range(48) + ] + async with client.start_session() as session: + async with await session.start_transaction(): + await coll.bulk_write(ops, session=session) # type: ignore[arg-type] + # Assert commands were constructed properly. 
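+        # (Editor's sketch, assuming the standard driver transaction fields.)
+        # The split batches and the commit share one lsid and txnNumber, and
+        # only the first command starts the transaction:
+        #
+        #     {"insert": ..., "startTransaction": True, "lsid": ..., "txnNumber": 1}
+        #     {"insert": ..., "lsid": ..., "txnNumber": 1}
+        #     {"commitTransaction": 1, "lsid": ..., "txnNumber": 1}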
+ self.assertEqual( + ["insert", "insert", "commitTransaction"], listener.started_command_names() + ) + first_cmd = listener.started_events[0].command + self.assertTrue(first_cmd["startTransaction"]) + lsid = first_cmd["lsid"] + txn_number = first_cmd["txnNumber"] + for event in listener.started_events[1:]: + self.assertNotIn("startTransaction", event.command) + self.assertEqual(lsid, event.command["lsid"]) + self.assertEqual(txn_number, event.command["txnNumber"]) + self.assertEqual(48, await coll.count_documents({})) + + @async_client_context.require_transactions + async def test_transaction_direct_connection(self): + client = await self.async_single_client() + coll = client.pymongo_test.test + + # Make sure the collection exists. + await coll.insert_one({}) + self.assertEqual(client.topology_description.topology_type_name, "Single") + + async def find(*args, **kwargs): + return coll.find(*args, **kwargs) + + async def find_raw_batches(*args, **kwargs): + return coll.find_raw_batches(*args, **kwargs) + + ops = [ + (coll.bulk_write, [[InsertOne[dict]({})]]), + (coll.insert_one, [{}]), + (coll.insert_many, [[{}, {}]]), + (coll.replace_one, [{}, {}]), + (coll.update_one, [{}, {"$set": {"a": 1}}]), + (coll.update_many, [{}, {"$set": {"a": 1}}]), + (coll.delete_one, [{}]), + (coll.delete_many, [{}]), + (coll.find_one_and_replace, [{}, {}]), + (coll.find_one_and_update, [{}, {"$set": {"a": 1}}]), + (coll.find_one_and_delete, [{}, {}]), + (coll.find_one, [{}]), + (coll.count_documents, [{}]), + (coll.distinct, ["foo"]), + (coll.aggregate, [[]]), + (find, [{}]), + (coll.aggregate_raw_batches, [[]]), + (find_raw_batches, [{}]), + (coll.database.command, ["find", coll.name]), + ] + for f, args in ops: + async with client.start_session() as s, await s.start_transaction(): + res = await f(*args, session=s) # type:ignore[operator] + if isinstance(res, (AsyncCommandCursor, AsyncCursor)): + await res.to_list() + + @async_client_context.require_transactions + async def test_transaction_pool_cleared_error_labelled_transient(self): + c = await self.async_single_client() + + with self.assertRaises(AutoReconnect) as context: + async with c.start_session() as session: + async with await session.start_transaction(): + server = await c._select_server(writable_server_selector, session, "test") + # Pause the server's pool, causing it to fail connection checkout. + server.pool.state = PoolState.PAUSED + async with c._checkout(server, session): + pass + + # Verify that the TransientTransactionError label is present in the error. 
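+        # (Editor's note.) Errors raised inside a transaction before a commit
+        # is attempted carry the TransientTransactionError label; the
+        # convenient API relies on that label to decide that the whole
+        # callback can safely be retried.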
+ self.assertTrue(context.exception.has_error_label("TransientTransactionError")) + + +class PatchSessionTimeout: + """Patches the client_session's with_transaction timeout for testing.""" + + def __init__(self, mock_timeout): + self.real_timeout = client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT + self.mock_timeout = mock_timeout + + def __enter__(self): + client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.mock_timeout + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + client_session._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout + + +class TestTransactionsConvenientAPI(AsyncTransactionsBase): + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.mongos_clients = [] + if async_client_context.supports_transactions(): + for address in async_client_context.mongoses: + self.mongos_clients.append(await self.async_single_client("{}:{}".format(*address))) + + async def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + await self.configure_fail_point(client, command_args) + + @async_client_context.require_transactions + async def test_callback_raises_custom_error(self): + class _MyException(Exception): + pass + + async def raise_error(_): + raise _MyException + + async with self.client.start_session() as s: + with self.assertRaises(_MyException): + await s.with_transaction(raise_error) + + @async_client_context.require_transactions + async def test_callback_returns_value(self): + async def callback(_): + return "Foo" + + async with self.client.start_session() as s: + self.assertEqual(await s.with_transaction(callback), "Foo") + + await self.db.test.insert_one({}) + + async def callback2(session): + await self.db.test.insert_one({}, session=session) + return "Foo" + + async with self.client.start_session() as s: + self.assertEqual(await s.with_transaction(callback2), "Foo") + + @async_client_context.require_transactions + async def test_callback_not_retried_after_timeout(self): + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + async def callback(session): + await coll.insert_one({}, session=session) + err: dict = { + "ok": 0, + "errmsg": "Transaction 7819 has been aborted.", + "code": 251, + "codeName": "NoSuchTransaction", + "errorLabels": ["TransientTransactionError"], + } + raise OperationFailure(err["errmsg"], err["code"], err) + + # Create the collection. + await coll.insert_one({}) + listener.reset() + async with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(OperationFailure): + await s.with_transaction(callback) + + self.assertEqual(listener.started_command_names(), ["insert", "abortTransaction"]) + + @async_client_context.require_test_commands + @async_client_context.require_transactions + async def test_callback_not_retried_after_commit_timeout(self): + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + async def callback(session): + await coll.insert_one({}, session=session) + + # Create the collection. 
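+        # (Editor's note.) PatchSessionTimeout(0) shrinks the retry window to
+        # zero, so the first labeled error from commitTransaction below must
+        # surface instead of being retried.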
+ await coll.insert_one({}) + await self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["commitTransaction"], + "errorCode": 251, # NoSuchTransaction + }, + } + ) + self.addAsyncCleanup( + self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"} + ) + listener.reset() + + async with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(OperationFailure): + await s.with_transaction(callback) + + self.assertEqual(listener.started_command_names(), ["insert", "commitTransaction"]) + + @async_client_context.require_test_commands + @async_client_context.require_transactions + async def test_commit_not_retried_after_timeout(self): + listener = OvertCommandListener() + client = await self.async_rs_client(event_listeners=[listener]) + coll = client[self.db.name].test + + async def callback(session): + await coll.insert_one({}, session=session) + + # Create the collection. + await coll.insert_one({}) + await self.set_fail_point( + { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["commitTransaction"], "closeConnection": True}, + } + ) + self.addAsyncCleanup( + self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"} + ) + listener.reset() + + async with client.start_session() as s: + with PatchSessionTimeout(0): + with self.assertRaises(ConnectionFailure): + await s.with_transaction(callback) + + # One insert for the callback and two commits (includes the automatic + # retry). + self.assertEqual( + listener.started_command_names(), ["insert", "commitTransaction", "commitTransaction"] + ) + + # Tested here because this supports Motor's convenient transactions API. + @async_client_context.require_transactions + async def test_in_transaction_property(self): + client = async_client_context.client + coll = client.test.testcollection + await coll.insert_one({}) + self.addAsyncCleanup(coll.drop) + + async with client.start_session() as s: + self.assertFalse(s.in_transaction) + await s.start_transaction() + self.assertTrue(s.in_transaction) + await coll.insert_one({}, session=s) + self.assertTrue(s.in_transaction) + await s.commit_transaction() + self.assertFalse(s.in_transaction) + + async with client.start_session() as s: + await s.start_transaction() + # commit empty transaction + await s.commit_transaction() + self.assertFalse(s.in_transaction) + + async with client.start_session() as s: + await s.start_transaction() + await s.abort_transaction() + self.assertFalse(s.in_transaction) + + # Using a callback + async def callback(session): + self.assertTrue(session.in_transaction) + + async with client.start_session() as s: + self.assertFalse(s.in_transaction) + await s.with_transaction(callback) + self.assertFalse(s.in_transaction) + + +class TestOptionsInsideTransactionProse(AsyncTransactionsBase): + @async_client_context.require_transactions + @async_client_context.require_no_standalone + async def test_case_1(self): + # Write concern not inherited from collection object inside transaction + # Create a MongoClient running against a configured sharded/replica set/load balanced cluster. + client = async_client_context.client + coll = client[self.db.name].test + await coll.delete_many({}) + # Start a new session on the client. + async with client.start_session() as s: + # Start a transaction on the session. + await s.start_transaction() + # Instantiate a collection object in the driver with a default write concern of { w: 0 }. 
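+            # (Editor's note.) Per the transactions spec, write concern is
+            # applied only to commitTransaction/abortTransaction, so the
+            # collection-level w:0 must be ignored and the insert below is
+            # acknowledged.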
+ inner_coll = coll.with_options(write_concern=WriteConcern(w=0)) + # Insert the document { n: 1 } on the instantiated collection. + result = await inner_coll.insert_one({"n": 1}, session=s) + # Commit the transaction. + await s.commit_transaction() + # End the session. + # Ensure the document was inserted and no error was thrown from the transaction. + assert result.inserted_id is not None + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_transactions_unified.py b/test/asynchronous/test_transactions_unified.py new file mode 100644 index 0000000000..8e5b1ae181 --- /dev/null +++ b/test/asynchronous/test_transactions_unified.py @@ -0,0 +1,55 @@ +# Copyright 2021-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the Transactions unified spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import client_context, unittest +from test.asynchronous.unified_format import generate_test_classes + +_IS_SYNC = False + + +def setUpModule(): + pass + + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "transactions/unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +# Location of JSON test specifications for transactions-convenient-api. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions-convenient-api/unified") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "transactions-convenient-api/unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_unified_format.py b/test/asynchronous/test_unified_format.py new file mode 100644 index 0000000000..58a1ea3326 --- /dev/null +++ b/test/asynchronous/test_unified_format.py @@ -0,0 +1,97 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
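+
+# (Editor's note.) This module exercises the unified-format runner itself: it
+# generates cases from the "valid-pass" and "valid-fail" self-test suites and
+# unit-tests MatchEvaluatorUtil operators such as $$unsetOrMatches and $$type.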
+from __future__ import annotations + +import os +import sys +from pathlib import Path +from typing import Any + +sys.path[0:0] = [""] + +from test import UnitTest, unittest +from test.asynchronous.unified_format import MatchEvaluatorUtil, generate_test_classes + +from bson import ObjectId + +_IS_SYNC = False + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "unified-test-format") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "unified-test-format") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "valid-pass"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + expected_failures=[ + "Client side error in command starting transaction", # PYTHON-1894 + ], + ) +) + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "valid-fail"), + module=__name__, + class_name_prefix="UnifiedTestFormat", + bypass_test_generation_errors=True, + expected_failures=[ + ".*", # All tests expected to fail + ], + ) +) + + +class TestMatchEvaluatorUtil(UnitTest): + def setUp(self): + self.match_evaluator = MatchEvaluatorUtil(self) + + def test_unsetOrMatches(self): + spec: dict[str, Any] = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + for actual in [{}, {"y": 2}, None]: + self.match_evaluator.match_result(spec, actual) + + spec = {"x": {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}}} + for actual in [{}, {"x": {}}, {"x": {"y": 2}}]: + self.match_evaluator.match_result(spec, actual) + + spec = {"y": {"$$unsetOrMatches": {"$$exists": True}}} + self.match_evaluator.match_result(spec, {}) + self.match_evaluator.match_result(spec, {"y": 2}) + self.match_evaluator.match_result(spec, {"x": 1}) + self.match_evaluator.match_result(spec, {"y": {}}) + + def test_type(self): + self.match_evaluator.match_result( + { + "operationType": "insert", + "ns": {"db": "change-stream-tests", "coll": "test"}, + "fullDocument": {"_id": {"$$type": "objectId"}, "x": 1}, + }, + { + "operationType": "insert", + "fullDocument": {"_id": ObjectId("5fc93511ac93941052098f0c"), "x": 1}, + "ns": {"db": "change-stream-tests", "coll": "test"}, + }, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/test_versioned_api_integration.py b/test/asynchronous/test_versioned_api_integration.py new file mode 100644 index 0000000000..0f6b544465 --- /dev/null +++ b/test/asynchronous/test_versioned_api_integration.py @@ -0,0 +1,85 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys +from pathlib import Path +from test.asynchronous.unified_format import generate_test_classes + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncIntegrationTest, async_client_context, unittest +from test.utils_shared import OvertCommandListener + +from pymongo.server_api import ServerApi + +_IS_SYNC = False + +# Location of JSON test specifications. 
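+# (Editor's note.) The async tests live one directory below the shared JSON
+# specs, hence the extra .parent when _IS_SYNC is False.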
+if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "versioned-api") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "versioned-api") + + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestServerApiIntegration(AsyncIntegrationTest): + RUN_ON_LOAD_BALANCER = True + + def assertServerApi(self, event): + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") + + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) + + @async_client_context.require_version_min(4, 7) + async def test_command_options(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + server_api=ServerApi("1"), event_listeners=[listener] + ) + coll = client.test.test + await coll.insert_many([{} for _ in range(100)]) + self.addAsyncCleanup(coll.delete_many, {}) + await coll.find(batch_size=25).to_list() + await client.admin.command("ping") + self.assertServerApiInAllCommands(listener.started_events) + + @async_client_context.require_version_min(4, 7) + @async_client_context.require_transactions + async def test_command_options_txn(self): + listener = OvertCommandListener() + client = await self.async_rs_or_single_client( + server_api=ServerApi("1"), event_listeners=[listener] + ) + coll = client.test.test + await coll.insert_many([{} for _ in range(100)]) + self.addAsyncCleanup(coll.delete_many, {}) + + listener.reset() + async with client.start_session() as s, await s.start_transaction(): + await coll.insert_many([{} for _ in range(100)], session=s) + await coll.find(batch_size=25, session=s).to_list() + await client.test.command("find", "test", session=s) + self.assertServerApiInAllCommands(listener.started_events) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/asynchronous/unified_format.py b/test/asynchronous/unified_format.py new file mode 100644 index 0000000000..0c9e8c10c8 --- /dev/null +++ b/test/asynchronous/unified_format.py @@ -0,0 +1,1640 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unified test format runner. 
+ +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md +""" +from __future__ import annotations + +import asyncio +import binascii +import copy +import functools +import os +import re +import sys +import time +import traceback +from collections import defaultdict +from inspect import iscoroutinefunction +from test.asynchronous import ( + AsyncIntegrationTest, + async_client_context, + client_knobs, + unittest, +) +from test.asynchronous.utils import async_get_pool, flaky +from test.asynchronous.utils_spec_runner import SpecRunnerTask +from test.helpers_shared import ALL_KMS_PROVIDERS, DEFAULT_KMS_TLS +from test.unified_format_shared import ( + KMS_TLS_OPTS, + PLACEHOLDER_MAP, + EventListenerUtil, + MatchEvaluatorUtil, + coerce_result, + parse_bulk_write_error_result, + parse_bulk_write_result, + parse_client_bulk_write_error_result, + parse_collection_or_database_options, + with_metaclass, +) +from test.utils_shared import ( + async_wait_until, + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, + snake_to_camel, + wait_until, +) +from test.version import Version +from typing import Any, Dict, List, Mapping, Optional + +import pytest + +import pymongo +from bson import SON, json_util +from bson.codec_options import DEFAULT_CODEC_OPTIONS +from bson.objectid import ObjectId +from gridfs import AsyncGridFSBucket, GridOut, NoFile +from gridfs.errors import CorruptGridFile +from pymongo import ASCENDING, AsyncMongoClient, CursorType, _csot +from pymongo.asynchronous.change_stream import AsyncChangeStream +from pymongo.asynchronous.client_session import AsyncClientSession, TransactionOptions, _TxnState +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.database import AsyncDatabase +from pymongo.asynchronous.encryption import AsyncClientEncryption +from pymongo.driver_info import DriverInfo +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + ClientBulkWriteException, + ConfigurationError, + ConnectionFailure, + EncryptionError, + InvalidOperation, + NotPrimaryError, + OperationFailure, + PyMongoError, +) +from pymongo.monitoring import ( + CommandStartedEvent, +) +from pymongo.operations import ( + SearchIndexModel, +) +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.server_api import ServerApi +from pymongo.server_selectors import Selection, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.topology_description import TopologyDescription +from pymongo.typings import _Address +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + +IS_INTERRUPTED = False + + +def interrupt_loop(): + global IS_INTERRUPTED + IS_INTERRUPTED = True + + +async def is_run_on_requirement_satisfied(requirement): + topology_satisfied = True + req_topologies = requirement.get("topologies") + if req_topologies: + topology_satisfied = await async_client_context.is_topology_type(req_topologies) + + server_version = Version(*async_client_context.version[:3]) + + min_version_satisfied = True + req_min_server_version = requirement.get("minServerVersion") + if req_min_server_version: + min_version_satisfied = Version.from_string(req_min_server_version) <= server_version + + max_version_satisfied = True + 
req_max_server_version = requirement.get("maxServerVersion") + if req_max_server_version: + max_version_satisfied = Version.from_string(req_max_server_version) >= server_version + + params_satisfied = True + params = requirement.get("serverParameters") + if params: + for param, val in params.items(): + if param not in async_client_context.server_parameters: + params_satisfied = False + elif async_client_context.server_parameters[param] != val: + params_satisfied = False + + auth_satisfied = True + req_auth = requirement.get("auth") + if req_auth is not None: + if req_auth: + auth_satisfied = async_client_context.auth_enabled + if auth_satisfied and "authMechanism" in requirement: + auth_satisfied = async_client_context.check_auth_type(requirement["authMechanism"]) + else: + auth_satisfied = not async_client_context.auth_enabled + + csfle_satisfied = True + req_csfle = requirement.get("csfle") + if req_csfle is True: + # Don't overwrite unsatisfied minimum version requirements. + if min_version_satisfied: + min_version_satisfied = Version.from_string("4.2") <= server_version + csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + elif isinstance(req_csfle, dict) and "minLibmongocryptVersion" in req_csfle: + csfle_satisfied = False + req_version = req_csfle["minLibmongocryptVersion"] + if _HAVE_PYMONGOCRYPT: + from pymongocrypt import libmongocrypt_version + + if Version.from_string(libmongocrypt_version()) >= Version.from_string(req_version): + csfle_satisfied = True + + return ( + topology_satisfied + and min_version_satisfied + and max_version_satisfied + and params_satisfied + and auth_satisfied + and csfle_satisfied + ) + + +class NonLazyCursor: + """A find cursor proxy that creates the remote cursor when initialized.""" + + def __init__(self, find_cursor, client): + self.client = client + self.find_cursor = find_cursor + # Create the server side cursor. + self.first_result = None + + @classmethod + async def create(cls, find_cursor, client): + cursor = cls(find_cursor, client) + try: + cursor.first_result = await anext(cursor.find_cursor) + except StopAsyncIteration: + cursor.first_result = None + return cursor + + @property + def alive(self): + return self.first_result is not None or self.find_cursor.alive + + async def __anext__(self): + if self.first_result is not None: + first = self.first_result + self.first_result = None + return first + return await anext(self.find_cursor) + + # Added to support the iterateOnce operation. + try_next = __anext__ + + async def close(self): + await self.find_cursor.close() + self.client = None + + +class EntityMapUtil: + """Utility class that implements an entity map as per the unified + test format specification. 
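+
+    Entities are resolved by name via __getitem__; a missing name fails the
+    running test instead of raising KeyError.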
+ """ + + def __init__(self, test_class): + self._entities: Dict[str, Any] = {} + self._listeners: Dict[str, EventListenerUtil] = {} + self._session_lsids: Dict[str, Mapping[str, Any]] = {} + self.test: UnifiedSpecTestMixinV1 = test_class + + def __contains__(self, item): + return item in self._entities + + def __len__(self): + return len(self._entities) + + def __getitem__(self, item): + try: + return self._entities[item] + except KeyError: + self.test.fail(f"Could not find entity named {item} in map") + + def __setitem__(self, key, value): + if not isinstance(key, str): + self.test.fail("Expected entity name of type str, got %s" % (type(key))) + + if key in self._entities: + self.test.fail(f"Entity named {key} already in map") + + self._entities[key] = value + + def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: + if "$$placeholder" in current: + if path not in PLACEHOLDER_MAP: + raise ValueError(f"Could not find a placeholder value for {path}") + return PLACEHOLDER_MAP[path] + + # Distinguish between temp and non-temp aws credentials. + if path.endswith("/kmsProviders/aws") and "sessionToken" in current: + path = path.replace("aws", "aws_temp") + + for key in list(current): + value = current[key] + if isinstance(value, dict): + subpath = f"{path}/{key}" + current[key] = self._handle_placeholders(spec, value, subpath) + return current + + async def _create_entity(self, entity_spec, uri=None): + if len(entity_spec) != 1: + self.test.fail(f"Entity spec {entity_spec} did not contain exactly one top-level key") + + entity_type, spec = next(iter(entity_spec.items())) + spec = self._handle_placeholders(spec, spec, "") + if entity_type == "client": + kwargs: dict = {} + observe_events = spec.get("observeEvents", []) + + if "autoEncryptOpts" in spec: + auto_encrypt_opts = spec["autoEncryptOpts"].copy() + auto_encrypt_kwargs: dict = dict(kms_tls_options=DEFAULT_KMS_TLS) + kms_providers = auto_encrypt_opts.pop("kmsProviders", ALL_KMS_PROVIDERS.copy()) + key_vault_namespace = auto_encrypt_opts.pop("keyVaultNamespace") + extra_opts = auto_encrypt_opts.pop("extraOptions", {}) + for key, value in extra_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + for key, value in auto_encrypt_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, **auto_encrypt_kwargs + ) + kwargs["auto_encryption_opts"] = auto_encryption_opts + + # The unified tests use topologyOpeningEvent, we use topologyOpenedEvent + for i in range(len(observe_events)): + if "topologyOpeningEvent" == observe_events[i]: + observe_events[i] = "topologyOpenedEvent" + ignore_commands = spec.get("ignoreCommandMonitoringEvents", []) + observe_sensitive_commands = spec.get("observeSensitiveCommands", False) + ignore_commands = [cmd.lower() for cmd in ignore_commands] + listener = EventListenerUtil( + observe_events, + ignore_commands, + observe_sensitive_commands, + spec.get("storeEventsAsEntities"), + self, + ) + self._listeners[spec["id"]] = listener + kwargs["event_listeners"] = [listener] + if spec.get("useMultipleMongoses"): + if async_client_context.load_balancer: + kwargs["h"] = async_client_context.MULTI_MONGOS_LB_URI + elif async_client_context.is_mongos: + kwargs["h"] = async_client_context.mongos_seeds() + kwargs.update(spec.get("uriOptions", {})) + server_api = spec.get("serverApi") + if "waitQueueSize" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueSize") + if 
"waitQueueMultiple" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueMultiple") + if server_api: + kwargs["server_api"] = ServerApi( + server_api["version"], + strict=server_api.get("strict"), + deprecation_errors=server_api.get("deprecationErrors"), + ) + if uri: + kwargs["h"] = uri + client = await self.test.async_rs_or_single_client(**kwargs) + await client.aconnect() + self[spec["id"]] = client + return + elif entity_type == "database": + client = self[spec["client"]] + if type(client).__name__ != "AsyncMongoClient": + self.test.fail( + "Expected entity {} to be of type AsyncMongoClient, got {}".format( + spec["client"], type(client) + ) + ) + options = parse_collection_or_database_options(spec.get("databaseOptions", {})) + self[spec["id"]] = client.get_database(spec["databaseName"], **options) + return + elif entity_type == "collection": + database = self[spec["database"]] + if not isinstance(database, AsyncDatabase): + self.test.fail( + "Expected entity {} to be of type AsyncDatabase, got {}".format( + spec["database"], type(database) + ) + ) + options = parse_collection_or_database_options(spec.get("collectionOptions", {})) + self[spec["id"]] = database.get_collection(spec["collectionName"], **options) + return + elif entity_type == "session": + client = self[spec["client"]] + if type(client).__name__ != "AsyncMongoClient": + self.test.fail( + "Expected entity {} to be of type AsyncMongoClient, got {}".format( + spec["client"], type(client) + ) + ) + opts = camel_to_snake_args(spec.get("sessionOptions", {})) + if "default_transaction_options" in opts: + txn_opts = parse_spec_options(opts["default_transaction_options"]) + txn_opts = TransactionOptions(**txn_opts) + opts = copy.deepcopy(opts) + opts["default_transaction_options"] = txn_opts + session = client.start_session(**dict(opts)) + self[spec["id"]] = session + self._session_lsids[spec["id"]] = copy.deepcopy(session.session_id) + self.test.addAsyncCleanup(session.end_session) + return + elif entity_type == "bucket": + db = self[spec["database"]] + kwargs = parse_spec_options(spec.get("bucketOptions", {}).copy()) + bucket = AsyncGridFSBucket(db, **kwargs) + + # PyMongo does not support AsyncGridFSBucket.drop(), emulate it. + @_csot.apply + async def drop(self: AsyncGridFSBucket, *args: Any, **kwargs: Any) -> None: + await self._files.drop(*args, **kwargs) + await self._chunks.drop(*args, **kwargs) + + if not hasattr(bucket, "drop"): + bucket.drop = drop.__get__(bucket) + self[spec["id"]] = bucket + return + elif entity_type == "clientEncryption": + opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) + if isinstance(opts["key_vault_client"], str): + opts["key_vault_client"] = self[opts["key_vault_client"]] + # Set TLS options for providers like "kmip:name1". 
+            kms_tls_options = {}
+            for provider in opts["kms_providers"]:
+                provider_type = provider.split(":")[0]
+                if provider_type in KMS_TLS_OPTS:
+                    kms_tls_options[provider] = KMS_TLS_OPTS[provider_type]
+            self[spec["id"]] = AsyncClientEncryption(
+                opts["kms_providers"],
+                opts["key_vault_namespace"],
+                opts["key_vault_client"],
+                DEFAULT_CODEC_OPTIONS,
+                opts.get("kms_tls_options", kms_tls_options),
+                opts.get("key_expiration_ms"),
+            )
+            return
+        elif entity_type == "thread":
+            name = spec["id"]
+            thread = SpecRunnerTask(name)
+            await thread.start()
+            self.test.addAsyncCleanup(thread.join, 5)
+            self[name] = thread
+            return
+
+        self.test.fail(f"Unable to create entity of unknown type {entity_type}")
+
+    async def create_entities_from_spec(self, entity_spec, uri=None):
+        for spec in entity_spec:
+            await self._create_entity(spec, uri=uri)
+
+    def get_listener_for_client(self, client_name: str) -> EventListenerUtil:
+        client = self[client_name]
+        if type(client).__name__ != "AsyncMongoClient":
+            self.test.fail(
+                f"Expected entity {client_name} to be of type AsyncMongoClient, got {type(client)}"
+            )
+
+        listener = self._listeners.get(client_name)
+        if not listener:
+            self.test.fail(f"No listeners configured for client {client_name}")
+
+        return listener
+
+    def get_lsid_for_session(self, session_name):
+        session = self[session_name]
+        if not isinstance(session, AsyncClientSession):
+            self.test.fail(
+                f"Expected entity {session_name} to be of type AsyncClientSession, got {type(session)}"
+            )
+
+        try:
+            return session.session_id
+        except InvalidOperation:
+            # session has been closed.
+            return self._session_lsids[session_name]
+
+    async def advance_cluster_times(self, cluster_time) -> None:
+        """Manually synchronize entities when desired."""
+        for entity in self._entities.values():
+            if isinstance(entity, AsyncClientSession) and cluster_time:
+                entity.advance_cluster_time(cluster_time)
+
+
+class UnifiedSpecTestMixinV1(AsyncIntegrationTest):
+    """Mixin class to run test cases from test specification files.
+
+    Assumes that tests conform to the `unified test format
+    <https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md>`_.
+
+    Specification of the test suite currently being run is available as
+    a class attribute ``TEST_SPEC``.
+    """
+
+    SCHEMA_VERSION = Version.from_string("1.25")
+    RUN_ON_LOAD_BALANCER = True
+    TEST_SPEC: Any
+    TEST_PATH = ""  # This gets filled in by generate_test_classes
+    mongos_clients: list[AsyncMongoClient] = []
+
+    @staticmethod
+    async def should_run_on(run_on_spec):
+        if not run_on_spec:
+            # Always run these tests.
+            return True
+
+        for req in run_on_spec:
+            if await is_run_on_requirement_satisfied(req):
+                return True
+        return False
+
+    async def insert_initial_data(self, initial_data):
+        for i, collection_data in enumerate(initial_data):
+            coll_name = collection_data["collectionName"]
+            db_name = collection_data["databaseName"]
+            opts = collection_data.get("createOptions", {})
+            documents = collection_data["documents"]
+
+            # Set up the collection with as few majority writes as possible.
+            db = self.client[db_name]
+            await db.drop_collection(coll_name)
+            # Use majority wc only on the final write.
+            if i == len(initial_data) - 1:
+                wc = WriteConcern(w="majority")
+            else:
+                wc = WriteConcern(w=1)
+
+            # Remove any encryption collections associated with the collection.
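+            # (Editor's note.) Queryable Encryption stores metadata in
+            # "enxcol_.<name>.esc" and "enxcol_.<name>.ecoc" companion
+            # collections; stale ones must be dropped so initialData starts
+            # from a clean slate.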
+ collections = await db.list_collection_names() + for collection in collections: + if collection in [f"enxcol_.{coll_name}.esc", f"enxcol_.{coll_name}.ecoc"]: + await db.drop_collection(collection) + + if documents: + if opts: + await db.create_collection(coll_name, **opts) + await db.get_collection(coll_name, write_concern=wc).insert_many(documents) + else: + # Ensure collection exists + await db.create_collection(coll_name, write_concern=wc, **opts) + + @classmethod + def setUpClass(cls) -> None: + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs( + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1, + kill_cursor_frequency=0.1, + events_queue_frequency=0.1, + ) + cls.knobs.enable() + + @classmethod + def tearDownClass(cls) -> None: + cls.knobs.disable() + + async def asyncSetUp(self): + # super call creates internal client cls.client + await super().asyncSetUp() + # process file-level runOnRequirements + run_on_spec = self.TEST_SPEC.get("runOnRequirements", []) + if not await self.should_run_on(run_on_spec): + raise unittest.SkipTest(f"{self.__class__.__name__} runOnRequirements not satisfied") + + # add any special-casing for skipping tests here + + # Handle mongos_clients for transactions tests. + self.mongos_clients = [] + if async_client_context.supports_transactions() and not async_client_context.load_balancer: + for address in async_client_context.mongoses: + self.mongos_clients.append(await self.async_single_client("{}:{}".format(*address))) + + # process schemaVersion + # note: we check major schema version during class generation + version = Version.from_string(self.TEST_SPEC["schemaVersion"]) + self.assertLessEqual( + version, + self.SCHEMA_VERSION, + f"expected schema version {self.SCHEMA_VERSION} or lower, got {version}", + ) + + # initialize internals + self.match_evaluator = MatchEvaluatorUtil(self) + + def maybe_skip_test(self, spec): + # add any special-casing for skipping tests here + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + + if "client side error in command starting transaction" in description: + self.skipTest("Implement PYTHON-1894") + if "type=symbol" in description: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to entire download" in description: + self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if any( + x in description + for x in [ + "first insertone is never committed", + "second updateone is never committed", + "third updateone is never committed", + ] + ): + self.skipTest("Implement PYTHON-4597") + + if "csot" in class_name: + # Skip tests that are too slow to run on a given platform. 
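+            # (Editor's note.) Entries below are regular expressions matched
+            # with re.match against the lower-cased test description, so each
+            # pattern only needs to match from the start of the description.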
+ slow_macos = [ + "operation fails after two consecutive socket timeouts.*", + "operation succeeds after one socket timeout.*", + "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + ] + slow_win32 = [ + *slow_macos, + "maxTimeMS value in the command is less than timeoutMS", + "timeoutMS applies to whole operation.*", + ] + slow_pypy = [ + "timeoutMS applies to whole operation.*", + ] + if "CI" in os.environ and sys.platform == "win32" and "gridfs" in class_name: + self.skipTest("PYTHON-3522 CSOT GridFS test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "win32": + for pat in slow_win32: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "darwin": + for pat in slow_macos: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on MacOS") + if "CI" in os.environ and sys.implementation.name.lower() == "pypy": + for pat in slow_pypy: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on PyPy") + if "change" in description or "change" in class_name: + self.skipTest("CSOT not implemented for watch()") + if "cursors" in class_name: + self.skipTest("CSOT not implemented for cursors") + if ( + "tailable" in class_name + or "tailable" in description + and "non-tailable" not in description + ): + self.skipTest("CSOT not implemented for tailable cursors") + if "sessions" in class_name: + self.skipTest("CSOT not implemented for sessions") + if "withtransaction" in description: + self.skipTest("CSOT not implemented for with_transaction") + if "transaction" in class_name or "transaction" in description: + self.skipTest("CSOT not implemented for transactions") + + # Some tests need to be skipped based on the operations they try to run. + for op in spec["operations"]: + name = op["name"] + if name == "count": + self.skipTest("PyMongo does not support count()") + if name == "listIndexNames": + self.skipTest("PyMongo does not support list_index_names()") + if not async_client_context.test_commands_enabled: + if name == "failPoint" or name == "targetedFailPoint": + self.skipTest("Test commands must be enabled to use fail points") + if name == "modifyCollection": + self.skipTest("PyMongo does not support modifyCollection") + if "timeoutMode" in op.get("arguments", {}): + self.skipTest("PyMongo does not support timeoutMode") + + def process_error(self, exception, spec): + if isinstance(exception, unittest.SkipTest): + raise + is_error = spec.get("isError") + is_client_error = spec.get("isClientError") + is_timeout_error = spec.get("isTimeoutError") + error_contains = spec.get("errorContains") + error_code = spec.get("errorCode") + error_code_name = spec.get("errorCodeName") + error_labels_contain = spec.get("errorLabelsContain") + error_labels_omit = spec.get("errorLabelsOmit") + expect_result = spec.get("expectResult") + error_response = spec.get("errorResponse") + if error_response: + if isinstance(exception, ClientBulkWriteException): + self.match_evaluator.match_result(error_response, exception.error.details) + else: + self.match_evaluator.match_result(error_response, exception.details) + + if is_error: + # already satisfied because exception was raised + pass + + if is_client_error: + if isinstance(exception, ClientBulkWriteException): + error = exception.error + else: + error = exception + # Connection errors are considered client errors. 
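+            # (Editor's note.) This mirrors the unified format's definition:
+            # network errors (except the server-generated NotPrimaryError),
+            # driver-side validation errors, and client-side encryption
+            # errors all count as client errors.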
+            if isinstance(error, ConnectionFailure):
+                self.assertNotIsInstance(error, NotPrimaryError)
+            elif isinstance(error, CorruptGridFile):
+                pass
+            elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError, NoFile)):
+                pass
+            else:
+                self.assertNotIsInstance(error, PyMongoError)
+
+        if is_timeout_error:
+            self.assertIsInstance(exception, PyMongoError)
+            if not exception.timeout:
+                # Re-raise the exception for better diagnostics.
+                raise exception
+
+        if error_contains:
+            if isinstance(exception, BulkWriteError):
+                errmsg = str(exception.details).lower()
+            elif isinstance(exception, ClientBulkWriteException):
+                errmsg = str(exception.details).lower()
+            else:
+                errmsg = str(exception).lower()
+            self.assertIn(error_contains.lower(), errmsg)
+
+        if error_code:
+            if isinstance(exception, ClientBulkWriteException):
+                self.assertEqual(error_code, exception.error.details.get("code"))
+            else:
+                self.assertEqual(error_code, exception.details.get("code"))
+
+        if error_code_name:
+            if isinstance(exception, ClientBulkWriteException):
+                self.assertEqual(error_code_name, exception.error.details.get("codeName"))
+            else:
+                self.assertEqual(error_code_name, exception.details.get("codeName"))
+
+        if error_labels_contain:
+            if isinstance(exception, ClientBulkWriteException):
+                error = exception.error
+            else:
+                error = exception
+            labels = [
+                err_label for err_label in error_labels_contain if error.has_error_label(err_label)
+            ]
+            self.assertEqual(labels, error_labels_contain)
+
+        if error_labels_omit:
+            for err_label in error_labels_omit:
+                if exception.has_error_label(err_label):
+                    self.fail(f"Exception '{exception}' unexpectedly had label '{err_label}'")
+
+        if expect_result:
+            if isinstance(exception, BulkWriteError):
+                result = parse_bulk_write_error_result(exception)
+                self.match_evaluator.match_result(expect_result, result)
+            elif isinstance(exception, ClientBulkWriteException):
+                result = parse_client_bulk_write_error_result(exception)
+                self.match_evaluator.match_result(expect_result, result)
+            else:
+                self.fail(
+                    f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions, got {exception}"
+                )
+
+        return exception
+
+    def __raise_if_unsupported(self, opname, target, *target_types):
+        if not isinstance(target, target_types):
+            self.fail(f"Operation {opname} not supported for entity of type {type(target)}")
+
+    async def __entityOperation_createChangeStream(self, target, *args, **kwargs):
+        self.__raise_if_unsupported(
+            "createChangeStream", target, AsyncMongoClient, AsyncDatabase, AsyncCollection
+        )
+        stream = await target.watch(*args, **kwargs)
+        self.addAsyncCleanup(stream.close)
+        return stream
+
+    async def _clientOperation_createChangeStream(self, target, *args, **kwargs):
+        return await self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    async def _databaseOperation_createChangeStream(self, target, *args, **kwargs):
+        return await self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    async def _collectionOperation_createChangeStream(self, target, *args, **kwargs):
+        return await self.__entityOperation_createChangeStream(target, *args, **kwargs)
+
+    async def _databaseOperation_runCommand(self, target, **kwargs):
+        self.__raise_if_unsupported("runCommand", target, AsyncDatabase)
+        # Ensure the first key is the command name.
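+        # (Editor's note.) MongoDB identifies a command by the first BSON key,
+        # so the name is re-inserted at the front; for example a runCommand of
+        # {"command": {"comment": "x"}, "commandName": "ping"} is sent as
+        # {"ping": 1, "comment": "x"}.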
+ ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + return await target.command(**kwargs) + + async def _databaseOperation_runCursorCommand(self, target, **kwargs): + return await (await self._databaseOperation_createCommandCursor(target, **kwargs)).to_list() + + async def _databaseOperation_createCommandCursor(self, target, **kwargs): + self.__raise_if_unsupported("createCommandCursor", target, AsyncDatabase) + # Ensure the first key is the command name. + ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + batch_size = 0 + + cursor_type = kwargs.pop("cursor_type", "nonTailable") + if cursor_type == CursorType.TAILABLE: + ordered_command["tailable"] = True + elif cursor_type == CursorType.TAILABLE_AWAIT: + ordered_command["tailable"] = True + ordered_command["awaitData"] = True + elif cursor_type != "nonTailable": + self.fail(f"unknown cursorType: {cursor_type}") + + if "maxTimeMS" in kwargs: + kwargs["max_await_time_ms"] = kwargs.pop("maxTimeMS") + + if "batch_size" in kwargs: + batch_size = kwargs.pop("batch_size") + + cursor = await target.cursor_command(**kwargs) + + if batch_size > 0: + cursor.batch_size(batch_size) + + return cursor + + async def _collectionOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] async for idx in await collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + async def _collectionOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + async for index in await collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + async def _collectionOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = await self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + + async def _databaseOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] async for idx in await collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + async def _databaseOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + async for index in await collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + async def _databaseOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = await self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + + async def kill_all_sessions(self): + if getattr(self, "client", None) is None: + return + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + try: + await client.admin.command("killAllSessions", []) + except (OperationFailure, AutoReconnect): + # "operation was interrupted" by killing the command's + # own session. + # On 8.0+ killAllSessions sometimes returns a network error. 
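+                # Both failure modes are benign during cleanup, so they are
+                # ignored rather than failing the test run.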
+ pass + + async def _databaseOperation_listCollections(self, target, *args, **kwargs): + if "batch_size" in kwargs: + kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} + cursor = await target.list_collections(*args, **kwargs) + return await cursor.to_list() + + async def _databaseOperation_createCollection(self, target, *args, **kwargs): + # PYTHON-1936 Ignore the listCollections event from create_collection. + kwargs["check_exists"] = False + ret = await target.create_collection(*args, **kwargs) + return ret + + async def __entityOperation_aggregate(self, target, *args, **kwargs): + self.__raise_if_unsupported("aggregate", target, AsyncDatabase, AsyncCollection) + return await (await target.aggregate(*args, **kwargs)).to_list() + + async def _databaseOperation_aggregate(self, target, *args, **kwargs): + return await self.__entityOperation_aggregate(target, *args, **kwargs) + + async def _collectionOperation_aggregate(self, target, *args, **kwargs): + return await self.__entityOperation_aggregate(target, *args, **kwargs) + + async def _collectionOperation_find(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, AsyncCollection) + find_cursor = target.find(*args, **kwargs) + return await find_cursor.to_list() + + async def _collectionOperation_createFindCursor(self, target, *args, **kwargs): + self.__raise_if_unsupported("find", target, AsyncCollection) + if "filter" not in kwargs: + self.fail('createFindCursor requires a "filter" argument') + cursor = await NonLazyCursor.create(target.find(*args, **kwargs), target.database.client) + self.addAsyncCleanup(cursor.close) + return cursor + + def _collectionOperation_count(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support collection.count()") + + async def _collectionOperation_listIndexes(self, target, *args, **kwargs): + if "batch_size" in kwargs: + self.skipTest("PyMongo does not support batch_size for list_indexes") + return await (await target.list_indexes(*args, **kwargs)).to_list() + + def _collectionOperation_listIndexNames(self, target, *args, **kwargs): + self.skipTest("PyMongo does not support list_index_names") + + async def _collectionOperation_createSearchIndexes(self, target, *args, **kwargs): + models = [SearchIndexModel(**i) for i in kwargs["models"]] + return await target.create_search_indexes(models) + + async def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): + name = kwargs.get("name") + agg_kwargs = kwargs.get("aggregation_options", dict()) + return await (await target.list_search_indexes(name, **agg_kwargs)).to_list() + + async def _sessionOperation_withTransaction(self, target, *args, **kwargs): + self.__raise_if_unsupported("withTransaction", target, AsyncClientSession) + return await target.with_transaction(*args, **kwargs) + + async def _sessionOperation_startTransaction(self, target, *args, **kwargs): + self.__raise_if_unsupported("startTransaction", target, AsyncClientSession) + return await target.start_transaction(*args, **kwargs) + + async def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported("iterateUntilDocumentOrError", target, AsyncChangeStream) + return await anext(target) + + async def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): + self.__raise_if_unsupported( + "iterateUntilDocumentOrError", target, NonLazyCursor, AsyncCommandCursor + ) + while target.alive: + try: + return await anext(target) + except StopAsyncIteration: + pass + return 
None + + async def _cursor_close(self, target, *args, **kwargs): + self.__raise_if_unsupported("close", target, NonLazyCursor, AsyncCommandCursor) + return await target.close() + + async def _clientOperation_appendMetadata(self, target, *args, **kwargs): + info_opts = kwargs["driver_info_options"] + driver_info = DriverInfo(info_opts["name"], info_opts["version"], info_opts["platform"]) + target.append_metadata(driver_info) + + async def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + + return await target.create_data_key(*args, **kwargs) + + async def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): + return await target.get_keys(*args, **kwargs).to_list() + + async def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): + result = await target.delete_key(*args, **kwargs) + response = result.raw_result + response["deletedCount"] = result.deleted_count + return response + + async def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + data = await target.rewrap_many_data_key(*args, **kwargs) + if data.bulk_write_result: + return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} + return {} + + async def _clientEncryptionOperation_encrypt(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + return await target.encrypt(*args, **kwargs) + + async def _bucketOperation_download( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + async with await target.open_download_stream(*args, **kwargs) as gout: + return await gout.read() + + async def _bucketOperation_downloadByName( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> bytes: + async with await target.open_download_stream_by_name(*args, **kwargs) as gout: + return await gout.read() + + async def _bucketOperation_upload( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> ObjectId: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return await target.upload_from_stream(*args, **kwargs) + + async def _bucketOperation_uploadWithId( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> Any: + kwargs["source"] = binascii.unhexlify(kwargs.pop("source")["$$hexBytes"]) + if "content_type" in kwargs: + kwargs.setdefault("metadata", {})["contentType"] = kwargs.pop("content_type") + return await target.upload_from_stream_with_id(*args, **kwargs) + + async def _bucketOperation_find( + self, target: AsyncGridFSBucket, *args: Any, **kwargs: Any + ) -> List[GridOut]: + return await target.find(*args, **kwargs).to_list() + + async def run_entity_operation(self, spec): + target = self.entity_map[spec["object"]] + opname = spec["name"] + opargs = spec.get("arguments") + expect_error = spec.get("expectError") + save_as_entity = spec.get("saveResultAsEntity") + expect_result = spec.get("expectResult") + ignore = spec.get("ignoreResultAndError") + if ignore and (expect_error or save_as_entity or expect_result): + raise ValueError( + "ignoreResultAndError is incompatible with saveResultAsEntity" + ", expectError, and expectResult" + ) + if opargs: + arguments = parse_spec_options(copy.deepcopy(opargs)) + prepare_spec_arguments( + 
spec, + arguments, + camel_to_snake(opname), + self.entity_map, + self.run_operations_and_throw, + ) + else: + arguments = {} + + if isinstance(target, AsyncMongoClient): + method_name = f"_clientOperation_{opname}" + elif isinstance(target, AsyncDatabase): + method_name = f"_databaseOperation_{opname}" + elif isinstance(target, AsyncCollection): + method_name = f"_collectionOperation_{opname}" + # contentType is always stored in metadata in pymongo. + if target.name.endswith(".files") and opname == "find": + for doc in spec.get("expectResult", []): + if "contentType" in doc: + doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") + elif isinstance(target, AsyncChangeStream): + method_name = f"_changeStreamOperation_{opname}" + elif isinstance(target, (NonLazyCursor, AsyncCommandCursor)): + method_name = f"_cursor_{opname}" + elif isinstance(target, AsyncClientSession): + method_name = f"_sessionOperation_{opname}" + elif isinstance(target, AsyncGridFSBucket): + method_name = f"_bucketOperation_{opname}" + if "id" in arguments: + arguments["file_id"] = arguments.pop("id") + # MD5 is always disabled in pymongo. + arguments.pop("disable_md5", None) + elif isinstance(target, AsyncClientEncryption): + method_name = f"_clientEncryptionOperation_{opname}" + else: + method_name = "doesNotExist" + + try: + method = getattr(self, method_name) + except AttributeError: + target_opname = camel_to_snake(opname) + if target_opname == "iterate_once": + target_opname = "try_next" + if target_opname == "client_bulk_write": + target_opname = "bulk_write" + try: + cmd = getattr(target, target_opname) + except AttributeError: + self.fail(f"Unsupported operation {opname} on entity {target}") + else: + cmd = functools.partial(method, target) + + try: + # CSOT: Translate the spec test "timeout" arg into pymongo's context timeout API. + if "timeout" in arguments: + timeout = arguments.pop("timeout") + with pymongo.timeout(timeout): + result = await cmd(**dict(arguments)) + else: + result = await cmd(**dict(arguments)) + except Exception as exc: + # Ignore all operation errors but to avoid masking bugs don't + # ignore things like TypeError and ValueError. 
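+            # (The isinstance check below only swallows PyMongoError and its
+            # subclasses; programming errors still propagate.)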
+ if ignore and isinstance(exc, (PyMongoError,)): + return exc + if expect_error: + return self.process_error(exc, expect_error) + raise + else: + if expect_error: + self.fail(f'Expected error {expect_error} but "{opname}" succeeded: {result}') + + if expect_result: + actual = coerce_result(opname, result) + self.match_evaluator.match_result(expect_result, actual) + + if save_as_entity: + self.entity_map[save_as_entity] = result + return None + return None + + async def __set_fail_point(self, client, command_args): + if not async_client_context.test_commands_enabled: + self.skipTest("Test commands must be enabled") + + await self.configure_fail_point(client, command_args) + self.addAsyncCleanup(self.configure_fail_point, client, command_args, off=True) + + async def _testOperation_failPoint(self, spec): + await self.__set_fail_point( + client=self.entity_map[spec["client"]], command_args=spec["failPoint"] + ) + + async def _testOperation_targetedFailPoint(self, spec): + session = self.entity_map[spec["session"]] + if not session._pinned_address: + self.fail( + "Cannot use targetedFailPoint operation with unpinned " "session {}".format( + spec["session"] + ) + ) + + client = await self.async_single_client("{}:{}".format(*session._pinned_address)) + await self.__set_fail_point(client=client, command_args=spec["failPoint"]) + + async def _testOperation_createEntities(self, spec): + await self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) + await self.entity_map.advance_cluster_times(self._cluster_time) + + def _testOperation_assertSessionTransactionState(self, spec): + session = self.entity_map[spec["session"]] + expected_state = getattr(_TxnState, spec["state"].upper()) + self.assertEqual(expected_state, session._transaction.state) + + def _testOperation_assertSessionPinned(self, spec): + session = self.entity_map[spec["session"]] + self.assertIsNotNone(session._transaction.pinned_address) + + def _testOperation_assertSessionUnpinned(self, spec): + session = self.entity_map[spec["session"]] + self.assertIsNone(session._pinned_address) + self.assertIsNone(session._transaction.pinned_address) + + def __get_last_two_command_lsids(self, listener): + cmd_started_events = [] + for event in reversed(listener.events): + if isinstance(event, CommandStartedEvent): + cmd_started_events.append(event) + if len(cmd_started_events) < 2: + self.fail( + "Needed 2 CommandStartedEvents to compare lsids, " + "got %s" % (len(cmd_started_events)) + ) + return tuple([e.command["lsid"] for e in cmd_started_events][:2]) + + def _testOperation_assertDifferentLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec["client"]) + self.assertNotEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSameLsidOnLastTwoCommands(self, spec): + listener = self.entity_map.get_listener_for_client(spec["client"]) + self.assertEqual(*self.__get_last_two_command_lsids(listener)) + + def _testOperation_assertSessionDirty(self, spec): + session = self.entity_map[spec["session"]] + self.assertTrue(session._server_session.dirty) + + def _testOperation_assertSessionNotDirty(self, spec): + session = self.entity_map[spec["session"]] + return self.assertFalse(session._server_session.dirty) + + async def _testOperation_assertCollectionExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list( + await self.client.get_database(database_name).list_collection_names() + ) + 
self.assertIn(collection_name, collection_name_list) + + async def _testOperation_assertCollectionNotExists(self, spec): + database_name = spec["databaseName"] + collection_name = spec["collectionName"] + collection_name_list = list( + await self.client.get_database(database_name).list_collection_names() + ) + self.assertNotIn(collection_name, collection_name_list) + + async def _testOperation_assertIndexExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + index_names = [idx["name"] async for idx in await collection.list_indexes()] + self.assertIn(spec["indexName"], index_names) + + async def _testOperation_assertIndexNotExists(self, spec): + collection = self.client[spec["databaseName"]][spec["collectionName"]] + async for index in await collection.list_indexes(): + self.assertNotEqual(spec["indexName"], index["name"]) + + async def _testOperation_assertNumberConnectionsCheckedOut(self, spec): + client = self.entity_map[spec["client"]] + pool = await async_get_pool(client) + self.assertEqual(spec["connections"], pool.active_sockets) + + def _event_count(self, client_name, event): + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events("all") + count = 0 + for actual in actual_events: + try: + self.match_evaluator.match_event(event, actual) + except AssertionError: + continue + else: + count += 1 + return count + + def _testOperation_assertEventCount(self, spec): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") + + async def _testOperation_waitForEvent(self, spec): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. 
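+
+        Polling is delegated to async_wait_until, which fails after its
+        default 10 second timeout.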
+ """ + client, event, count = spec["client"], spec["event"], spec["count"] + await async_wait_until( + lambda: self._event_count(client, event) >= count, + f"find {count} {event} event(s)", + ) + + async def _testOperation_wait(self, spec): + """Run the "wait" test operation.""" + await asyncio.sleep(spec["ms"] / 1000.0) + + def _testOperation_recordTopologyDescription(self, spec): + """Run the recordTopologyDescription test operation.""" + self.entity_map[spec["id"]] = self.entity_map[spec["client"]].topology_description + + def _testOperation_assertTopologyType(self, spec): + """Run the assertTopologyType test operation.""" + description = self.entity_map[spec["topologyDescription"]] + self.assertIsInstance(description, TopologyDescription) + self.assertEqual(description.topology_type_name, spec["topologyType"]) + + async def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: + """Run the waitForPrimaryChange test operation.""" + client = self.entity_map[spec["client"]] + old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] + timeout = spec["timeoutMS"] / 1000.0 + + def get_primary(td: TopologyDescription) -> Optional[_Address]: + servers = writable_server_selector(Selection.from_topology_description(td)) + if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: + return servers[0].address + return None + + old_primary = get_primary(old_description) + + async def primary_changed() -> bool: + primary = await client.primary + if primary is None: + return False + return primary != old_primary + + await async_wait_until(primary_changed, "change primary", timeout=timeout) + + async def _testOperation_runOnThread(self, spec): + """Run the 'runOnThread' operation.""" + thread = self.entity_map[spec["thread"]] + await thread.schedule(functools.partial(self.run_entity_operation, spec["operation"])) + + async def _testOperation_waitForThread(self, spec): + """Run the 'waitForThread' operation.""" + thread = self.entity_map[spec["thread"]] + await thread.stop() + await thread.join(10) + if thread.exc: + raise thread.exc + self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) + + async def _testOperation_loop(self, spec): + failure_key = spec.get("storeFailuresAsEntity") + error_key = spec.get("storeErrorsAsEntity") + successes_key = spec.get("storeSuccessesAsEntity") + iteration_key = spec.get("storeIterationsAsEntity") + iteration_limiter_key = spec.get("numIterations") + for i in [failure_key, error_key]: + if i: + self.entity_map[i] = [] + for i in [successes_key, iteration_key]: + if i: + self.entity_map[i] = 0 + i = 0 + global IS_INTERRUPTED + while True: + if iteration_limiter_key and i >= iteration_limiter_key: + break + i += 1 + if IS_INTERRUPTED: + break + try: + if iteration_key: + self.entity_map._entities[iteration_key] += 1 + for op in spec["operations"]: + await self.run_entity_operation(op) + if successes_key: + self.entity_map._entities[successes_key] += 1 + except Exception as exc: + if isinstance(exc, AssertionError): + key = failure_key or error_key + else: + key = error_key or failure_key + if not key: + raise + self.entity_map[key].append( + {"error": str(exc), "time": time.time(), "type": type(exc).__name__} + ) + + async def run_special_operation(self, spec): + opname = spec["name"] + method_name = f"_testOperation_{opname}" + try: + method = getattr(self, method_name) + except AttributeError: + self.fail(f"Unsupported special test operation {opname}") + else: + if 
iscoroutinefunction(method): + await method(spec["arguments"]) + else: + method(spec["arguments"]) + + async def run_operations(self, spec): + for op in spec: + if op["object"] == "testRunner": + await self.run_special_operation(op) + else: + await self.run_entity_operation(op) + + async def run_operations_and_throw(self, spec): + for op in spec: + if op["object"] == "testRunner": + await self.run_special_operation(op) + else: + result = await self.run_entity_operation(op) + if isinstance(result, Exception): + raise result + + def check_events(self, spec): + for event_spec in spec: + client_name = event_spec["client"] + events = event_spec["events"] + event_type = event_spec.get("eventType", "command") + ignore_extra_events = event_spec.get("ignoreExtraEvents", False) + server_connection_id = event_spec.get("serverConnectionId") + has_server_connection_id = event_spec.get("hasServerConnectionId", False) + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events(event_type) + if ignore_extra_events: + actual_events = actual_events[: len(events)] + + if len(events) == 0: + self.assertEqual(actual_events, []) + continue + + if len(actual_events) != len(events): + expected = "\n".join(str(e) for e in events) + actual = "\n".join(str(a) for a in actual_events) + self.assertEqual( + len(actual_events), + len(events), + f"expected events:\n{expected}\nactual events:\n{actual}", + ) + + for idx, expected_event in enumerate(events): + self.match_evaluator.match_event(expected_event, actual_events[idx]) + + if has_server_connection_id: + assert server_connection_id is not None + assert server_connection_id >= 0 + else: + assert server_connection_id is None + + def process_ignore_messages(self, ignore_logs, actual_logs): + final_logs = [] + for log in actual_logs: + ignored = False + for ignore_log in ignore_logs: + if log["data"]["message"] == ignore_log["data"][ + "message" + ] and self.match_evaluator.match_result(ignore_log, log, test=False): + ignored = True + break + if not ignored: + final_logs.append(log) + return final_logs + + async def check_log_messages(self, operations, spec): + def format_logs(log_list): + client_to_log = defaultdict(list) + for log in log_list: + if log.module == "ocsp_support": + continue + data = json_util.loads(log.getMessage()) + client_id = data.get("clientId", data.get("topologyId")) + client_to_log[client_id].append( + { + "level": log.levelname.lower(), + "component": log.name.replace("pymongo.", "", 1), + "data": data, + } + ) + return client_to_log + + with self.assertLogs("pymongo", level="DEBUG") as cm: + await self.run_operations(operations) + formatted_logs = format_logs(cm.records) + for client in spec: + components = set() + for message in client["messages"]: + components.add(message["component"]) + + clientid = self.entity_map[client["client"]]._topology_settings._topology_id + actual_logs = formatted_logs[clientid] + actual_logs = [log for log in actual_logs if log["component"] in components] + + ignore_logs = client.get("ignoreMessages", []) + if ignore_logs: + actual_logs = self.process_ignore_messages(ignore_logs, actual_logs) + + if client.get("ignoreExtraMessages", False): + actual_logs = actual_logs[: len(client["messages"])] + self.assertEqual( + len(client["messages"]), + len(actual_logs), + f"expected {client['messages']} but got {actual_logs}", + ) + for expected_msg, actual_msg in zip(client["messages"], actual_logs): + expected_data, actual_data = expected_msg.pop("data"), actual_msg.pop("data") 
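+                # The "data" payload and the remaining metadata (level,
+                # component) are matched separately below.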
+
+                if "failureIsRedacted" in expected_msg:
+                    self.assertIn("failure", actual_data)
+                    should_redact = expected_msg.pop("failureIsRedacted")
+                    if should_redact:
+                        actual_fields = set(json_util.loads(actual_data["failure"]).keys())
+                        self.assertTrue(
+                            {"code", "codeName", "errorLabels"}.issuperset(actual_fields)
+                        )
+
+                self.match_evaluator.match_result(expected_data, actual_data)
+                self.match_evaluator.match_result(expected_msg, actual_msg)
+
+    async def verify_outcome(self, spec):
+        for collection_data in spec:
+            coll_name = collection_data["collectionName"]
+            db_name = collection_data["databaseName"]
+            expected_documents = collection_data["documents"]
+
+            coll = self.client.get_database(db_name).get_collection(
+                coll_name,
+                read_preference=ReadPreference.PRIMARY,
+                read_concern=ReadConcern(level="local"),
+            )
+
+            if expected_documents:
+                sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"])
+                actual_documents = await coll.find({}, sort=[("_id", ASCENDING)]).to_list()
+                self.assertListEqual(sorted_expected_documents, actual_documents)
+
+    async def run_scenario(self, spec, uri=None):
+        # Kill all sessions before and after each test to prevent an open
+        # transaction (from a test failure) from blocking collection/database
+        # operations during test set up and tear down.
+        await self.kill_all_sessions()
+
+        # Handle flaky tests.
+        flaky_tests = [
+            ("PYTHON-5170", ".*test_discovery_and_monitoring.*"),
+            ("PYTHON-5174", ".*Driver_extends_timeout_while_streaming"),
+            ("PYTHON-5315", ".*TestSrvPolling.test_recover_from_initially_.*"),
+            ("PYTHON-4987", ".*UnknownTransactionCommitResult_labels_to_connection_errors"),
+            ("PYTHON-3689", ".*TestProse.test_load_balancing"),
+            ("PYTHON-3522", ".*csot.*"),
+        ]
+        for reason, flaky_test in flaky_tests:
+            if re.match(flaky_test.lower(), self.id().lower()) is not None:
+                func_name = self.id()
+                options = dict(reason=reason, reset_func=self.asyncSetUp, func_name=func_name)
+                if "csot" in func_name.lower():
+                    options["max_runs"] = 3
+                    options["affects_cpython_linux"] = True
+                decorator = flaky(**options)
+                await decorator(self._run_scenario)(spec, uri)
+                return
+        await self._run_scenario(spec, uri)
+
+    async def _run_scenario(self, spec, uri=None):
+        # maybe skip test manually
+        self.maybe_skip_test(spec)
+
+        # process test-level runOnRequirements
+        run_on_spec = spec.get("runOnRequirements", [])
+        if not await self.should_run_on(run_on_spec):
+            raise unittest.SkipTest("runOnRequirements not satisfied")
+
+        # process skipReason
+        skip_reason = spec.get("skipReason", None)
+        if skip_reason is not None:
+            raise unittest.SkipTest(f"{skip_reason}")
+
+        # process createEntities
+        self._uri = uri
+        self.entity_map = EntityMapUtil(self)
+        await self.entity_map.create_entities_from_spec(
+            self.TEST_SPEC.get("createEntities", []), uri=uri
+        )
+        self._cluster_time = None
+        # process initialData
+        if "initialData" in self.TEST_SPEC:
+            await self.insert_initial_data(self.TEST_SPEC["initialData"])
+            self._cluster_time = self.client._topology.max_cluster_time()
+            await self.entity_map.advance_cluster_times(self._cluster_time)
+
+        if "expectLogMessages" in spec:
+            expect_log_messages = spec["expectLogMessages"]
+            self.assertTrue(expect_log_messages, "expectLogMessages must be non-empty")
+            await self.check_log_messages(spec["operations"], expect_log_messages)
+        else:
+            # process operations
+            await self.run_operations(spec["operations"])
+
+        # process expectEvents
+        if "expectEvents" in spec:
+            expect_events = spec["expectEvents"]
+
self.assertTrue(expect_events, "expectEvents must be non-empty") + self.check_events(expect_events) + + # process outcome + await self.verify_outcome(spec.get("outcome", [])) + + +class UnifiedSpecTestMeta(type): + """Metaclass for generating test classes.""" + + TEST_SPEC: Any + EXPECTED_FAILURES: Any + + def __init__(cls, *args, **kwargs): + super().__init__(*args, **kwargs) + + def create_test(spec): + async def test_case(self): + await self.run_scenario(spec) + + return test_case + + for test_spec in cls.TEST_SPEC["tests"]: + description = test_spec["description"] + test_name = "test_{}".format( + description.strip(". ").replace(" ", "_").replace(".", "_") + ) + test_method = create_test(copy.deepcopy(test_spec)) + test_method.__name__ = str(test_name) + + for fail_pattern in cls.EXPECTED_FAILURES: + if re.search(fail_pattern, description): + test_method = unittest.expectedFailure(test_method) + break + + setattr(cls, test_name, test_method) + + +_ALL_MIXIN_CLASSES = [ + UnifiedSpecTestMixinV1, + # add mixin classes for new schema major versions here +] + + +_SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS = { + KLASS.SCHEMA_VERSION[0]: KLASS for KLASS in _ALL_MIXIN_CLASSES +} + + +def generate_test_classes( + test_path, + module=__name__, + class_name_prefix="", + expected_failures=[], # noqa: B006 + bypass_test_generation_errors=False, + **kwargs, +): + """Method for generating test classes. Returns a dictionary where keys are + the names of test classes and values are the test class objects. + """ + test_klasses = {} + + def test_base_class_factory(test_spec): + """Utility that creates the base class to use for test generation. + This is needed to ensure that cls.TEST_SPEC is appropriately set when + the metaclass __init__ is invoked. + """ + + class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)): # type: ignore + TEST_SPEC = test_spec + EXPECTED_FAILURES = expected_failures + + base = SpecTestBase + + # Add "encryption" marker if the "csfle" runOnRequirement is set. + for req in test_spec.get("runOnRequirements", []): + if "csfle" in req: + base = pytest.mark.encryption(base) + + return base + + for dirpath, _, filenames in os.walk(test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + fpath = os.path.join(dirpath, filename) + with open(fpath) as scenario_stream: + # Use tz_aware=False to match how CodecOptions decodes + # dates. 
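+                    # (tz_aware=False decodes BSON datetimes as naive
+                    # datetime objects.)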
+ opts = json_util.JSONOptions(tz_aware=False) + scenario_def = json_util.loads(scenario_stream.read(), json_options=opts) + + test_type = os.path.splitext(filename)[0] + snake_class_name = "Test{}_{}_{}".format( + class_name_prefix, + dirname.replace("-", "_"), + test_type.replace("-", "_").replace(".", "_"), + ) + class_name = snake_to_camel(snake_class_name) + + try: + schema_version = Version.from_string(scenario_def["schemaVersion"]) + mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0]) + if mixin_class is None: + raise ValueError( + f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'" + ) + module_dict = {"__module__": module, "TEST_PATH": test_path} + module_dict.update(kwargs) + test_klasses[class_name] = type( + class_name, + ( + mixin_class, + test_base_class_factory(scenario_def), + ), + module_dict, + ) + except Exception: + if bypass_test_generation_errors: + continue + raise + + return test_klasses diff --git a/test/asynchronous/utils.py b/test/asynchronous/utils.py new file mode 100644 index 0000000000..02ba46c71a --- /dev/null +++ b/test/asynchronous/utils.py @@ -0,0 +1,276 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing pymongo that require synchronization.""" +from __future__ import annotations + +import asyncio +import contextlib +import os +import random +import sys +import threading # Used in the synchronized version of this file +import time +import traceback +from functools import wraps +from inspect import iscoroutinefunction + +from bson.son import SON +from pymongo import AsyncMongoClient +from pymongo.errors import ConfigurationError +from pymongo.hello import HelloCompat +from pymongo.lock import _async_create_lock +from pymongo.operations import _Op +from pymongo.read_preferences import ReadPreference +from pymongo.server_selectors import any_server_selector, writable_server_selector +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration + +_IS_SYNC = False + + +async def async_get_pool(client): + """Get the standalone, primary, or mongos pool.""" + topology = await client._get_topology() + server = await topology._select_server(writable_server_selector, _Op.TEST) + return server.pool + + +async def async_get_pools(client): + """Get all pools.""" + return [ + server.pool + for server in await (await client._get_topology()).select_servers( + any_server_selector, _Op.TEST + ) + ] + + +async def async_wait_until(predicate, success_description, timeout=10): + """Wait up to 10 seconds (by default) for predicate to be true. + + E.g.: + + wait_until(lambda: client.primary == ('a', 1), + 'connect to the primary') + + If the lambda-expression isn't true after 10 seconds, we raise + AssertionError("Didn't ever connect to the primary"). + + Returns the predicate's first true value. 
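+
+    The predicate may be either a plain function or a coroutine function.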
+ """ + start = time.time() + interval = min(float(timeout) / 100, 0.1) + while True: + if iscoroutinefunction(predicate): + retval = await predicate() + else: + retval = predicate() + if retval: + return retval + + if time.time() - start > timeout: + raise AssertionError("Didn't ever %s" % success_description) + + await asyncio.sleep(interval) + + +async def async_is_mongos(client): + res = await client.admin.command(HelloCompat.LEGACY_CMD) + return res.get("msg", "") == "isdbgrid" + + +async def async_ensure_all_connected(client: AsyncMongoClient) -> None: + """Ensure that the client's connection pool has socket connections to all + members of a replica set. Raises ConfigurationError when called with a + non-replica set client. + + Depending on the use-case, the caller may need to clear any event listeners + that are configured on the client. + """ + hello: dict = await client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" not in hello: + raise ConfigurationError("cluster is not a replica set") + + target_host_list = set(hello["hosts"] + hello.get("passives", [])) + connected_host_list = {hello["me"]} + + # Run hello until we have connected to each host at least once. + async def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello: dict = await client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list + + try: + + async def predicate(): + return target_host_list == await discover() + + await async_wait_until(predicate, "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) + + +async def asyncAssertRaisesExactly(cls, fn, *args, **kwargs): + """ + Unlike the standard assertRaises, this checks that a function raises a + specific class of exception, and not a subclass. E.g., check that + MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. + """ + try: + await fn(*args, **kwargs) + except Exception as e: + assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" + else: + raise AssertionError("%s not raised" % cls) + + +async def async_set_fail_point(client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + await client.admin.command(cmd) + + +async def async_joinall(tasks): + """Join threads with a 5-minute timeout, assert joins succeeded""" + if _IS_SYNC: + for t in tasks: + t.join(300) + assert not t.is_alive(), "Thread %s hung" % t + else: + await asyncio.wait([t.task for t in tasks if t is not None], timeout=300) + + +def flaky( + *, + reason=None, + max_runs=2, + min_passes=1, + delay=1, + affects_cpython_linux=False, + func_name=None, + reset_func=None, +): + """Decorate a test as flaky. 
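+
+    The wrapped test is retried until min_passes runs succeed, up to max_runs
+    total attempts, sleeping `delay` seconds between attempts.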
+
+    :param reason: the reason why the test is flaky
+    :param max_runs: the maximum number of runs before raising an error
+    :param min_passes: the minimum number of passing runs
+    :param delay: the delay in seconds between retries
+    :param affects_cpython_linux: whether the test is flaky on CPython on Linux
+    :param func_name: the name of the function, used for the retry message
+    :param reset_func: a function to call before retrying
+
+    """
+    if reason is None:
+        raise ValueError("flaky requires a reason input")
+    is_cpython_linux = sys.platform == "linux" and sys.implementation.name == "cpython"
+    disable_flaky = "DISABLE_FLAKY" in os.environ
+    if "CI" not in os.environ and "ENABLE_FLAKY" not in os.environ:
+        disable_flaky = True
+
+    if disable_flaky or (is_cpython_linux and not affects_cpython_linux):
+        max_runs = 1
+        min_passes = 1
+
+    def decorator(target_func):
+        @wraps(target_func)
+        async def wrapper(*args, **kwargs):
+            passes = 0
+            for i in range(max_runs):
+                try:
+                    result = await target_func(*args, **kwargs)
+                    passes += 1
+                    if passes == min_passes:
+                        return result
+                except Exception as e:
+                    if i == max_runs - 1:
+                        raise e
+                    print(
+                        f"Retrying after attempt {i+1} of {func_name or target_func.__name__} failed with ({reason}):\n"
+                        f"{traceback.format_exc()}",
+                        file=sys.stderr,
+                    )
+                    await asyncio.sleep(delay)
+                    if reset_func:
+                        await reset_func()
+
+        return wrapper
+
+    return decorator
+
+
+class AsyncMockConnection:
+    def __init__(self):
+        self.cancel_context = _CancellationContext()
+        self.more_to_come = False
+        self.id = random.randint(0, 100)
+        self.is_sdam = False
+        self.server_connection_id = random.randint(0, 100)
+
+    def close_conn(self, reason):
+        pass
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+
+class AsyncMockPool:
+    def __init__(self, address, options, is_sdam=False, client_id=None):
+        self.gen = _PoolGeneration()
+        self._lock = _async_create_lock()
+        self.opts = options
+        self.operation_count = 0
+        self.conns = []
+
+    def stale_generation(self, gen, service_id):
+        return self.gen.stale(gen, service_id)
+
+    @contextlib.asynccontextmanager
+    async def checkout(self, handler=None):
+        yield AsyncMockConnection()
+
+    async def checkin(self, *args, **kwargs):
+        pass
+
+    async def _reset(self, service_id=None):
+        async with self._lock:
+            self.gen.inc(service_id)
+
+    async def ready(self):
+        pass
+
+    async def reset(self, service_id=None, interrupt_connections=False):
+        await self._reset()
+
+    async def reset_without_pause(self):
+        await self._reset()
+
+    async def close(self):
+        await self._reset()
+
+    async def update_is_writable(self, is_writable):
+        pass
+
+    async def remove_stale_sockets(self, *args, **kwargs):
+        pass
diff --git a/test/asynchronous/utils_selection_tests.py b/test/asynchronous/utils_selection_tests.py
new file mode 100644
index 0000000000..d6b92fadb4
--- /dev/null
+++ b/test/asynchronous/utils_selection_tests.py
@@ -0,0 +1,204 @@
+# Copyright 2015-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations + +import datetime +import os +import sys +from test.asynchronous import AsyncPyMongoTestCase +from test.asynchronous.utils import AsyncMockPool + +sys.path[0:0] = [""] + +from test import unittest +from test.pymongo_mocks import DummyMonitor +from test.utils_selection_tests_shared import ( + get_addresses, + get_topology_type_name, + make_server_description, +) +from test.utils_shared import parse_read_preference + +from bson import json_util +from pymongo.asynchronous.settings import TopologySettings +from pymongo.asynchronous.topology import Topology +from pymongo.common import HEARTBEAT_FREQUENCY +from pymongo.errors import AutoReconnect, ConfigurationError +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector + +_IS_SYNC = False + + +def get_topology_settings_dict(**kwargs): + settings = { + "monitor_class": DummyMonitor, + "heartbeat_frequency": HEARTBEAT_FREQUENCY, + "pool_class": AsyncMockPool, + } + settings.update(kwargs) + return settings + + +async def create_topology(scenario_def, **kwargs): + # Initialize topologies. + if "heartbeatFrequencyMS" in scenario_def: + frequency = int(scenario_def["heartbeatFrequencyMS"]) / 1000.0 + else: + frequency = HEARTBEAT_FREQUENCY + + seeds, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + + topology_type = get_topology_type_name(scenario_def) + if topology_type == "LoadBalanced": + kwargs.setdefault("load_balanced", True) + # Force topology description to ReplicaSet + elif topology_type in ["ReplicaSetNoPrimary", "ReplicaSetWithPrimary"]: + kwargs.setdefault("replica_set_name", "rs") + settings = get_topology_settings_dict(heartbeat_frequency=frequency, seeds=seeds, **kwargs) + + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + topology = Topology(TopologySettings(**settings)) + await topology.open() + + # Update topologies with server descriptions. + for server in scenario_def["topology_description"]["servers"]: + server_description = make_server_description(server, hosts) + await topology.on_change(server_description) + + # Assert that descriptions match + assert ( + scenario_def["topology_description"]["type"] == topology.description.topology_type_name + ), topology.description.topology_type_name + + return topology + + +def create_test(scenario_def): + async def run_scenario(self): + _, hosts = get_addresses(scenario_def["topology_description"]["servers"]) + # "Eligible servers" is defined in the server selection spec as + # the set of servers matching both the ReadPreference's mode + # and tag sets. + top_latency = await create_topology(scenario_def) + + # "In latency window" is defined in the server selection + # spec as the subset of suitable_servers that falls within the + # allowable latency window. + top_suitable = await create_topology(scenario_def, local_threshold_ms=1000000) + + # Create server selector. + if scenario_def.get("operation") == "write": + pref = writable_server_selector + else: + # Make first letter lowercase to match read_pref's modes. + pref_def = scenario_def["read_preference"] + if scenario_def.get("error"): + with self.assertRaises((ConfigurationError, ValueError)): + # Error can be raised when making Read Pref or selecting. 
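+                    # (both construction and selection run inside this block)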
+ pref = parse_read_preference(pref_def) + await top_latency.select_server(pref, _Op.TEST) + return + + pref = parse_read_preference(pref_def) + + # Select servers. + if not scenario_def.get("suitable_servers"): + with self.assertRaises(AutoReconnect): + await top_suitable.select_server(pref, _Op.TEST, server_selection_timeout=0) + + return + + if not scenario_def["in_latency_window"]: + with self.assertRaises(AutoReconnect): + await top_latency.select_server(pref, _Op.TEST, server_selection_timeout=0) + + return + + actual_suitable_s = await top_suitable.select_servers( + pref, _Op.TEST, server_selection_timeout=0 + ) + actual_latency_s = await top_latency.select_servers( + pref, _Op.TEST, server_selection_timeout=0 + ) + + expected_suitable_servers = {} + for server in scenario_def["suitable_servers"]: + server_description = make_server_description(server, hosts) + expected_suitable_servers[server["address"]] = server_description + + actual_suitable_servers = {} + for s in actual_suitable_s: + actual_suitable_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description + + self.assertEqual(len(actual_suitable_servers), len(expected_suitable_servers)) + for k, actual in actual_suitable_servers.items(): + expected = expected_suitable_servers[k] + self.assertEqual(expected.address, actual.address) + self.assertEqual(expected.server_type, actual.server_type) + self.assertEqual(expected.round_trip_time, actual.round_trip_time) + self.assertEqual(expected.tags, actual.tags) + self.assertEqual(expected.all_hosts, actual.all_hosts) + + expected_latency_servers = {} + for server in scenario_def["in_latency_window"]: + server_description = make_server_description(server, hosts) + expected_latency_servers[server["address"]] = server_description + + actual_latency_servers = {} + for s in actual_latency_s: + actual_latency_servers[ + "%s:%d" % (s.description.address[0], s.description.address[1]) + ] = s.description + + self.assertEqual(len(actual_latency_servers), len(expected_latency_servers)) + for k, actual in actual_latency_servers.items(): + expected = expected_latency_servers[k] + self.assertEqual(expected.address, actual.address) + self.assertEqual(expected.server_type, actual.server_type) + self.assertEqual(expected.round_trip_time, actual.round_trip_time) + self.assertEqual(expected.tags, actual.tags) + self.assertEqual(expected.all_hosts, actual.all_hosts) + + return run_scenario + + +def create_selection_tests(test_dir): + class TestAllScenarios(AsyncPyMongoTestCase): + pass + + for dirpath, _, filenames in os.walk(test_dir): + dirname = os.path.split(dirpath) + dirname = os.path.split(dirname[-2])[-1] + "_" + dirname[-1] + + for filename in filenames: + if os.path.splitext(filename)[1] != ".json": + continue + with open(os.path.join(dirpath, filename)) as scenario_stream: + scenario_def = json_util.loads(scenario_stream.read()) + + # Construct test from scenario. + new_test = create_test(scenario_def) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" + + new_test.__name__ = test_name + setattr(TestAllScenarios, new_test.__name__, new_test) + + return TestAllScenarios diff --git a/test/asynchronous/utils_spec_runner.py b/test/asynchronous/utils_spec_runner.py new file mode 100644 index 0000000000..496c28a045 --- /dev/null +++ b/test/asynchronous/utils_spec_runner.py @@ -0,0 +1,817 @@ +# Copyright 2019-present MongoDB, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for testing driver specs.""" +from __future__ import annotations + +import asyncio +import functools +import os +import time +import unittest +from collections import abc +from inspect import iscoroutinefunction +from test.asynchronous import AsyncIntegrationTest, async_client_context, client_knobs +from test.asynchronous.helpers import ConcurrentRunner +from test.utils_shared import ( + CMAPListener, + CompareType, + EventListener, + OvertCommandListener, + ScenarioDict, + ServerAndTopologyEventListener, + camel_to_snake, + camel_to_snake_args, + parse_spec_options, + prepare_spec_arguments, +) +from typing import List + +from bson import ObjectId, decode, encode, json_util +from bson.binary import Binary +from bson.int64 import Int64 +from bson.son import SON +from gridfs import GridFSBucket +from gridfs.asynchronous.grid_file import AsyncGridFSBucket +from pymongo.asynchronous import client_session +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.errors import AutoReconnect, BulkWriteError, OperationFailure, PyMongoError +from pymongo.lock import _async_cond_wait, _async_create_condition, _async_create_lock +from pymongo.read_concern import ReadConcern +from pymongo.read_preferences import ReadPreference +from pymongo.results import BulkWriteResult, _WriteResult +from pymongo.write_concern import WriteConcern + +_IS_SYNC = False + + +class SpecRunnerTask(ConcurrentRunner): + def __init__(self, name): + super().__init__(name=name) + self.exc = None + self.daemon = True + self.cond = _async_create_condition(_async_create_lock()) + self.ops = [] + + async def schedule(self, work): + self.ops.append(work) + async with self.cond: + self.cond.notify() + + async def stop(self): + self.stopped = True + async with self.cond: + self.cond.notify() + + async def run(self): + while not self.stopped or self.ops: + if not self.ops: + async with self.cond: + await _async_cond_wait(self.cond, 10) + if self.ops: + try: + work = self.ops.pop(0) + await work() + except Exception as exc: + self.exc = exc + await self.stop() + + +class AsyncSpecTestCreator: + """Class to create test cases from specifications.""" + + def __init__(self, create_test, test_class, test_path): + """Create a TestCreator object. + + :Parameters: + - `create_test`: callback that returns a test case. The callback + must accept the following arguments - a dictionary containing the + entire test specification (the `scenario_def`), a dictionary + containing the specification for which the test case will be + generated (the `test_def`). + - `test_class`: the unittest.TestCase class in which to create the + test case. + - `test_path`: path to the directory containing the JSON files with + the test specifications. 
+ """ + self._create_test = create_test + self._test_class = test_class + self.test_path = test_path + + def _ensure_min_max_server_version(self, scenario_def, method): + """Test modifier that enforces a version range for the server on a + test case. + """ + if "minServerVersion" in scenario_def: + min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) + if min_ver is not None: + method = async_client_context.require_version_min(*min_ver)(method) + + if "maxServerVersion" in scenario_def: + max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) + if max_ver is not None: + method = async_client_context.require_version_max(*max_ver)(method) + + return method + + @staticmethod + async def valid_topology(run_on_req): + return await async_client_context.is_topology_type( + run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) + ) + + @staticmethod + def min_server_version(run_on_req): + version = run_on_req.get("minServerVersion") + if version: + min_ver = tuple(int(elt) for elt in version.split(".")) + return async_client_context.version >= min_ver + return True + + @staticmethod + def max_server_version(run_on_req): + version = run_on_req.get("maxServerVersion") + if version: + max_ver = tuple(int(elt) for elt in version.split(".")) + return async_client_context.version <= max_ver + return True + + @staticmethod + def valid_auth_enabled(run_on_req): + if "authEnabled" in run_on_req: + if run_on_req["authEnabled"]: + return async_client_context.auth_enabled + return not async_client_context.auth_enabled + return True + + async def should_run_on(self, scenario_def): + run_on = scenario_def.get("runOn", []) + if not run_on: + # Always run these tests. + return True + + for req in run_on: + if ( + await self.valid_topology(req) + and self.min_server_version(req) + and self.max_server_version(req) + and self.valid_auth_enabled(req) + ): + return True + return False + + def ensure_run_on(self, scenario_def, method): + """Test modifier that enforces a 'runOn' on a test case.""" + + async def predicate(): + return await self.should_run_on(scenario_def) + + return async_client_context._require(predicate, "runOn not satisfied", method) + + def tests(self, scenario_def): + """Allow CMAP spec test to override the location of test.""" + return scenario_def["tests"] + + async def _create_tests(self): + for dirpath, _, filenames in os.walk(self.test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: # noqa: ASYNC101, RUF100 + # Use tz_aware=False to match how CodecOptions decodes + # dates. + opts = json_util.JSONOptions(tz_aware=False) + scenario_def = ScenarioDict( + json_util.loads(scenario_stream.read(), json_options=opts) + ) + + test_type = os.path.splitext(filename)[0] + + # Construct test from scenario. 
+ for test_def in self.tests(scenario_def): + test_name = "test_{}_{}_{}".format( + dirname, + test_type.replace("-", "_").replace(".", "_"), + str(test_def["description"].replace(" ", "_").replace(".", "_")), + ) + + new_test = self._create_test(scenario_def, test_def, test_name) + new_test = self._ensure_min_max_server_version(scenario_def, new_test) + new_test = self.ensure_run_on(scenario_def, new_test) + + new_test.__name__ = test_name + setattr(self._test_class, new_test.__name__, new_test) + + def create_tests(self): + if _IS_SYNC: + self._create_tests() + else: + asyncio.run(self._create_tests()) + + +class AsyncSpecRunner(AsyncIntegrationTest): + mongos_clients: List + knobs: client_knobs + listener: EventListener + + async def asyncSetUp(self) -> None: + await super().asyncSetUp() + self.mongos_clients = [] + + # Speed up the tests by decreasing the heartbeat frequency. + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.targets = {} + self.listener = None # type: ignore + self.pool_listener = None + self.server_listener = None + self.maxDiff = None + + async def asyncTearDown(self) -> None: + self.knobs.disable() + + async def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + await self.configure_fail_point(client, command_args) + + async def targeted_fail_point(self, session, fail_point): + """Run the targetedFailPoint test operation. + + Enable the fail point on the session's pinned mongos. + """ + clients = {c.address: c for c in self.mongos_clients} + client = clients[session._pinned_address] + await self.configure_fail_point(client, fail_point) + self.addAsyncCleanup(self.set_fail_point, {"mode": "off"}) + + def assert_session_pinned(self, session): + """Run the assertSessionPinned test operation. + + Assert that the given session is pinned. + """ + self.assertIsNotNone(session._transaction.pinned_address) + + def assert_session_unpinned(self, session): + """Run the assertSessionUnpinned test operation. + + Assert that the given session is not pinned. 
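+        Both the session itself and its transaction must be unpinned.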
+ """ + self.assertIsNone(session._pinned_address) + self.assertIsNone(session._transaction.pinned_address) + + async def assert_collection_exists(self, database, collection): + """Run the assertCollectionExists test operation.""" + db = self.client[database] + self.assertIn(collection, await db.list_collection_names()) + + async def assert_collection_not_exists(self, database, collection): + """Run the assertCollectionNotExists test operation.""" + db = self.client[database] + self.assertNotIn(collection, await db.list_collection_names()) + + async def assert_index_exists(self, database, collection, index): + """Run the assertIndexExists test operation.""" + coll = self.client[database][collection] + self.assertIn(index, [doc["name"] async for doc in await coll.list_indexes()]) + + async def assert_index_not_exists(self, database, collection, index): + """Run the assertIndexNotExists test operation.""" + coll = self.client[database][collection] + self.assertNotIn(index, [doc["name"] async for doc in await coll.list_indexes()]) + + async def wait(self, ms): + """Run the "wait" test operation.""" + await asyncio.sleep(ms / 1000.0) + + def assertErrorLabelsContain(self, exc, expected_labels): + labels = [l for l in expected_labels if exc.has_error_label(l)] + self.assertEqual(labels, expected_labels) + + def assertErrorLabelsOmit(self, exc, omit_labels): + for label in omit_labels: + self.assertFalse( + exc.has_error_label(label), msg=f"error labels should not contain {label}" + ) + + async def kill_all_sessions(self): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + try: + await client.admin.command("killAllSessions", []) + except (OperationFailure, AutoReconnect): + # "operation was interrupted" by killing the command's + # own session. + # On 8.0+ killAllSessions sometimes returns a network error. + pass + + def check_command_result(self, expected_result, result): + # Only compare the keys in the expected result. + filtered_result = {} + for key in expected_result: + try: + filtered_result[key] = result[key] + except KeyError: + pass + self.assertEqual(filtered_result, expected_result) + + # TODO: factor the following function with test_crud.py. + def check_result(self, expected_result, result): + if isinstance(result, _WriteResult): + for res in expected_result: + prop = camel_to_snake(res) + # SPEC-869: Only BulkWriteResult has upserted_count. + if prop == "upserted_count" and not isinstance(result, BulkWriteResult): + if result.upserted_id is not None: + upserted_count = 1 + else: + upserted_count = 0 + self.assertEqual(upserted_count, expected_result[res], prop) + elif prop == "inserted_ids": + # BulkWriteResult does not have inserted_ids. + if isinstance(result, BulkWriteResult): + self.assertEqual(len(expected_result[res]), result.inserted_count) + else: + # InsertManyResult may be compared to [id1] from the + # crud spec or {"0": id1} from the retryable write spec. + ids = expected_result[res] + if isinstance(ids, dict): + ids = [ids[str(i)] for i in range(len(ids))] + + self.assertEqual(ids, result.inserted_ids, prop) + elif prop == "upserted_ids": + # Convert indexes from strings to integers. 
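+                    # e.g. the spec's {"0": id1} becomes {0: id1} to match
+                    # BulkWriteResult.upserted_ids.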
+ ids = expected_result[res] + expected_ids = {} + for str_index in ids: + expected_ids[int(str_index)] = ids[str_index] + self.assertEqual(expected_ids, result.upserted_ids, prop) + else: + self.assertEqual(getattr(result, prop), expected_result[res], prop) + + return True + else: + + def _helper(expected_result, result): + if isinstance(expected_result, abc.Mapping): + for i in expected_result.keys(): + self.assertEqual(expected_result[i], result[i]) + + elif isinstance(expected_result, list): + for i, k in zip(expected_result, result): + _helper(i, k) + else: + self.assertEqual(expected_result, result) + + _helper(expected_result, result) + return None + + def get_object_name(self, op): + """Allow subclasses to override handling of 'object' + + Transaction spec says 'object' is required. + """ + return op["object"] + + @staticmethod + def parse_options(opts): + return parse_spec_options(opts) + + async def run_operation(self, sessions, collection, operation): + original_collection = collection + name = camel_to_snake(operation["name"]) + if name == "run_command": + name = "command" + elif name == "download_by_name": + name = "open_download_stream_by_name" + elif name == "download": + name = "open_download_stream" + elif name == "map_reduce": + self.skipTest("PyMongo does not support mapReduce") + elif name == "count": + self.skipTest("PyMongo does not support count") + + database = collection.database + collection = database.get_collection(collection.name) + if "collectionOptions" in operation: + collection = collection.with_options( + **self.parse_options(operation["collectionOptions"]) + ) + + object_name = self.get_object_name(operation) + if object_name == "gridfsbucket": + # Only create the GridFSBucket when we need it (for the gridfs + # retryable reads tests). + obj = AsyncGridFSBucket(database, bucket_name=collection.name) + else: + objects = { + "client": database.client, + "database": database, + "collection": collection, + "testRunner": self, + } + objects.update(sessions) + obj = objects[object_name] + + # Combine arguments with options and handle special cases. + arguments = operation.get("arguments", {}) + arguments.update(arguments.pop("options", {})) + self.parse_options(arguments) + + cmd = getattr(obj, name) + + with_txn_callback = functools.partial( + self.run_operations, sessions, original_collection, in_with_transaction=True + ) + prepare_spec_arguments(operation, arguments, name, sessions, with_txn_callback) + + if name == "run_on_thread": + args = {"sessions": sessions, "collection": collection} + args.update(arguments) + arguments = args + + if not _IS_SYNC and iscoroutinefunction(cmd): + result = await cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) + # Cleanup open change stream cursors. + if name == "watch": + self.addAsyncCleanup(result.close) + + if name == "aggregate": + if arguments["pipeline"] and "$out" in arguments["pipeline"][-1]: + # Read from the primary to ensure causal consistency. 
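+                # The aggregation itself returns no documents; read the
+                # results back from the $out collection instead.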
+ out = collection.database.get_collection( + arguments["pipeline"][-1]["$out"], read_preference=ReadPreference.PRIMARY + ) + return out.find() + if "download" in name: + result = Binary(result.read()) + + if isinstance(result, AsyncCursor) or isinstance(result, AsyncCommandCursor): + return await result.to_list() + + return result + + def allowable_errors(self, op): + """Allow encryption spec to override expected error classes.""" + return (PyMongoError,) + + async def _run_op(self, sessions, collection, op, in_with_transaction): + expected_result = op.get("result") + if expect_error(op): + with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: + await self.run_operation(sessions, collection, op.copy()) + exc = context.exception + if expect_error_message(expected_result): + if isinstance(exc, BulkWriteError): + errmsg = str(exc.details).lower() + else: + errmsg = str(exc).lower() + self.assertIn(expected_result["errorContains"].lower(), errmsg) + if expect_error_code(expected_result): + self.assertEqual(expected_result["errorCodeName"], exc.details.get("codeName")) + if expect_error_labels_contain(expected_result): + self.assertErrorLabelsContain(exc, expected_result["errorLabelsContain"]) + if expect_error_labels_omit(expected_result): + self.assertErrorLabelsOmit(exc, expected_result["errorLabelsOmit"]) + if expect_timeout_error(expected_result): + self.assertIsInstance(exc, PyMongoError) + if not exc.timeout: + # Re-raise the exception for better diagnostics. + raise exc + + # Reraise the exception if we're in the with_transaction + # callback. + if in_with_transaction: + raise context.exception + else: + result = await self.run_operation(sessions, collection, op.copy()) + if "result" in op: + if op["name"] == "runCommand": + self.check_command_result(expected_result, result) + else: + self.check_result(expected_result, result) + + async def run_operations(self, sessions, collection, ops, in_with_transaction=False): + for op in ops: + await self._run_op(sessions, collection, op, in_with_transaction) + + # TODO: factor with test_command_monitoring.py + def check_events(self, test, listener, session_ids): + events = listener.started_events + if not len(test["expectations"]): + return + + # Give a nicer message when there are missing or extra events + cmds = decode_raw([event.command for event in events]) + self.assertEqual(len(events), len(test["expectations"]), cmds) + for i, expectation in enumerate(test["expectations"]): + event_type = next(iter(expectation)) + event = events[i] + + # The tests substitute 42 for any number other than 0. + if event.command_name == "getMore" and event.command["getMore"]: + event.command["getMore"] = Int64(42) + elif event.command_name == "killCursors": + event.command["cursors"] = [Int64(42)] + elif event.command_name == "update": + # TODO: remove this once PYTHON-1744 is done. + # Add upsert and multi fields back into expectations. + updates = expectation[event_type]["command"]["updates"] + for update in updates: + update.setdefault("upsert", False) + update.setdefault("multi", False) + + # Replace afterClusterTime: 42 with actual afterClusterTime. 
+            expected_cmd = expectation[event_type]["command"]
+            expected_read_concern = expected_cmd.get("readConcern")
+            if expected_read_concern is not None:
+                time = expected_read_concern.get("afterClusterTime")
+                if time == 42:
+                    actual_time = event.command.get("readConcern", {}).get("afterClusterTime")
+                    if actual_time is not None:
+                        expected_read_concern["afterClusterTime"] = actual_time
+
+            recovery_token = expected_cmd.get("recoveryToken")
+            if recovery_token == 42:
+                expected_cmd["recoveryToken"] = CompareType(dict)
+
+            # Replace lsid with a name like "session0" to match test.
+            if "lsid" in event.command:
+                for name, lsid in session_ids.items():
+                    if event.command["lsid"] == lsid:
+                        event.command["lsid"] = name
+                        break
+
+            for attr, expected in expectation[event_type].items():
+                actual = getattr(event, attr)
+                expected = wrap_types(expected)
+                if isinstance(expected, dict):
+                    for key, val in expected.items():
+                        if val is None:
+                            if key in actual:
+                                self.fail(f"Unexpected key [{key}] in {actual!r}")
+                        elif key not in actual:
+                            self.fail(f"Expected key [{key}] in {actual!r}")
+                        else:
+                            self.assertEqual(
+                                val, decode_raw(actual[key]), f"Key [{key}] in {actual}"
+                            )
+                else:
+                    self.assertEqual(actual, expected)
+
+    def maybe_skip_scenario(self, test):
+        if test.get("skipReason"):
+            self.skipTest(test.get("skipReason"))
+
+    def get_scenario_db_name(self, scenario_def):
+        """Allow subclasses to override a test's database name."""
+        return scenario_def["database_name"]
+
+    def get_scenario_coll_name(self, scenario_def):
+        """Allow subclasses to override a test's collection name."""
+        return scenario_def["collection_name"]
+
+    def get_outcome_coll_name(self, outcome, collection):
+        """Allow subclasses to override the outcome collection."""
+        return collection.name
+
+    async def run_test_ops(self, sessions, collection, test):
+        """Added to allow the retryable writes spec to override a test's
+        operations.
+        """
+        await self.run_operations(sessions, collection, test["operations"])
+
+    def parse_client_options(self, opts):
+        """Allow encryption spec to override clientOptions parsing."""
+        # Convert test['clientOptions'] to dict to avoid a Jython bug using
+        # "**" with ScenarioDict.
+        return dict(opts)
+
+    async def setup_scenario(self, scenario_def):
+        """Allow specs to override a test's setup."""
+        db_name = self.get_scenario_db_name(scenario_def)
+        coll_name = self.get_scenario_coll_name(scenario_def)
+        documents = scenario_def["data"]
+
+        # Set up the collection with as few majority writes as possible.
+        db = async_client_context.client.get_database(db_name)
+        coll_exists = bool(await db.list_collection_names(filter={"name": coll_name}))
+        if coll_exists:
+            await db[coll_name].delete_many({})
+        # Use majority wc only on the final write.
+        wc = WriteConcern(w="majority")
+        if documents:
+            await db.get_collection(coll_name, write_concern=wc).insert_many(documents)
+        elif not coll_exists:
+            # Ensure the collection exists.
+            await db.create_collection(coll_name, write_concern=wc)
+
+    async def run_scenario(self, scenario_def, test):
+        self.maybe_skip_scenario(test)
+
+        # Kill all sessions before and after each test to prevent an open
+        # transaction (from a test failure) from blocking collection/database
+        # operations during test setup and teardown.
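+        # killAllSessions also aborts any transactions those sessions own,
+        # releasing their locks before the next test runs.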
+ await self.kill_all_sessions() + self.addAsyncCleanup(self.kill_all_sessions) + await self.setup_scenario(scenario_def) + database_name = self.get_scenario_db_name(scenario_def) + collection_name = self.get_scenario_coll_name(scenario_def) + # SPEC-1245 workaround StaleDbVersion on distinct + for c in self.mongos_clients: + await c[database_name][collection_name].distinct("x") + + # Configure the fail point before creating the client. + if "failPoint" in test: + fp = test["failPoint"] + await self.set_fail_point(fp) + self.addAsyncCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + listener = OvertCommandListener() + pool_listener = CMAPListener() + server_listener = ServerAndTopologyEventListener() + # Create a new client, to avoid interference from pooled sessions. + client_options = self.parse_client_options(test["clientOptions"]) + use_multi_mongos = test["useMultipleMongoses"] + host = None + if use_multi_mongos: + if async_client_context.load_balancer: + host = async_client_context.MULTI_MONGOS_LB_URI + elif async_client_context.is_mongos: + host = async_client_context.mongos_seeds() + client = await self.async_rs_client( + h=host, event_listeners=[listener, pool_listener, server_listener], **client_options + ) + self.scenario_client = client + self.listener = listener + self.pool_listener = pool_listener + self.server_listener = server_listener + + # Create session0 and session1. + sessions = {} + session_ids = {} + for i in range(2): + # Don't attempt to create sessions if they are not supported by + # the running server version. + if not async_client_context.sessions_enabled: + break + session_name = "session%d" % i + opts = camel_to_snake_args(test["sessionOptions"][session_name]) + if "default_transaction_options" in opts: + txn_opts = self.parse_options(opts["default_transaction_options"]) + txn_opts = client_session.TransactionOptions(**txn_opts) + opts["default_transaction_options"] = txn_opts + + s = client.start_session(**dict(opts)) + + sessions[session_name] = s + # Store lsid so we can access it after end_session, in check_events. + session_ids[session_name] = s.session_id + + self.addAsyncCleanup(end_sessions, sessions) + + collection = client[database_name][collection_name] + await self.run_test_ops(sessions, collection, test) + + await end_sessions(sessions) + + self.check_events(test, listener, session_ids) + + # Disable fail points. + if "failPoint" in test: + fp = test["failPoint"] + await self.set_fail_point( + {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + # Assert final state is expected. + outcome = test["outcome"] + expected_c = outcome.get("collection") + if expected_c is not None: + outcome_coll_name = self.get_outcome_coll_name(outcome, collection) + + # Read from the primary with local read concern to ensure causal + # consistency. + outcome_coll = async_client_context.client[collection.database.name].get_collection( + outcome_coll_name, + read_preference=ReadPreference.PRIMARY, + read_concern=ReadConcern("local"), + ) + actual_data = await outcome_coll.find(sort=[("_id", 1)]).to_list() + + # The expected data needs to be the left hand side here otherwise + # CompareType(Binary) doesn't work. 
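+        # assertEqual(first, second) evaluates first == second, and Python
+        # tries the left operand's __eq__ first, so the CompareType wrappers
+        # must sit on the expected (left) side to drive the comparison.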
+ self.assertEqual(wrap_types(expected_c["data"]), actual_data) + + +def expect_any_error(op): + if isinstance(op, dict): + return op.get("error") + + return False + + +def expect_error_message(expected_result): + if isinstance(expected_result, dict): + return isinstance(expected_result["errorContains"], str) + + return False + + +def expect_error_code(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorCodeName"] + + return False + + +def expect_error_labels_contain(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorLabelsContain"] + + return False + + +def expect_error_labels_omit(expected_result): + if isinstance(expected_result, dict): + return expected_result["errorLabelsOmit"] + + return False + + +def expect_timeout_error(expected_result): + if isinstance(expected_result, dict): + return expected_result["isTimeoutError"] + + return False + + +def expect_error(op): + expected_result = op.get("result") + return ( + expect_any_error(op) + or expect_error_message(expected_result) + or expect_error_code(expected_result) + or expect_error_labels_contain(expected_result) + or expect_error_labels_omit(expected_result) + or expect_timeout_error(expected_result) + ) + + +async def end_sessions(sessions): + for s in sessions.values(): + # Aborts the transaction if it's open. + await s.end_session() + + +def decode_raw(val): + """Decode RawBSONDocuments in the given container.""" + if isinstance(val, (list, abc.Mapping)): + return decode(encode({"v": val}))["v"] + return val + + +TYPES = { + "binData": Binary, + "long": Int64, + "int": int, + "string": str, + "objectId": ObjectId, + "object": dict, + "array": list, +} + + +def wrap_types(val): + """Support $$type assertion in command results.""" + if isinstance(val, list): + return [wrap_types(v) for v in val] + if isinstance(val, abc.Mapping): + typ = val.get("$$type") + if typ: + if isinstance(typ, str): + types = TYPES[typ] + else: + types = tuple(TYPES[t] for t in typ) + return CompareType(types) + d = {} + for key in val: + d[key] = wrap_types(val[key]) + return d + return val diff --git a/test/atlas/test_connection.py b/test/atlas/test_connection.py index a1eb97edee..ac217ab40d 100644 --- a/test/atlas/test_connection.py +++ b/test/atlas/test_connection.py @@ -13,23 +13,22 @@ # limitations under the License. """Test connections to various Atlas cluster types.""" +from __future__ import annotations import os import sys import unittest from collections import defaultdict +from test import PyMongoTestCase + +import pytest sys.path[0:0] = [""] import pymongo -from pymongo.ssl_support import HAS_SNI - -try: - import dns # noqa +from pymongo.ssl_support import _has_sni - HAS_DNS = True -except ImportError: - HAS_DNS = False +pytestmark = pytest.mark.atlas_connect URIS = { @@ -38,78 +37,63 @@ "ATLAS_FREE": os.environ.get("ATLAS_FREE"), "ATLAS_TLS11": os.environ.get("ATLAS_TLS11"), "ATLAS_TLS12": os.environ.get("ATLAS_TLS12"), - "ATLAS_SERVERLESS": os.environ.get("ATLAS_SERVERLESS"), "ATLAS_SRV_REPL": os.environ.get("ATLAS_SRV_REPL"), "ATLAS_SRV_SHRD": os.environ.get("ATLAS_SRV_SHRD"), "ATLAS_SRV_FREE": os.environ.get("ATLAS_SRV_FREE"), "ATLAS_SRV_TLS11": os.environ.get("ATLAS_SRV_TLS11"), "ATLAS_SRV_TLS12": os.environ.get("ATLAS_SRV_TLS12"), - "ATLAS_SRV_SERVERLESS": os.environ.get("ATLAS_SRV_SERVERLESS"), + "ATLAS_X509_DEV_WITH_CERT": os.environ.get("ATLAS_X509_DEV_WITH_CERT"), } -# Set this variable to true to run the SRV tests even when dnspython is not -# installed. 
-MUST_TEST_SRV = os.environ.get("MUST_TEST_SRV") - -def connect(uri): - if not uri: - raise Exception("Must set env variable to test.") - client = pymongo.MongoClient(uri) - # No TLS error - client.admin.command("ping") - # No auth error - client.test.test.count_documents({}) +class TestAtlasConnect(PyMongoTestCase): + def connect(self, uri): + if not uri: + raise Exception("Must set env variable to test.") + client = self.simple_client(uri) + # No TLS error + client.admin.command("ping") + # No auth error + client.test.test.count_documents({}) - -class TestAtlasConnect(unittest.TestCase): - @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") + @unittest.skipUnless(_has_sni(True), "Free tier requires SNI support") def test_free_tier(self): - connect(URIS["ATLAS_FREE"]) + self.connect(URIS["ATLAS_FREE"]) def test_replica_set(self): - connect(URIS["ATLAS_REPL"]) + self.connect(URIS["ATLAS_REPL"]) def test_sharded_cluster(self): - connect(URIS["ATLAS_SHRD"]) + self.connect(URIS["ATLAS_SHRD"]) def test_tls_11(self): - connect(URIS["ATLAS_TLS11"]) + self.connect(URIS["ATLAS_TLS11"]) def test_tls_12(self): - connect(URIS["ATLAS_TLS12"]) - - def test_serverless(self): - connect(URIS["ATLAS_SERVERLESS"]) + self.connect(URIS["ATLAS_TLS12"]) def connect_srv(self, uri): - connect(uri) + self.connect(uri) self.assertIn("mongodb+srv://", uri) - @unittest.skipUnless(HAS_SNI, "Free tier requires SNI support") - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") + @unittest.skipUnless(_has_sni(True), "Free tier requires SNI support") def test_srv_free_tier(self): self.connect_srv(URIS["ATLAS_SRV_FREE"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_replica_set(self): self.connect_srv(URIS["ATLAS_SRV_REPL"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_sharded_cluster(self): self.connect_srv(URIS["ATLAS_SRV_SHRD"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_11(self): self.connect_srv(URIS["ATLAS_SRV_TLS11"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") def test_srv_tls_12(self): self.connect_srv(URIS["ATLAS_SRV_TLS12"]) - @unittest.skipUnless(HAS_DNS or MUST_TEST_SRV, "SRV requires dnspython") - def test_srv_serverless(self): - self.connect_srv(URIS["ATLAS_SRV_SERVERLESS"]) + def test_x509_with_cert(self): + self.connect(URIS["ATLAS_X509_DEV_WITH_CERT"]) def test_uniqueness(self): """Ensure that we don't accidentally duplicate the test URIs.""" @@ -120,7 +104,7 @@ def test_uniqueness(self): duplicates = [names for names in uri_to_names.values() if len(names) > 1] self.assertFalse( duplicates, - "Error: the following env variables have duplicate values: %s" % (duplicates,), + f"Error: the following env variables have duplicate values: {duplicates}", ) diff --git a/test/auth/connection-string.json b/test/auth/connection-string.json deleted file mode 100644 index 2a37ae8df4..0000000000 --- a/test/auth/connection-string.json +++ /dev/null @@ -1,449 +0,0 @@ -{ - "tests": [ - { - "description": "should use the default source and mechanism", - "uri": "mongodb://user:password@localhost", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "admin", - "mechanism": null, - "mechanism_properties": null - } - }, - { - "description": "should use the database when no authSource is specified", - "uri": "mongodb://user:password@localhost/foo", - "valid": true, - "credential": { - 
"username": "user", - "password": "password", - "source": "foo", - "mechanism": null, - "mechanism_properties": null - } - }, - { - "description": "should use the authSource when specified", - "uri": "mongodb://user:password@localhost/foo?authSource=bar", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "bar", - "mechanism": null, - "mechanism_properties": null - } - }, - { - "description": "should recognise the mechanism (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI", - "valid": true, - "credential": { - "username": "user@DOMAIN.COM", - "password": null, - "source": "$external", - "mechanism": "GSSAPI", - "mechanism_properties": { - "SERVICE_NAME": "mongodb" - } - } - }, - { - "description": "should ignore the database (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/foo?authMechanism=GSSAPI", - "valid": true, - "credential": { - "username": "user@DOMAIN.COM", - "password": null, - "source": "$external", - "mechanism": "GSSAPI", - "mechanism_properties": { - "SERVICE_NAME": "mongodb" - } - } - }, - { - "description": "should accept valid authSource (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=$external", - "valid": true, - "credential": { - "username": "user@DOMAIN.COM", - "password": null, - "source": "$external", - "mechanism": "GSSAPI", - "mechanism_properties": { - "SERVICE_NAME": "mongodb" - } - } - }, - { - "description": "should accept generic mechanism property (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true", - "valid": true, - "credential": { - "username": "user@DOMAIN.COM", - "password": null, - "source": "$external", - "mechanism": "GSSAPI", - "mechanism_properties": { - "SERVICE_NAME": "other", - "CANONICALIZE_HOST_NAME": true - } - } - }, - { - "description": "should accept the password (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM:password@localhost/?authMechanism=GSSAPI&authSource=$external", - "valid": true, - "credential": { - "username": "user@DOMAIN.COM", - "password": "password", - "source": "$external", - "mechanism": "GSSAPI", - "mechanism_properties": { - "SERVICE_NAME": "mongodb" - } - } - }, - { - "description": "must raise an error when the authSource is empty", - "uri": "mongodb://user:password@localhost/foo?authSource=", - "valid": false - }, - { - "description": "must raise an error when the authSource is empty without credentials", - "uri": "mongodb://localhost/admin?authSource=", - "valid": false - }, - { - "description": "should throw an exception if authSource is invalid (GSSAPI)", - "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=foo", - "valid": false - }, - { - "description": "should throw an exception if no username (GSSAPI)", - "uri": "mongodb://localhost/?authMechanism=GSSAPI", - "valid": false - }, - { - "description": "should recognize the mechanism (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-CR", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "admin", - "mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should use the database when no authSource is specified (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "foo", - 
"mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should use the authSource when specified (MONGODB-CR)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=MONGODB-CR&authSource=bar", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "bar", - "mechanism": "MONGODB-CR", - "mechanism_properties": null - } - }, - { - "description": "should throw an exception if no username is supplied (MONGODB-CR)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-CR", - "valid": false - }, - { - "description": "should recognize the mechanism (MONGODB-X509)", - "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", - "valid": true, - "credential": { - "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", - "password": null, - "source": "$external", - "mechanism": "MONGODB-X509", - "mechanism_properties": null - } - }, - { - "description": "should ignore the database (MONGODB-X509)", - "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509", - "valid": true, - "credential": { - "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", - "password": null, - "source": "$external", - "mechanism": "MONGODB-X509", - "mechanism_properties": null - } - }, - { - "description": "should accept valid authSource (MONGODB-X509)", - "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509&authSource=$external", - "valid": true, - "credential": { - "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", - "password": null, - "source": "$external", - "mechanism": "MONGODB-X509", - "mechanism_properties": null - } - }, - { - "description": "should recognize the mechanism with no username (MONGODB-X509)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-X509", - "valid": true, - "credential": { - "username": null, - "password": null, - "source": "$external", - "mechanism": "MONGODB-X509", - "mechanism_properties": null - } - }, - { - "description": "should recognize the mechanism with no username when auth source is explicitly specified (MONGODB-X509)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-X509&authSource=$external", - "valid": true, - "credential": { - "username": null, - "password": null, - "source": "$external", - "mechanism": "MONGODB-X509", - "mechanism_properties": null - } - }, - { - "description": "should throw an exception if supplied a password (MONGODB-X509)", - "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-X509", - "valid": false - }, - { - "description": "should throw an exception if authSource is invalid (MONGODB-X509)", - "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509&authSource=bar", - "valid": false - }, - { - "description": "should recognize the mechanism (PLAIN)", - "uri": "mongodb://user:password@localhost/?authMechanism=PLAIN", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "$external", - "mechanism": "PLAIN", - "mechanism_properties": null - } - }, - { - "description": "should use the database when no authSource is specified (PLAIN)", - "uri": 
"mongodb://user:password@localhost/foo?authMechanism=PLAIN", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "foo", - "mechanism": "PLAIN", - "mechanism_properties": null - } - }, - { - "description": "should use the authSource when specified (PLAIN)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN&authSource=bar", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "bar", - "mechanism": "PLAIN", - "mechanism_properties": null - } - }, - { - "description": "should throw an exception if no username (PLAIN)", - "uri": "mongodb://localhost/?authMechanism=PLAIN", - "valid": false - }, - { - "description": "should recognize the mechanism (SCRAM-SHA-1)", - "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-1", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "admin", - "mechanism": "SCRAM-SHA-1", - "mechanism_properties": null - } - }, - { - "description": "should use the database when no authSource is specified (SCRAM-SHA-1)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "foo", - "mechanism": "SCRAM-SHA-1", - "mechanism_properties": null - } - }, - { - "description": "should accept valid authSource (SCRAM-SHA-1)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1&authSource=bar", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "bar", - "mechanism": "SCRAM-SHA-1", - "mechanism_properties": null - } - }, - { - "description": "should throw an exception if no username (SCRAM-SHA-1)", - "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-1", - "valid": false - }, - { - "description": "should recognize the mechanism (SCRAM-SHA-256)", - "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "admin", - "mechanism": "SCRAM-SHA-256", - "mechanism_properties": null - } - }, - { - "description": "should use the database when no authSource is specified (SCRAM-SHA-256)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "foo", - "mechanism": "SCRAM-SHA-256", - "mechanism_properties": null - } - }, - { - "description": "should accept valid authSource (SCRAM-SHA-256)", - "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256&authSource=bar", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "bar", - "mechanism": "SCRAM-SHA-256", - "mechanism_properties": null - } - }, - { - "description": "should throw an exception if no username (SCRAM-SHA-256)", - "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-256", - "valid": false - }, - { - "description": "URI with no auth-related info doesn't create credential", - "uri": "mongodb://localhost/", - "valid": true, - "credential": null - }, - { - "description": "database in URI path doesn't create credentials", - "uri": "mongodb://localhost/foo", - "valid": true, - "credential": null - }, - { - "description": "authSource without username doesn't create credential (default mechanism)", - "uri": "mongodb://localhost/?authSource=foo", - "valid": true, - "credential": null - }, - { - "description": "should throw an 
exception if no username provided (userinfo implies default mechanism)", - "uri": "mongodb://@localhost.com/", - "valid": false - }, - { - "description": "should throw an exception if no username/password provided (userinfo implies default mechanism)", - "uri": "mongodb://:@localhost.com/", - "valid": false - }, - { - "description": "should recognise the mechanism (MONGODB-AWS)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS", - "valid": true, - "credential": { - "username": null, - "password": null, - "source": "$external", - "mechanism": "MONGODB-AWS", - "mechanism_properties": null - } - }, - { - "description": "should recognise the mechanism when auth source is explicitly specified (MONGODB-AWS)", - "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS&authSource=$external", - "valid": true, - "credential": { - "username": null, - "password": null, - "source": "$external", - "mechanism": "MONGODB-AWS", - "mechanism_properties": null - } - }, - { - "description": "should throw an exception if username and no password (MONGODB-AWS)", - "uri": "mongodb://user@localhost/?authMechanism=MONGODB-AWS", - "valid": false, - "credential": null - }, - { - "description": "should use username and password if specified (MONGODB-AWS)", - "uri": "mongodb://user%21%40%23%24%25%5E%26%2A%28%29_%2B:pass%21%40%23%24%25%5E%26%2A%28%29_%2B@localhost/?authMechanism=MONGODB-AWS", - "valid": true, - "credential": { - "username": "user!@#$%^&*()_+", - "password": "pass!@#$%^&*()_+", - "source": "$external", - "mechanism": "MONGODB-AWS", - "mechanism_properties": null - } - }, - { - "description": "should use username, password and session token if specified (MONGODB-AWS)", - "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:token%21%40%23%24%25%5E%26%2A%28%29_%2B", - "valid": true, - "credential": { - "username": "user", - "password": "password", - "source": "$external", - "mechanism": "MONGODB-AWS", - "mechanism_properties": { - "AWS_SESSION_TOKEN": "token!@#$%^&*()_+" - } - } - } - ] -} diff --git a/test/auth/legacy/connection-string.json b/test/auth/legacy/connection-string.json new file mode 100644 index 0000000000..3a099c8137 --- /dev/null +++ b/test/auth/legacy/connection-string.json @@ -0,0 +1,651 @@ +{ + "tests": [ + { + "description": "should use the default source and mechanism", + "uri": "mongodb://user:password@localhost", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified", + "uri": "mongodb://user:password@localhost/foo", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": "should use the authSource when specified", + "uri": "mongodb://user:password@localhost/foo?authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": null, + "mechanism_properties": null + } + }, + { + "description": "should recognise the mechanism (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should 
ignore the database (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/foo?authMechanism=GSSAPI", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should accept valid authSource (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=$external", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "should accept generic mechanism property (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" + } + } + }, + { + "description": "should accept forwardAndReverse hostname canonicalization (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forwardAndReverse", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "CANONICALIZE_HOST_NAME": "forwardAndReverse" + } + } + }, + { + "description": "should accept no hostname canonicalization (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:none", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": null, + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "other", + "CANONICALIZE_HOST_NAME": "none" + } + } + }, + { + "description": "must raise an error when the hostname canonicalization is invalid", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:invalid", + "valid": false + }, + { + "description": "should accept the password (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM:password@localhost/?authMechanism=GSSAPI&authSource=$external", + "valid": true, + "credential": { + "username": "user@DOMAIN.COM", + "password": "password", + "source": "$external", + "mechanism": "GSSAPI", + "mechanism_properties": { + "SERVICE_NAME": "mongodb" + } + } + }, + { + "description": "must raise an error when the authSource is empty", + "uri": "mongodb://user:password@localhost/foo?authSource=", + "valid": false + }, + { + "description": "must raise an error when the authSource is empty without credentials", + "uri": "mongodb://localhost/admin?authSource=", + "valid": false + }, + { + "description": "should throw an exception if authSource is invalid (GSSAPI)", + "uri": "mongodb://user%40DOMAIN.COM@localhost/?authMechanism=GSSAPI&authSource=foo", + "valid": false + }, + { + "description": "should throw an exception if no username (GSSAPI)", + "uri": "mongodb://localhost/?authMechanism=GSSAPI", + "valid": false + }, + { + "description": "should recognize the mechanism (MONGODB-X509)", + "uri": 
"mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should ignore the database (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/?authMechanism=MONGODB-X509&authSource=$external", + "valid": true, + "credential": { + "username": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should recognize the mechanism with no username (MONGODB-X509)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-X509", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should recognize the mechanism with no username when auth source is explicitly specified (MONGODB-X509)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-X509&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-X509", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if supplied a password (MONGODB-X509)", + "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-X509", + "valid": false + }, + { + "description": "should throw an exception if authSource is invalid (MONGODB-X509)", + "uri": "mongodb://CN%3DmyName%2COU%3DmyOrgUnit%2CO%3DmyOrg%2CL%3DmyLocality%2CST%3DmyState%2CC%3DmyCountry@localhost/foo?authMechanism=MONGODB-X509&authSource=bar", + "valid": false + }, + { + "description": "should recognize the mechanism (PLAIN)", + "uri": "mongodb://user:password@localhost/?authMechanism=PLAIN", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "$external", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (PLAIN)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should use the authSource when specified (PLAIN)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=PLAIN&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "PLAIN", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (PLAIN)", + "uri": "mongodb://localhost/?authMechanism=PLAIN", + "valid": 
false + }, + { + "description": "should recognize the mechanism (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-1", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (SCRAM-SHA-1)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-1&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "SCRAM-SHA-1", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (SCRAM-SHA-1)", + "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-1", + "valid": false + }, + { + "description": "should recognize the mechanism (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "admin", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should use the database when no authSource is specified (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "foo", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should accept valid authSource (SCRAM-SHA-256)", + "uri": "mongodb://user:password@localhost/foo?authMechanism=SCRAM-SHA-256&authSource=bar", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "bar", + "mechanism": "SCRAM-SHA-256", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if no username (SCRAM-SHA-256)", + "uri": "mongodb://localhost/?authMechanism=SCRAM-SHA-256", + "valid": false + }, + { + "description": "URI with no auth-related info doesn't create credential", + "uri": "mongodb://localhost/", + "valid": true, + "credential": null + }, + { + "description": "database in URI path doesn't create credentials", + "uri": "mongodb://localhost/foo", + "valid": true, + "credential": null + }, + { + "description": "authSource without username doesn't create credential (default mechanism)", + "uri": "mongodb://localhost/?authSource=foo", + "valid": true, + "credential": null + }, + { + "description": "should throw an exception if no username provided (userinfo implies default mechanism)", + "uri": "mongodb://@localhost.com/", + "valid": false + }, + { + "description": "should throw an exception if no username/password provided (userinfo implies default mechanism)", + "uri": "mongodb://:@localhost.com/", + "valid": false + }, + { + "description": "should recognise the mechanism (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should recognise the mechanism when auth source is 
explicitly specified (MONGODB-AWS)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-AWS&authSource=$external", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should throw an exception if username and no password (MONGODB-AWS)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-AWS", + "valid": false, + "credential": null + }, + { + "description": "should use username and password if specified (MONGODB-AWS)", + "uri": "mongodb://user%21%40%23%24%25%5E%26%2A%28%29_%2B:pass%21%40%23%24%25%5E%26%2A%28%29_%2B@localhost/?authMechanism=MONGODB-AWS", + "valid": true, + "credential": { + "username": "user!@#$%^&*()_+", + "password": "pass!@#$%^&*()_+", + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": null + } + }, + { + "description": "should use username, password and session token if specified (MONGODB-AWS)", + "uri": "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS&authMechanismProperties=AWS_SESSION_TOKEN:token%21%40%23%24%25%5E%26%2A%28%29_%2B", + "valid": true, + "credential": { + "username": "user", + "password": "password", + "source": "$external", + "mechanism": "MONGODB-AWS", + "mechanism_properties": { + "AWS_SESSION_TOKEN": "token!@#$%^&*()_+" + } + } + }, + { + "description": "should recognise the mechanism with test environment (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "test" + } + } + }, + { + "description": "should recognise the mechanism when auth source is explicitly specified and with environment (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authSource=$external&authMechanismProperties=ENVIRONMENT:test", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "test" + } + } + }, + { + "description": "should throw an exception if supplied a password (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if username is specified for test (MONGODB-OIDC)", + "uri": "mongodb://principalName@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:test", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if specified environment is not supported (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:invalid", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if neither environment nor callbacks specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception when unsupported auth property is specified (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=UnsupportedProperty:unexisted", + "valid": false, + "credential": null + }, + { + "description": "should recognise the mechanism with azure provider (MONGODB-OIDC)", + 
"uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "foo" + } + } + }, + { + "description": "should accept a username with azure provider (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "foo" + } + } + }, + { + "description": "should accept a url-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:mongodb%3A%2F%2Ftest-cluster", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "mongodb://test-cluster" + } + } + }, + { + "description": "should accept an un-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:mongodb://test-cluster", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "mongodb://test-cluster" + } + } + }, + { + "description": "should handle a complicated url-encoded TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:abcd%25ef%3Ag%26hi", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "abcd%ef:g&hi" + } + } + }, + { + "description": "should url-encode a TOKEN_RESOURCE (MONGODB-OIDC)", + "uri": "mongodb://user@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:a$b", + "valid": true, + "credential": { + "username": "user", + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": "a$b" + } + } + }, + { + "description": "should accept a username and throw an error for a password with azure provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:foo", + "valid": false, + "credential": null + }, + { + "description": "should throw an exception if no token audience is given for azure provider (MONGODB-OIDC)", + "uri": "mongodb://username@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure", + "valid": false, + "credential": null + }, + { + "description": "should recognise the mechanism with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:foo", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + 
"ENVIRONMENT": "gcp", + "TOKEN_RESOURCE": "foo" + } + } + }, + { + "description": "should throw an error for a username and password with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp,TOKEN_RESOURCE:foo", + "valid": false, + "credential": null + }, + { + "description": "should throw an error if not TOKEN_RESOURCE with gcp provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:gcp", + "valid": false, + "credential": null + }, + { + "description": "should recognise the mechanism with k8s provider (MONGODB-OIDC)", + "uri": "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:k8s", + "valid": true, + "credential": { + "username": null, + "password": null, + "source": "$external", + "mechanism": "MONGODB-OIDC", + "mechanism_properties": { + "ENVIRONMENT": "k8s" + } + } + }, + { + "description": "should throw an error for a username and password with k8s provider (MONGODB-OIDC)", + "uri": "mongodb://user:pass@localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:k8s", + "valid": false, + "credential": null + } + ] +} diff --git a/test/auth/unified/mongodb-oidc-no-retry.json b/test/auth/unified/mongodb-oidc-no-retry.json new file mode 100644 index 0000000000..0a8658455e --- /dev/null +++ b/test/auth/unified/mongodb-oidc-no-retry.json @@ -0,0 +1,422 @@ +{ + "description": "MONGODB-OIDC authentication with retry disabled", + "schemaVersion": "1.19", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "auth": true, + "authMechanism": "MONGODB-OIDC", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client0", + "uriOptions": { + "authMechanism": "MONGODB-OIDC", + "authMechanismProperties": { + "$$placeholder": 1 + }, + "retryReads": false, + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collName" + } + } + ], + "initialData": [ + { + "collectionName": "collName", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "A read operation should succeed", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "A write operation should succeed", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Read commands should reauthenticate and retry when a ReauthenticationRequired error happens", + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {} + }, + "expectResult": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "collName", + "filter": {} + } + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Write commands should reauthenticate and retry when a ReauthenticationRequired error happens", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 391 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Handshake with cached token should use speculative authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "errorCode": 18 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "collName", + "documents": [ + { + "_id": 1, + "x": 1 + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Handshake without cached token should not use speculative authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "errorCode": 18 + } 
+ } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 1 + } + }, + "expectError": { + "errorCode": 18 + } + } + ] + } + ] +} diff --git a/test/auth_aws/test_auth_aws.py b/test/auth_aws/test_auth_aws.py index a63e60718c..9738694d85 100644 --- a/test/auth_aws/test_auth_aws.py +++ b/test/auth_aws/test_auth_aws.py @@ -13,19 +13,31 @@ # limitations under the License. """Test MONGODB-AWS Authentication.""" +from __future__ import annotations import os import sys import unittest +from test import PyMongoTestCase +from unittest.mock import patch + +import pytest sys.path[0:0] = [""] +try: + from pymongo_auth_aws import AwsCredential, auth +except ImportError: + pass + from pymongo import MongoClient from pymongo.errors import OperationFailure -from pymongo.uri_parser import parse_uri +from pymongo.synchronous.uri_parser import parse_uri + +pytestmark = pytest.mark.auth_aws -class TestAuthAWS(unittest.TestCase): +class TestAuthAWS(PyMongoTestCase): uri: str @classmethod @@ -36,7 +48,7 @@ def test_should_fail_without_credentials(self): if "@" not in self.uri: self.skipTest("MONGODB_URI already has no credentials") - hosts = ["%s:%s" % addr for addr in parse_uri(self.uri)["nodelist"]] + hosts = ["{}:{}".format(*addr) for addr in parse_uri(self.uri)["nodelist"]] self.assertTrue(hosts) with MongoClient(hosts) as client: with self.assertRaises(OperationFailure): @@ -53,15 +65,127 @@ def test_connect_uri(self): with MongoClient(self.uri) as client: client.get_database().test.find_one() + def setup_cache(self): + if os.environ.get("AWS_ACCESS_KEY_ID", None) or "@" in self.uri: + self.skipTest("Not testing cached credentials") + + # Make a connection to ensure that we enable caching. + client = self.simple_client(self.uri) + client.get_database().test.find_one() + client.close() + + self.assertTrue(auth.get_use_cached_credentials()) + + # Ensure cleared credentials. + auth.set_cached_credentials(None) + self.assertEqual(auth.get_cached_credentials(), None) + + client = self.simple_client(self.uri) + client.get_database().test.find_one() + client.close() + return auth.get_cached_credentials() + + def test_cache_credentials(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + + def test_cache_about_to_expire(self): + creds = self.setup_cache() + client = self.simple_client(self.uri) + + # Make the creds about to expire. + creds = auth.get_cached_credentials() + assert creds is not None + + creds = AwsCredential(creds.username, creds.password, creds.token, lambda x: True) + auth.set_cached_credentials(creds) + + client.get_database().test.find_one() + new_creds = auth.get_cached_credentials() + self.assertNotEqual(creds, new_creds) + + def test_poisoned_cache(self): + creds = self.setup_cache() + + client = self.simple_client(self.uri) + + # Poison the creds with invalid password. + assert creds is not None + creds = AwsCredential("a" * 24, "b" * 24, "c" * 24) + auth.set_cached_credentials(creds) + + with self.assertRaises(OperationFailure): + client.get_database().test.find_one() + + # Make sure the cache was cleared. + self.assertEqual(auth.get_cached_credentials(), None) + + # The next attempt should generate a new cred and succeed. 
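+        # A fresh fetch repopulates the cache, which the assertion below
+        # verifies is no longer empty.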
+ client.get_database().test.find_one() + self.assertNotEqual(auth.get_cached_credentials(), None) + + def test_environment_variables_ignored(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + os.environ.copy() + + client = self.simple_client(self.uri) + + client.get_database().test.find_one() + + self.assertIsNotNone(auth.get_cached_credentials()) + + mock_env = { + "AWS_ACCESS_KEY_ID": "foo", + "AWS_SECRET_ACCESS_KEY": "bar", + "AWS_SESSION_TOKEN": "baz", + } + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client.get_database().test.find_one() + + auth.set_cached_credentials(None) + + client2 = self.simple_client(self.uri) + + with patch.dict("os.environ", mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + with self.assertRaises(OperationFailure): + client2.get_database().test.find_one() + + def test_no_cache_environment_variables(self): + creds = self.setup_cache() + self.assertIsNotNone(creds) + auth.set_cached_credentials(None) + + mock_env = {"AWS_ACCESS_KEY_ID": creds.username, "AWS_SECRET_ACCESS_KEY": creds.password} + if creds.token: + mock_env["AWS_SESSION_TOKEN"] = creds.token -class TestAWSLambdaExamples(unittest.TestCase): + client = self.simple_client(self.uri) + + with patch.dict(os.environ, mock_env): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], creds.username) + client.get_database().test.find_one() + + self.assertIsNone(auth.get_cached_credentials()) + + mock_env["AWS_ACCESS_KEY_ID"] = "foo" + + client2 = self.simple_client(self.uri) + + with patch.dict("os.environ", mock_env), self.assertRaises(OperationFailure): + self.assertEqual(os.environ["AWS_ACCESS_KEY_ID"], "foo") + client2.get_database().test.find_one() + + +class TestAWSLambdaExamples(PyMongoTestCase): def test_shared_client(self): # Start AWS Lambda Example 1 import os - from pymongo import MongoClient - - client = MongoClient(host=os.environ["MONGODB_URI"]) + client = self.simple_client(host=os.environ["MONGODB_URI"]) def lambda_handler(event, context): return client.db.command("ping") @@ -72,9 +196,7 @@ def test_IAM_auth(self): # Start AWS Lambda Example 2 import os - from pymongo import MongoClient - - client = MongoClient( + client = self.simple_client( host=os.environ["MONGODB_URI"], authSource="$external", authMechanism="MONGODB-AWS", diff --git a/test/bson_binary_vector/float32.json b/test/bson_binary_vector/float32.json new file mode 100644 index 0000000000..72dafce10f --- /dev/null +++ b/test/bson_binary_vector/float32.json @@ -0,0 +1,65 @@ +{ + "description": "Tests of Binary subtype 9, Vectors, with dtype FLOAT32", + "test_key": "vector", + "tests": [ + { + "description": "Simple Vector FLOAT32", + "valid": true, + "vector": [127.0, 7.0], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1C00000005766563746F72000A0000000927000000FE420000E04000" + }, + { + "description": "Vector with decimals and negative value FLOAT32", + "valid": true, + "vector": [127.7, -7.7], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1C00000005766563746F72000A0000000927006666FF426666F6C000" + }, + { + "description": "Empty Vector FLOAT32", + "valid": true, + "vector": [], + "dtype_hex": "0x27", + "dtype_alias": "FLOAT32", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009270000" + }, + { + "description": "Infinity Vector FLOAT32", + "valid": true, + "vector": [{"$numberDouble": "-Infinity"}, 0.0, {"$numberDouble": "Infinity"} 
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "padding": 0,
+      "canonical_bson": "2000000005766563746F72000E000000092700000080FF000000000000807F00"
+    },
+    {
+      "description": "FLOAT32 with padding",
+      "valid": false,
+      "vector": [127.0, 7.0],
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "padding": 3,
+      "canonical_bson": "1C00000005766563746F72000A0000000927030000FE420000E04000"
+    },
+    {
+      "description": "Insufficient vector data with 3 bytes FLOAT32",
+      "valid": false,
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "canonical_bson": "1700000005766563746F7200050000000927002A2A2A00"
+    },
+    {
+      "description": "Insufficient vector data with 5 bytes FLOAT32",
+      "valid": false,
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "canonical_bson": "1900000005766563746F7200070000000927002A2A2A2A2A00"
+    }
+  ]
+}
diff --git a/test/bson_binary_vector/int8.json b/test/bson_binary_vector/int8.json
new file mode 100644
index 0000000000..29524fb617
--- /dev/null
+++ b/test/bson_binary_vector/int8.json
@@ -0,0 +1,57 @@
+{
+  "description": "Tests of Binary subtype 9, Vectors, with dtype INT8",
+  "test_key": "vector",
+  "tests": [
+    {
+      "description": "Simple Vector INT8",
+      "valid": true,
+      "vector": [127, 7],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0,
+      "canonical_bson": "1600000005766563746F7200040000000903007F0700"
+    },
+    {
+      "description": "Empty Vector INT8",
+      "valid": true,
+      "vector": [],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0,
+      "canonical_bson": "1400000005766563746F72000200000009030000"
+    },
+    {
+      "description": "Overflow Vector INT8",
+      "valid": false,
+      "vector": [128],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0
+    },
+    {
+      "description": "Underflow Vector INT8",
+      "valid": false,
+      "vector": [-129],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0
+    },
+    {
+      "description": "INT8 with padding",
+      "valid": false,
+      "vector": [127, 7],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 3,
+      "canonical_bson": "1600000005766563746F7200040000000903037F0700"
+    },
+    {
+      "description": "INT8 with float inputs",
+      "valid": false,
+      "vector": [127.77, 7.77],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0
+    }
+  ]
+}
diff --git a/test/bson_binary_vector/packed_bit.json b/test/bson_binary_vector/packed_bit.json
new file mode 100644
index 0000000000..7cc272e38b
--- /dev/null
+++ b/test/bson_binary_vector/packed_bit.json
@@ -0,0 +1,83 @@
+{
+  "description": "Tests of Binary subtype 9, Vectors, with dtype PACKED_BIT",
+  "test_key": "vector",
+  "tests": [
+    {
+      "description": "Padding specified with no vector data PACKED_BIT",
+      "valid": false,
+      "vector": [],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": 1,
+      "canonical_bson": "1400000005766563746F72000200000009100100"
+    },
+    {
+      "description": "Simple Vector PACKED_BIT",
+      "valid": true,
+      "vector": [127, 7],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": 0,
+      "canonical_bson": "1600000005766563746F7200040000000910007F0700"
+    },
+    {
+      "description": "PACKED_BIT with padding",
+      "valid": true,
+      "vector": [127, 8],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": 3,
+      "canonical_bson": "1600000005766563746F7200040000000910037F0800"
+    },
+    {
+      "description": "Empty Vector PACKED_BIT",
+      "valid": true,
+      "vector": [],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": 0,
+      "canonical_bson": "1400000005766563746F72000200000009100000"
+    },
+    {
+      "description": "Overflow Vector PACKED_BIT",
+      "valid": false,
+      "vector": [256],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": 0
+    },
+    {
+      "description": "Underflow Vector PACKED_BIT",
+      "valid": false,
+      "vector": [-1],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": 0
+    },
+    {
+      "description": "Vector with float values PACKED_BIT",
+      "valid": false,
+      "vector": [127.5],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": 0
+    },
+    {
+      "description": "Exceeding maximum padding PACKED_BIT",
+      "valid": false,
+      "vector": [1],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": 8,
+      "canonical_bson": "1500000005766563746F7200030000000910080100"
+    },
+    {
+      "description": "Negative padding PACKED_BIT",
+      "valid": false,
+      "vector": [1],
+      "dtype_hex": "0x10",
+      "dtype_alias": "PACKED_BIT",
+      "padding": -1
+    }
+  ]
+}
diff --git a/test/bson_corpus/binary.json b/test/bson_corpus/binary.json
index beb2e07a70..0e0056f3a2 100644
--- a/test/bson_corpus/binary.json
+++ b/test/bson_corpus/binary.json
@@ -55,6 +55,11 @@
       "canonical_bson": "1D000000057800100000000773FFD26444B34C6990E8E7D1DFC035D400",
       "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"07\"}}}"
     },
+    {
+      "description": "subtype 0x08",
+      "canonical_bson": "1D000000057800100000000873FFD26444B34C6990E8E7D1DFC035D400",
+      "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"08\"}}}"
+    },
     {
       "description": "subtype 0x80",
       "canonical_bson": "0F0000000578000200000080FFFF00",
@@ -69,6 +74,36 @@
       "description": "$type query operator (conflicts with legacy $binary form with $type field)",
       "canonical_bson": "180000000378001000000010247479706500020000000000",
       "canonical_extjson": "{\"x\" : { \"$type\" : {\"$numberInt\": \"2\"}}}"
+    },
+    {
+      "description": "subtype 0x09 Vector FLOAT32",
+      "canonical_bson": "170000000578000A0000000927000000FE420000E04000",
+      "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"JwAAAP5CAADgQA==\", \"subType\": \"09\"}}}"
+    },
+    {
+      "description": "subtype 0x09 Vector INT8",
+      "canonical_bson": "11000000057800040000000903007F0700",
+      "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"AwB/Bw==\", \"subType\": \"09\"}}}"
+    },
+    {
+      "description": "subtype 0x09 Vector PACKED_BIT",
+      "canonical_bson": "11000000057800040000000910007F0700",
+      "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"EAB/Bw==\", \"subType\": \"09\"}}}"
+    },
+    {
+      "description": "subtype 0x09 Vector (Zero-length) FLOAT32",
+      "canonical_bson": "0F0000000578000200000009270000",
+      "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"JwA=\", \"subType\": \"09\"}}}"
+    },
+    {
+      "description": "subtype 0x09 Vector (Zero-length) INT8",
+      "canonical_bson": "0F0000000578000200000009030000",
+      "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"AwA=\", \"subType\": \"09\"}}}"
+    },
+    {
+      "description": "subtype 0x09 Vector (Zero-length) PACKED_BIT",
+      "canonical_bson": "0F0000000578000200000009100000",
+      "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"EAA=\", \"subType\": \"09\"}}}"
     }
   ],
   "decodeErrors": [
diff --git a/test/bson_corpus/datetime.json b/test/bson_corpus/datetime.json
index f857afdc36..1554341d29 100644
--- a/test/bson_corpus/datetime.json
+++ b/test/bson_corpus/datetime.json
@@ -24,6 +24,7 @@
     {
       "description" : "Y10K",
      "canonical_bson" : "1000000009610000DC1FD277E6000000",
+      "relaxed_extjson" : 
"{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}", "canonical_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}" }, { diff --git a/test/bson_corpus/decimal128-1.json b/test/bson_corpus/decimal128-1.json index 7eefec6bf7..8e7fbc93c6 100644 --- a/test/bson_corpus/decimal128-1.json +++ b/test/bson_corpus/decimal128-1.json @@ -312,6 +312,30 @@ "canonical_bson": "18000000136400000000000a5bc138938d44c64d31cc3700", "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"}}", "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+999\"}}" + }, + { + "description": "Clamped zeros with a large positive exponent", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "Clamped zeros with a large negative exponent", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "Clamped negative zeros with a large positive exponent", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "Clamped negative zeros with a large negative exponent", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" } ] } diff --git a/test/change_streams/unified/change-streams-clusterTime.json b/test/change_streams/unified/change-streams-clusterTime.json new file mode 100644 index 0000000000..2b09e548f1 --- /dev/null +++ b/test/change_streams/unified/change-streams-clusterTime.json @@ -0,0 +1,81 @@ +{ + "description": "change-streams-clusterTime", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + 
"topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "clusterTime is present", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "ns": { + "db": "database0", + "coll": "collection0" + }, + "clusterTime": { + "$$exists": true + } + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-disambiguatedPaths.json b/test/change_streams/unified/change-streams-disambiguatedPaths.json new file mode 100644 index 0000000000..a8667b5436 --- /dev/null +++ b/test/change_streams/unified/change-streams-disambiguatedPaths.json @@ -0,0 +1,187 @@ +{ + "description": "disambiguatedPaths", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "6.1.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "disambiguatedPaths is present on updateDescription when an ambiguous path is present", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": { + "1": 1 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { + "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.1": [ + "a", + "1" + ] + } + } + } + } + ] + }, + { + "description": "disambiguatedPaths returns array indices as integers", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "a": [ + { + "1": 1 + } + ] + } + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "a.0.1": 2 + } + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "update", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "updateDescription": { 
+ "updatedFields": { + "$$exists": true + }, + "removedFields": { + "$$exists": true + }, + "truncatedArrays": { + "$$exists": true + }, + "disambiguatedPaths": { + "a.0.1": [ + "a", + { + "$$type": "int" + }, + "1" + ] + } + } + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams-errors.json b/test/change_streams/unified/change-streams-errors.json index 04fe8f04f3..65e99e541e 100644 --- a/test/change_streams/unified/change-streams-errors.json +++ b/test/change_streams/unified/change-streams-errors.json @@ -145,7 +145,7 @@ "minServerVersion": "4.1.11", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } @@ -190,7 +190,7 @@ "minServerVersion": "4.2", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } diff --git a/test/change_streams/unified/change-streams-nsType.json b/test/change_streams/unified/change-streams-nsType.json new file mode 100644 index 0000000000..1861c9a5e0 --- /dev/null +++ b/test/change_streams/unified/change-streams-nsType.json @@ -0,0 +1,145 @@ +{ + "description": "change-streams-nsType", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.1.0", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + } + ], + "tests": [ + { + "description": "nsType is present when creating collections", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "collection" + } + } + ] + }, + { + "description": "nsType is present when creating timeseries", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "timeseries": { + "timeField": "time", + "metaField": "meta", + "granularity": "minutes" + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "timeseries" + } + } + ] + }, + { + "description": "nsType is present when creating views", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "viewOn": "testName" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create", + "nsType": "view" + } + } + ] + } + ] +} diff --git 
a/test/change_streams/unified/change-streams-pre_and_post_images.json b/test/change_streams/unified/change-streams-pre_and_post_images.json index 8beefb2bc8..e62fc03459 100644 --- a/test/change_streams/unified/change-streams-pre_and_post_images.json +++ b/test/change_streams/unified/change-streams-pre_and_post_images.json @@ -6,7 +6,7 @@ "minServerVersion": "6.0.0", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" diff --git a/test/change_streams/unified/change-streams-resume-allowlist.json b/test/change_streams/unified/change-streams-resume-allowlist.json index b4953ec736..1ec72b432b 100644 --- a/test/change_streams/unified/change-streams-resume-allowlist.json +++ b/test/change_streams/unified/change-streams-resume-allowlist.json @@ -6,7 +6,7 @@ "minServerVersion": "3.6", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" diff --git a/test/change_streams/unified/change-streams-resume-errorLabels.json b/test/change_streams/unified/change-streams-resume-errorLabels.json index c156b550ce..7fd70108f0 100644 --- a/test/change_streams/unified/change-streams-resume-errorLabels.json +++ b/test/change_streams/unified/change-streams-resume-errorLabels.json @@ -6,7 +6,7 @@ "minServerVersion": "4.3.1", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ], "serverless": "forbid" @@ -1478,6 +1478,11 @@ }, { "description": "change stream resumes after StaleShardVersion", + "runOnRequirements": [ + { + "maxServerVersion": "6.0.99" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/change_streams/unified/change-streams-showExpandedEvents.json b/test/change_streams/unified/change-streams-showExpandedEvents.json new file mode 100644 index 0000000000..b9594e0c1e --- /dev/null +++ b/test/change_streams/unified/change-streams-showExpandedEvents.json @@ -0,0 +1,516 @@ +{ + "description": "change-streams-showExpandedEvents", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "topologies": [ + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "database1" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "collection1" + } + }, + { + "database": { + "id": "shardedDb", + "client": "client0", + "databaseName": "shardedDb" + } + }, + { + "database": { + "id": "adminDb", + "client": "client0", + "databaseName": "admin" + } + }, + { + "collection": { + "id": "shardedCollection", + "database": "shardedDb", + "collectionName": "shardedCollection" + } + } + ], + "initialData": [ + { + "collectionName": "collection0", + "databaseName": "database0", + "documents": [] + } + ], + "tests": [ + { + "description": "when provided, showExpandedEvents is sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": 
"changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": true + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when omitted, showExpandedEvents is not sent as a part of the aggregate command", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream0" + } + ], + "expectEvents": [ + { + "client": "client0", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "showExpandedEvents": { + "$$exists": false + } + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "database0" + } + } + ] + } + ] + }, + { + "description": "when showExpandedEvents is true, new fields on change stream events are handled appropriately", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "a": 1 + } + } + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "rename", + "object": "collection0", + "arguments": { + "to": "foo", + "dropTarget": true + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "insert", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "collectionUUID": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "operationDescription": { + "$$exists": true + } + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "rename", + "ns": { + "db": "database0", + "coll": "collection0" + }, + "to": { + "db": "database0", + "coll": "foo" + }, + "operationDescription": { + "dropTarget": { + "$$exists": true + }, + "to": { + "db": "database0", + "coll": "foo" + } + } + } + } + ] + }, + { + "description": "when showExpandedEvents is true, createIndex events are reported", + "operations": [ + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [ + { + "$match": { + "operationType": { + "$ne": "create" + } + } + } + ], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "createIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, dropIndexes events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + 
"arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "dropIndex", + "object": "collection0", + "arguments": { + "name": "x_1" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "dropIndexes" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, create events on views are reported", + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "foo" + } + }, + { + "name": "createChangeStream", + "object": "database0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "foo", + "viewOn": "testName" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "create" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, modify events are reported", + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_2" + } + }, + { + "name": "createChangeStream", + "object": "collection0", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "command": { + "collMod": "collection0" + }, + "commandName": "collMod" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "modify" + } + } + ] + }, + { + "description": "when showExpandedEvents is true, shardCollection events are reported", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createCollection", + "object": "shardedDb", + "arguments": { + "collection": "shardedCollection" + } + }, + { + "name": "createChangeStream", + "object": "shardedCollection", + "arguments": { + "pipeline": [], + "showExpandedEvents": true + }, + "saveResultAsEntity": "changeStream0" + }, + { + "name": "runCommand", + "object": "adminDb", + "arguments": { + "command": { + "shardCollection": "shardedDb.shardedCollection", + "key": { + "_id": 1 + } + }, + "commandName": "shardCollection" + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "changeStream0", + "expectResult": { + "operationType": "shardCollection" + } + } + ] + } + ] +} diff --git a/test/change_streams/unified/change-streams.json b/test/change_streams/unified/change-streams.json index 
c8b60ed4e2..a155d85b6e 100644 --- a/test/change_streams/unified/change-streams.json +++ b/test/change_streams/unified/change-streams.json @@ -181,7 +181,12 @@ "field": "array", "newSize": 2 } - ] + ], + "disambiguatedPaths": { + "$$unsetOrMatches": { + "$$exists": true + } + } } } } @@ -1408,6 +1413,11 @@ "$$unsetOrMatches": { "$$exists": true } + }, + "disambiguatedPaths": { + "$$unsetOrMatches": { + "$$exists": true + } } } } diff --git a/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json b/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json new file mode 100644 index 0000000000..ec4489fa09 --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-prefix-suffix.json @@ -0,0 +1,38 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "prefixPreview", + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + }, + { + "queryType": "suffixPreview", + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields-substring.json b/test/client-side-encryption/etc/data/encryptedFields-substring.json new file mode 100644 index 0000000000..ee22def77b --- /dev/null +++ b/test/client-side-encryption/etc/data/encryptedFields-substring.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "substringPreview", + "strMaxLength": { + "$numberInt": "10" + }, + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] +} diff --git a/test/client-side-encryption/etc/data/encryptedFields.json b/test/client-side-encryption/etc/data/encryptedFields.json index 2364590e4c..88abe5a604 100644 --- a/test/client-side-encryption/etc/data/encryptedFields.json +++ b/test/client-side-encryption/etc/data/encryptedFields.json @@ -1,7 +1,4 @@ { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", "fields": [ { "keyId": { @@ -30,4 +27,4 @@ "bsonType": "string" } ] -} +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/lookup/key-doc.json b/test/client-side-encryption/etc/data/lookup/key-doc.json new file mode 100644 index 0000000000..566b56c354 --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/key-doc.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git 
a/test/client-side-encryption/etc/data/lookup/schema-csfle.json b/test/client-side-encryption/etc/data/lookup/schema-csfle.json new file mode 100644 index 0000000000..29ac9ad5da --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-csfle.json @@ -0,0 +1,19 @@ +{ + "properties": { + "csfle": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-csfle2.json b/test/client-side-encryption/etc/data/lookup/schema-csfle2.json new file mode 100644 index 0000000000..3f1c02781c --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-csfle2.json @@ -0,0 +1,19 @@ +{ + "properties": { + "csfle2": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-qe.json b/test/client-side-encryption/etc/data/lookup/schema-qe.json new file mode 100644 index 0000000000..9428ea1b45 --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-qe.json @@ -0,0 +1,20 @@ +{ + "escCollection": "enxcol_.qe.esc", + "ecocCollection": "enxcol_.qe.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "qe", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": 0 + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/lookup/schema-qe2.json b/test/client-side-encryption/etc/data/lookup/schema-qe2.json new file mode 100644 index 0000000000..77d5bd37cb --- /dev/null +++ b/test/client-side-encryption/etc/data/lookup/schema-qe2.json @@ -0,0 +1,20 @@ +{ + "escCollection": "enxcol_.qe2.esc", + "ecocCollection": "enxcol_.qe2.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "qe2", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": 0 + } + } + ] +} diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Date.json b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json new file mode 100644 index 0000000000..defa6e37ff --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Date.json @@ -0,0 +1,36 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json new file mode 100644 index 0000000000..dbe28e9c10 --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalNoPrecision.json @@ -0,0 +1,26 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } 
+      },
+      "path": "encryptedDecimalNoPrecision",
+      "bsonType": "decimal",
+      "queries": {
+        "queryType": "range",
+        "contention": {
+          "$numberLong": "0"
+        },
+        "trimFactor": {
+          "$numberInt": "1"
+        },
+        "sparsity": {
+          "$numberLong": "1"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json
new file mode 100644
index 0000000000..538ab20f0e
--- /dev/null
+++ b/test/client-side-encryption/etc/data/range-encryptedFields-DecimalPrecision.json
@@ -0,0 +1,35 @@
+{
+  "fields": [
+    {
+      "keyId": {
+        "$binary": {
+          "base64": "EjRWeBI0mHYSNBI0VniQEg==",
+          "subType": "04"
+        }
+      },
+      "path": "encryptedDecimalPrecision",
+      "bsonType": "decimal",
+      "queries": {
+        "queryType": "range",
+        "contention": {
+          "$numberLong": "0"
+        },
+        "trimFactor": {
+          "$numberInt": "1"
+        },
+        "sparsity": {
+          "$numberLong": "1"
+        },
+        "min": {
+          "$numberDecimal": "0.0"
+        },
+        "max": {
+          "$numberDecimal": "200.0"
+        },
+        "precision": {
+          "$numberInt": "2"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json
new file mode 100644
index 0000000000..fb4f46d375
--- /dev/null
+++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoubleNoPrecision.json
@@ -0,0 +1,26 @@
+{
+  "fields": [
+    {
+      "keyId": {
+        "$binary": {
+          "base64": "EjRWeBI0mHYSNBI0VniQEg==",
+          "subType": "04"
+        }
+      },
+      "path": "encryptedDoubleNoPrecision",
+      "bsonType": "double",
+      "queries": {
+        "queryType": "range",
+        "contention": {
+          "$numberLong": "0"
+        },
+        "trimFactor": {
+          "$numberInt": "1"
+        },
+        "sparsity": {
+          "$numberLong": "1"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json
new file mode 100644
index 0000000000..07d1c84d6f
--- /dev/null
+++ b/test/client-side-encryption/etc/data/range-encryptedFields-DoublePrecision.json
@@ -0,0 +1,35 @@
+{
+  "fields": [
+    {
+      "keyId": {
+        "$binary": {
+          "base64": "EjRWeBI0mHYSNBI0VniQEg==",
+          "subType": "04"
+        }
+      },
+      "path": "encryptedDoublePrecision",
+      "bsonType": "double",
+      "queries": {
+        "queryType": "range",
+        "contention": {
+          "$numberLong": "0"
+        },
+        "trimFactor": {
+          "$numberInt": "1"
+        },
+        "sparsity": {
+          "$numberLong": "1"
+        },
+        "min": {
+          "$numberDouble": "0.0"
+        },
+        "max": {
+          "$numberDouble": "200.0"
+        },
+        "precision": {
+          "$numberInt": "2"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/test/client-side-encryption/etc/data/range-encryptedFields-Int.json b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json
new file mode 100644
index 0000000000..4f0b4854e4
--- /dev/null
+++ b/test/client-side-encryption/etc/data/range-encryptedFields-Int.json
@@ -0,0 +1,32 @@
+{
+  "fields": [
+    {
+      "keyId": {
+        "$binary": {
+          "base64": "EjRWeBI0mHYSNBI0VniQEg==",
+          "subType": "04"
+        }
+      },
+      "path": "encryptedInt",
+      "bsonType": "int",
+      "queries": {
+        "queryType": "range",
+        "contention": {
+          "$numberLong": "0"
+        },
+        "trimFactor": {
+          "$numberInt": "1"
+        },
+        "sparsity": {
+          "$numberLong": "1"
+        },
+        "min": {
+          "$numberInt": "0"
+        },
+        "max": {
+          "$numberInt": "200"
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git 
a/test/client-side-encryption/etc/data/range-encryptedFields-Long.json b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json new file mode 100644 index 0000000000..32fe1ea15d --- /dev/null +++ b/test/client-side-encryption/etc/data/range-encryptedFields-Long.json @@ -0,0 +1,32 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/limits/limits-encryptedFields.json b/test/client-side-encryption/limits/limits-encryptedFields.json new file mode 100644 index 0000000000..c52a0271e1 --- /dev/null +++ b/test/client-side-encryption/limits/limits-encryptedFields.json @@ -0,0 +1,14 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "LOCALAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "path": "foo", + "bsonType": "string" + } + ] +} \ No newline at end of file diff --git a/test/client-side-encryption/limits/limits-qe-doc.json b/test/client-side-encryption/limits/limits-qe-doc.json new file mode 100644 index 0000000000..71efbf4068 --- /dev/null +++ b/test/client-side-encryption/limits/limits-qe-doc.json @@ -0,0 +1,3 @@ +{ + "foo": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +} \ No newline at end of file diff --git a/test/client-side-encryption/spec/legacy/azureKMS.json b/test/client-side-encryption/spec/legacy/azureKMS.json index afecf40b0a..b0f5111370 100644 --- a/test/client-side-encryption/spec/legacy/azureKMS.json +++ b/test/client-side-encryption/spec/legacy/azureKMS.json @@ -78,6 +78,17 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" diff --git a/test/client-side-encryption/spec/legacy/bypassedCommand.json b/test/client-side-encryption/spec/legacy/bypassedCommand.json index bd0b1c565d..18054a70cb 100644 --- a/test/client-side-encryption/spec/legacy/bypassedCommand.json +++ b/test/client-side-encryption/spec/legacy/bypassedCommand.json @@ -78,7 +78,7 @@ ] }, { - "description": "current op is not bypassed", + "description": "kill op is not bypassed", "clientOptions": { "autoEncryptOpts": { "kmsProviders": { @@ -90,14 +90,15 @@ { "name": "runCommand", "object": "database", - "command_name": "currentOp", + "command_name": "killOp", "arguments": { "command": { - "currentOp": 1 + "killOp": 1, + "op": 1234 } }, "result": { - "errorContains": "command not supported for auto encryption: currentOp" + "errorContains": "command not supported for auto encryption: killOp" } } ] diff --git a/test/client-side-encryption/spec/legacy/explain.json b/test/client-side-encryption/spec/legacy/explain.json index 0e451e4818..8ca3b48d37 100644 --- a/test/client-side-encryption/spec/legacy/explain.json +++ b/test/client-side-encryption/spec/legacy/explain.json @@ -1,7 +1,7 @@ { "runOn": [ { - "minServerVersion": "4.1.10" + "minServerVersion": "7.0.0" } ], "database_name": "default", diff --git 
a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json deleted file mode 100644 index 629faf189d..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-BypassQueryAnalysis.json +++ /dev/null @@ -1,289 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - }, - { - "_id": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "BypassQueryAnalysis decrypts", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "bypassQueryAnalysis": true - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": { - "$binary": { - "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", - "subType": "06" - } - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1, - "encryptedIndexed": "value123" - } - 
] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedIndexed": { - "$binary": { - "base64": "BHEBAAAFZAAgAAAAAHb62aV7+mqmaGcotPLdG3KP7S8diFwWMLM/5rYtqLrEBXMAIAAAAAAVJ6OWHRv3OtCozHpt3ZzfBhaxZirLv3B+G8PuaaO4EgVjACAAAAAAsZXWOWA+UiCBbrJNB6bHflB/cn7pWSvwWN2jw4FPeIUFcABQAAAAAMdD1nV2nqeI1eXEQNskDflCy8I7/HvvqDKJ6XxjhrPQWdLqjz+8GosGUsB7A8ee/uG9/guENuL25XD+Fxxkv1LLXtavHOlLF7iW0u9yabqqBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AE0AAAAAq83vqxI0mHYSNBI0VniQEkzZZBBDgeZh+h+gXEmOrSFtVvkUcnHWj/rfPW7iJ0G3UJ8zpuBmUM/VjOMJCY4+eDqdTiPIwX+/vNXegc8FZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsAA==", - "subType": "06" - } - } - } - ], - "ordered": true - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "_id": 1 - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", - "subType": "00" - } - } - ] - } - ] - } - } - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-Compact.json b/test/client-side-encryption/spec/legacy/fle2-Compact.json deleted file mode 100644 index 46da99cbfc..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-Compact.json +++ /dev/null @@ -1,232 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - 
} - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - }, - { - "_id": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "Compact works", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "runCommand", - "object": "database", - "command_name": "compactStructuredEncryptionData", - "arguments": { - "command": { - "compactStructuredEncryptionData": "default" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "compactStructuredEncryptionData": "default", - "compactionTokens": { - "encryptedIndexed": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - }, - "encryptedUnindexed": { - "$binary": { - "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", - "subType": "00" - } - } - } - }, - "command_name": "compactStructuredEncryptionData" - } - } - ] - }, - { - "description": "Compact errors on an unencrypted client", - "operations": [ - { - "name": "runCommand", - "object": "database", - "command_name": "compactStructuredEncryptionData", - "arguments": { - "command": { - "compactStructuredEncryptionData": "default" - } - }, - "result": { - "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" - } - } - ] - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json deleted file mode 100644 index 6836f40e04..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-CreateCollection.json +++ /dev/null @@ -1,2239 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "tests": [ - { - "description": "state collections and index are created", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - 
}, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } - } - ] - }, - { - 
"description": "default state collection names are applied", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 
- } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } - } - ] - }, - { - "description": "drop removes all state collections", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - }, - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "enxcol_.encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - 
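Editor's note: the companion test ("drop removes all state collections") covers the teardown half of the contract: dropCollection on a mapped collection must also drop the derived state collections, so a create/drop round trip leaves nothing behind. A short sketch continuing from the client and db in the previous sketch, under the same assumptions:

    # Continuing from the previous sketch's auto-encrypting `db`.
    db.create_collection("encryptedCollection")
    db.drop_collection("encryptedCollection")
    # The expectations require four drop commands, in order:
    #   enxcol_.encryptedCollection.esc, .ecc, .ecoc, then encryptedCollection.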
"command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "enxcol_.encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - }, - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "enxcol_.encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - } - ] - }, - { - "description": "encryptedFieldsMap with cyclic entries does not loop", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - }, - "default.encryptedCollection.esc": { - "escCollection": "encryptedCollection", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": 
"encryptedCollection", - "index": "__safeContent___1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } - } - ] - }, - { - "description": "CreateCollection without encryptedFields.", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "plaintextCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "plaintextCollection" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "plaintextCollection" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "plaintextCollection" - } - }, - "command_name": "listCollections", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "plaintextCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "plaintextCollection" - }, - 
"command_name": "create", - "database_name": "default" - } - } - ] - }, - { - "description": "CreateCollection from encryptedFieldsMap.", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": 
"encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } - } - ] - }, - { - "description": "CreateCollection from encryptedFields.", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "escCollection": 
"encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "encryptedCollection" - } - }, - "command_name": "listCollections", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } - } - ] - }, - { - "description": "DropCollection from encryptedFieldsMap", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - } - ] - }, - { - "description": "DropCollection from encryptedFields", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": {} - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": 
"encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - }, - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.esc" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": 
"encryptedCollection" - } - }, - "command_name": "listCollections", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - } - ] - }, - { - "description": "DropCollection from remote encryptedFields", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "aws": {} - }, - "encryptedFieldsMap": {} - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.esc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecoc" - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "__safeContent___1" - } - }, - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.esc" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecc" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection.ecoc" - } - }, - { - "name": 
"assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.esc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection.ecoc", - "clusteredIndex": { - "key": { - "_id": 1 - }, - "unique": true - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "create": "encryptedCollection", - "encryptedFields": { - "escCollection": "encryptedCollection.esc", - "eccCollection": "encryptedCollection.ecc", - "ecocCollection": "encryptedCollection.ecoc", - "fields": [ - { - "path": "firstName", - "bsonType": "string", - "keyId": { - "$binary": { - "subType": "04", - "base64": "AAAAAAAAAAAAAAAAAAAAAA==" - } - } - } - ] - } - }, - "command_name": "create", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "encryptedCollection" - } - }, - "command_name": "listCollections", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "__safeContent___1", - "key": { - "__safeContent__": 1 - } - } - ] - }, - "command_name": "createIndexes", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "encryptedCollection" - } - }, - "command_name": "listCollections", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.esc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection.ecoc" - }, - "command_name": "drop", - "database_name": "default" - } - }, - { - "command_started_event": { - "command": { - "drop": "encryptedCollection" - }, - "command_name": "drop", - "database_name": "default" - } - } - ] - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json deleted file mode 100644 index c6d0bca0d1..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-DecryptExistingData.json +++ /dev/null @@ -1,148 +0,0 
@@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [ - { - "_id": 1, - "encryptedUnindexed": { - "$binary": { - "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", - "subType": "06" - } - } - } - ], - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "FLE2 decrypt of existing data succeeds", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1, - "encryptedUnindexed": "value123" - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "_id": 1 - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - } - ] - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-Delete.json b/test/client-side-encryption/spec/legacy/fle2-Delete.json deleted file mode 100644 index 0e3e06396e..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-Delete.json +++ /dev/null @@ -1,305 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "Delete can query an FLE2 indexed field", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": "value123" - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "encryptedIndexed": "value123" - } - }, - "result": { - "deletedCount": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "delete": "default", - "deletes": [ - { - "q": { - "encryptedIndexed": { - "$eq": { - "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", - "subType": "06" - } - } - } - }, - "limit": 1 - } - ], - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": 
"q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } - } - } - }, - "command_name": "delete" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json deleted file mode 100644 index ea3eb4850c..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-EncryptedFieldsMap.json +++ /dev/null @@ -1,217 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "encryptedFieldsMap is preferred over remote encryptedFields", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "encryptedFieldsMap": { - "default.default": { - "escCollection": "esc", - "eccCollection": "ecc", - "ecocCollection": "ecoc", - "fields": [] - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedUnindexed": { - "$binary": { - "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", - "subType": "06" - } - } - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1, - "encryptedUnindexed": "value123" - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedUnindexed": { - "$binary": { - "base64": 
"BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", - "subType": "06" - } - } - } - ], - "ordered": true - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "_id": 1 - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "encryptedUnindexed": { - "$binary": { - "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", - "subType": "06" - } - } - } - ] - } - } - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json deleted file mode 100644 index 1d3227ee7f..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFields-vs-jsonSchema.json +++ /dev/null @@ -1,304 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "json_schema": { - "properties": {}, - "bsonType": "object" - }, - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "encryptedFields is preferred over jsonSchema", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": "123" - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedIndexed": "123" - } - }, - "result": [ - { - "_id": 1, - "encryptedIndexed": "123" - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - 
"command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "encryptedIndexed": { - "$eq": { - "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", - "subType": "06" - } - } - } - }, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "find" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", - "subType": "00" - } - } - ] - } - ] - } - } - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json deleted file mode 100644 index 030952e056..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-EncryptedFieldsMap-defaults.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "key_vault_data": [], - "tests": [ - { - "description": "default state collections are applied to encryptionInformation", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "encryptedFieldsMap": { - "default.default": { - "fields": [] - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "foo": { - "$binary": { - "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", - "subType": "06" - } - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "foo": { - "$binary": { - "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", - "subType": "06" - } - } - } - ], - "encryptionInformation": { - "type": { - "$numberInt": "1" - }, - "schema": { - "default.default": { - "fields": [], - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc" - } - } - }, - "ordered": true - }, - "command_name": "insert" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "foo": { - "$binary": { - "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", - "subType": "06" - } - } - } - ] - } - } - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json deleted file mode 100644 index b31438876f..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-FindOneAndUpdate.json +++ /dev/null @@ -1,602 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "findOneAndUpdate can query an FLE2 indexed field", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - 
"$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": "value123" - } - } - }, - { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "encryptedIndexed": "value123" - }, - "update": { - "$set": { - "foo": "bar" - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 1, - "encryptedIndexed": "value123" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "default", - "query": { - "encryptedIndexed": { - "$eq": { - "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", - "subType": "06" - } - } - } - }, - "update": { - "$set": { - "foo": "bar" - } - }, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } - } - } - }, - "command_name": "findAndModify" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, 
- "encryptedIndexed": { - "$$type": "binData" - }, - "foo": "bar", - "__safeContent__": [ - { - "$binary": { - "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", - "subType": "00" - } - } - ] - } - ] - } - } - }, - { - "description": "findOneAndUpdate can modify an FLE2 indexed field", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": "value123" - } - } - }, - { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "encryptedIndexed": "value123" - }, - "update": { - "$set": { - "encryptedIndexed": "value456" - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 1, - "encryptedIndexed": "value123" - } - }, - { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [ - { - "encryptedIndexed": "value456" - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "default", - "query": { - "encryptedIndexed": { - "$eq": { - "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", - "subType": "06" - } - } - } - }, - "update": { - "$set": { - "encryptedIndexed": { - "$$type": "binData" - } - } - }, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": 
{ - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } - } - } - }, - "command_name": "findAndModify" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "_id": { - "$eq": 1 - } - } - }, - "command_name": "find" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", - "subType": "00" - } - } - ] - } - ] - } - } - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json deleted file mode 100644 index 81a549590e..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Indexed.json +++ /dev/null @@ -1,300 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "Insert and find FLE2 indexed field", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": "123" - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedIndexed": "123" - } - }, - "result": [ - { - "_id": 1, - "encryptedIndexed": "123" - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - 
"find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "encryptedIndexed": { - "$eq": { - "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVjACAAAAAAWuidNu47c9A4Clic3DvFhn1AQJVC+FJtoE5bGZuz6PsFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", - "subType": "06" - } - } - } - }, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "find" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", - "subType": "00" - } - } - ] - } - ] - } - } - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json deleted file mode 100644 index 1a75095907..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-InsertFind-Unindexed.json +++ /dev/null @@ -1,250 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": 
"q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "Insert and find FLE2 unindexed field", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedUnindexed": "value123" - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1, - "encryptedUnindexed": "value123" - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedUnindexed": { - "$$type": "binData" - } - } - ], - "ordered": true - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "_id": { - "$eq": 1 - } - } - }, - "command_name": "find" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "encryptedUnindexed": { - "$$type": "binData" - } - } - ] - } - } - }, - { - "description": "Query with an unindexed field fails", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedUnindexed": "value123" - } - } - }, - { - "name": "find", - "arguments": { - "filter": { - "encryptedUnindexed": "value123" - } - }, - "result": { - "errorContains": "Cannot query" - } - } - ] - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2-MissingKey.json deleted file mode 100644 index 2db1cd7702..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-MissingKey.json +++ /dev/null @@ -1,118 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], 
- "database_name": "default", - "collection_name": "default", - "data": [ - { - "encryptedUnindexed": { - "$binary": { - "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", - "subType": "06" - } - } - } - ], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [], - "tests": [ - { - "description": "FLE2 encrypt fails with mising key", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": "123" - } - }, - "result": { - "errorContains": "not all keys requested were satisfied" - } - } - ] - }, - { - "description": "FLE2 decrypt fails with mising key", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "find", - "arguments": { - "filter": {} - }, - "result": { - "errorContains": "not all keys requested were satisfied" - } - } - ] - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json deleted file mode 100644 index e9dd586c26..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-NoEncryption.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "key_vault_data": [], - "encrypted_fields": { - "fields": [] - }, - "tests": [ - { - "description": "insert with no encryption succeeds", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "foo": "bar" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "foo": "bar" - } - ], - "ordered": true - }, - "command_name": "insert" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "foo": "bar" - } - ] - } - } - } - ] -} diff --git 
a/test/client-side-encryption/spec/legacy/fle2-Update.json b/test/client-side-encryption/spec/legacy/fle2-Update.json deleted file mode 100644 index 87830af32d..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-Update.json +++ /dev/null @@ -1,610 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "encrypted_fields": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - }, - "key_vault_data": [ - { - "_id": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "keyMaterial": { - "$binary": { - "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", - "subType": "00" - } - }, - "creationDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "updateDate": { - "$date": { - "$numberLong": "1648914851981" - } - }, - "status": { - "$numberInt": "0" - }, - "masterKey": { - "provider": "local" - } - } - ], - "tests": [ - { - "description": "Update can query an FLE2 indexed field", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": "value123" - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "encryptedIndexed": "value123" - }, - "update": { - "$set": { - "foo": "bar" - } - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "insert": "default", - "documents": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - 
"queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "update": "default", - "updates": [ - { - "q": { - "encryptedIndexed": { - "$eq": { - "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", - "subType": "06" - } - } - } - }, - "u": { - "$set": { - "foo": "bar" - } - } - } - ], - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } - } - } - }, - "command_name": "update" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - }, - "foo": "bar", - "__safeContent__": [ - { - "$binary": { - "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", - "subType": "00" - } - } - ] - } - ] - } - } - }, - { - "description": "Update can modify an FLE2 indexed field", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - } - } - }, - "operations": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "encryptedIndexed": "value123" - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "encryptedIndexed": "value123" - }, - "update": { - "$set": { - "encryptedIndexed": "value456" - } - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [ - { - "encryptedIndexed": "value456" - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1, - "filter": { - "name": "default" - } - }, - "command_name": "listCollections" - } - }, - { - "command_started_event": { - "command": { - "find": "datakeys", - "filter": { - "$or": [ - { - "_id": { - "$in": [ - { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - } - ] - } - }, - { - "keyAltNames": { - "$in": [] - } - } - ] - }, - "$db": "keyvault", - "readConcern": { - "level": "majority" - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - 
"insert": "default", - "documents": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - } - } - ], - "ordered": true, - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "update": "default", - "updates": [ - { - "q": { - "encryptedIndexed": { - "$eq": { - "$binary": { - "base64": "BbEAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVjACAAAAAA19X9v9NlWidu/wR5/C/7WUV54DfL5CkNmT5WYrhxdDcFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsEmNtAAAAAAAAAAAAAA==", - "subType": "06" - } - } - } - }, - "u": { - "$set": { - "encryptedIndexed": { - "$$type": "binData" - } - } - } - } - ], - "encryptionInformation": { - "type": 1, - "schema": { - "default.default": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - }, - "deleteTokens": { - "default.default": { - "encryptedIndexed": { - "e": { - "$binary": { - "base64": "65pz95EthqQpfoHS9nWvdCh05AV+OokP7GUaI+7j8+w=", - "subType": "00" - } - }, - "o": { - "$binary": { - "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", - "subType": "00" - } - } - } - } - } - } - }, - "command_name": "update" - } - }, - { - "command_started_event": { - "command": { - "find": "default", - "filter": { - "_id": { - "$eq": 1 - } - } - }, - "command_name": "find" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "encryptedIndexed": { - "$$type": "binData" - }, - "__safeContent__": [ - { - "$binary": { - "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", - "subType": "00" - } - } - ] - } - ] - } - } - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json deleted file mode 100644 index fab36f75a1..0000000000 --- a/test/client-side-encryption/spec/legacy/fle2-validatorAndPartialFieldExpression.json +++ /dev/null @@ -1,520 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "6.0.0", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "default", - "collection_name": "default", - "data": [], - "tests": [ - { - "description": "create with a validator on an unencrypted field is OK", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "validator": { - "unencrypted_string": "foo" - } - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection" - } - } - ] - }, - { - "description": "create with a validator on an encrypted field is an error", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection", - "validator": { - "encryptedIndexed": "foo" - } - }, - "result": { - "errorContains": "Comparison to encrypted fields not supported" - } - } - ] - }, - { - "description": "collMod with a validator on an unencrypted field is OK", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - 
"subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "command": { - "collMod": "encryptedCollection", - "validator": { - "unencrypted_string": "foo" - } - } - } - } - ] - }, - { - "description": "collMod with a validator on an encrypted field is an error", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "command": { - "collMod": "encryptedCollection", - "validator": { - "encryptedIndexed": "foo" - } - } - }, - "result": { - "errorContains": "Comparison to encrypted fields not supported" - } - } - ] - }, - { - "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "name", - "key": { - "name": 1 - }, - 
"partialFilterExpression": { - "unencrypted_string": "foo" - } - } - ] - } - } - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "default", - "collection": "encryptedCollection", - "index": "name" - } - } - ] - }, - { - "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", - "clientOptions": { - "autoEncryptOpts": { - "kmsProviders": { - "local": { - "key": { - "$binary": { - "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", - "subType": "00" - } - } - } - }, - "encryptedFieldsMap": { - "default.encryptedCollection": { - "escCollection": "enxcol_.default.esc", - "eccCollection": "enxcol_.default.ecc", - "ecocCollection": "enxcol_.default.ecoc", - "fields": [ - { - "keyId": { - "$binary": { - "base64": "EjRWeBI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedIndexed", - "bsonType": "string", - "queries": { - "queryType": "equality", - "contention": { - "$numberLong": "0" - } - } - }, - { - "keyId": { - "$binary": { - "base64": "q83vqxI0mHYSNBI0VniQEg==", - "subType": "04" - } - }, - "path": "encryptedUnindexed", - "bsonType": "string" - } - ] - } - } - } - }, - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "collection": "encryptedCollection" - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "command": { - "createIndexes": "encryptedCollection", - "indexes": [ - { - "name": "name", - "key": { - "name": 1 - }, - "partialFilterExpression": { - "encryptedIndexed": "foo" - } - } - ] - } - }, - "result": { - "errorContains": "Comparison to encrypted fields not supported" - } - } - ] - } - ] -} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json new file mode 100644 index 0000000000..9b28df2f9a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-BypassQueryAnalysis.json @@ -0,0 +1,261 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + 
"description": "BypassQueryAnalysis decrypts", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "bypassQueryAnalysis": true + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Compact.json 
b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json new file mode 100644 index 0000000000..868095e1e6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Compact.json @@ -0,0 +1,233 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "ok": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedIndexed": { + "$binary": { + "base64": 
"noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "encryptedUnindexed": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + }, + { + "description": "Compact errors on an unencrypted client", + "operations": [ + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json new file mode 100644 index 0000000000..c266aa6b83 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection-OldServer.json @@ -0,0 +1,94 @@ +{ + "runOn": [ + { + "minServerVersion": "6.0.0", + "maxServerVersion": "6.3.99", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "driver returns an error if creating a QEv2 collection on unsupported server", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." 
+ } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json new file mode 100644 index 0000000000..c324be8abc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-CreateCollection.json @@ -0,0 +1,1758 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "tests": [ + { + "description": "state collections and index are created", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + 
"key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "default state collection names are applied", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + 
"command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "drop removes all state collections", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { 
+ "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection without encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "plaintextCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "plaintextCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "plaintextCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "plaintextCollection" + }, + "command_name": "create", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFieldsMap.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + 
"name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "CreateCollection from encryptedFields.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": 
"drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFieldsMap", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + 
"object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + 
}, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "DropCollection from remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": {} + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "command_name": "create", + 
"database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "command_name": "create", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "command_name": "createIndexes", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "command_name": "listCollections", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "command_name": "drop", + "database_name": "default" + } + }, + { + "command_started_event": { + "command": { + "drop": "encryptedCollection" + }, + "command_name": "drop", + "database_name": "default" + } + } + ] + }, + { + "description": "encryptedFields are consulted for metadata collection names", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "invalid_esc_name", + "ecocCollection": "invalid_ecoc_name", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + }, + "result": { + "errorContains": "Encrypted State Collection name should follow" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json new file mode 100644 index 0000000000..1fb4c1d1bc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-DecryptExistingData.json @@ -0,0 +1,149 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + 
"$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 decrypt of existing data succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json new file mode 100644 index 0000000000..ddfe57b00c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Delete.json @@ -0,0 +1,284 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Delete can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { 
+ "name": "deleteOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 0000000000..bdc5c99bc2 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,212 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + 
"base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json new file mode 100644 index 0000000000..8e0c6dafa3 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -0,0 +1,300 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": {}, + "bsonType": "object" + }, + "encrypted_fields": { + "fields": [ + { + "keyId": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "encryptedFields is preferred over jsonSchema", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json new file mode 100644 index 0000000000..1c0a057cad --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-EncryptedFieldsMap-defaults.json @@ -0,0 +1,105 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "tests": [ + { + "description": "default state collections are applied to encryptionInformation", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [] + } + } + }, + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json new file mode 100644 index 0000000000..c5e689a3de --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-FindOneAndUpdate.json @@ -0,0 +1,560 @@ +{ + "runOn": 
[ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "findOneAndUpdate can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": 
{ + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "findOneAndUpdate can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + 
"command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json new file mode 100644 index 0000000000..6e156ffc60 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Indexed.json @@ -0,0 +1,296 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "result": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json new file mode 100644 index 0000000000..48280f5bd4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-InsertFind-Unindexed.json @@ -0,0 +1,248 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": 
"EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Insert and find FLE2 unindexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ] + } + } + }, + { + "description": "Query with an unindexed field fails", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedUnindexed": "value123" + } + }, + "result": { + "errorContains": "encrypt" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json new file mode 
100644 index 0000000000..1e655f0a9c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-MissingKey.json @@ -0,0 +1,116 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [ + { + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [], + "tests": [ + { + "description": "FLE2 encrypt fails with missing key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + }, + { + "description": "FLE2 decrypt fails with missing key", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": {} + }, + "result": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json new file mode 100644 index 0000000000..a6843c4737 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-NoEncryption.json @@ -0,0 +1,87 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "key_vault_data": [], + "encrypted_fields": { + "fields": [] + }, + "tests": [ + { + "description": "insert with no encryption succeeds", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": "bar" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + 
"outcome": { + "collection": { + "data": [ + { + "_id": 1, + "foo": "bar" + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json new file mode 100644 index 0000000000..59241927ca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Compact.json @@ -0,0 +1,289 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Compact works with 'range' fields", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "command_name": "compactStructuredEncryptionData", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + } + }, + "result": { + "ok": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + 
"contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedInt": { + "ecoc": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "QxKJD2If48p0l8NAXf2Kr0aleMd/dATSjBK6hTpNMyc=", + "subType": "00" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "compactStructuredEncryptionData" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json new file mode 100644 index 0000000000..df2161cc36 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Aggregate.json @@ -0,0 +1,508 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Correctness.json new file mode 100644 index 0000000000..fae25a1c02 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Correctness.json @@ -0,0 +1,1842 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + 
"encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lte": { + "$date": 
{ + "$numberLong": "1" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + 
"_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "-1" + } + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + 
"_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + 
"$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + 
"local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "value type is a date" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json new file mode 100644 index 0000000000..b4f15d9b1f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Delete.json @@ -0,0 +1,442 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json new file mode 100644 index 0000000000..97ab4aaeb9 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -0,0 +1,514 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json new file mode 100644 index 0000000000..a011c388e4 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-InsertFind.json @@ -0,0 +1,499 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + 
] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json new file mode 100644 index 0000000000..6bab6499f5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Date-Update.json @@ -0,0 +1,516 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json new file mode 100644 index 0000000000..d1a82c2164 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Aggregate.json @@ -0,0 +1,1902 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { 
+ "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Correctness.json new file mode 100644 index 0000000000..4316a31c3e --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Correctness.json @@ -0,0 +1,1158 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", 
+ "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + 
"$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + 
}, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + 
"$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json new file mode 100644 index 0000000000..19cae3c64f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Delete.json @@ -0,0 +1,1116 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json new file mode 100644 index 0000000000..4ab3b63ea5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -0,0 +1,1906 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + 
"status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json new file mode 100644 index 0000000000..5a2adf6907 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-InsertFind.json @@ -0,0 +1,1893 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json new file mode 100644 index 0000000000..b840d38347 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Decimal-Update.json @@ -0,0 +1,1910 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json new file mode 100644 index 0000000000..271f57b125 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -0,0 +1,584 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json new file mode 100644 index 0000000000..8954445887 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + 
"$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "result": [ + { + "_id": 
0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "name": 
"find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, 
+ "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + 
"kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json new file mode 100644 index 0000000000..7b3d5d8225 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -0,0 +1,476 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + 
"type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..af371f7b3f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -0,0 +1,588 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } 
+ ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": 
{ + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU
7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json new file mode 100644 index 0000000000..bbe81f87ad --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -0,0 +1,571 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json new file mode 100644 index 0000000000..987bdf1aa6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -0,0 +1,588 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": 
"binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json new file mode 100644 index 0000000000..c2a119cb7f --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Defaults.json @@ -0,0 +1,381 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range applies defaults for trimFactor and sparsity", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + 
"command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DRgbAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAA
CL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jO
BSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/
JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAIAAAAAAAAAEHRmAAYAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json new file mode 100644 index 0000000000..daa7f4e973 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Aggregate.json @@ -0,0 +1,1132 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Correctness.json new file mode 100644 index 0000000000..edb336743c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Correctness.json @@ -0,0 +1,1160 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": 
"00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + 
"_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + 
"base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + 
"encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json new file mode 100644 index 0000000000..4a9c1f27b5 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Delete.json @@ -0,0 +1,732 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": 
"FLE2 Range Double. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json new file mode 100644 index 0000000000..d7860de83e --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -0,0 +1,1136 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" 
+ } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vq
R3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplE
Gbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6Hn
RQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json new file mode 100644 index 0000000000..934af381f1 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-InsertFind.json @@ -0,0 +1,1123 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": 
"EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + 
}, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAA
AABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAA
gAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAox
v7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } 
+ }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json new file mode 100644 index 0000000000..ec95e0334a --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Double-Update.json @@ -0,0 +1,1140 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "command_name": 
"insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKF
b6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+
DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVT
F6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json new file mode 100644 index 0000000000..e8a50ebeca --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -0,0 +1,580 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + 
}, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + 
"subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJc
IEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json new file mode 100644 index 0000000000..87d0e3dd8c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Correctness.json @@ -0,0 +1,1650 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + 
"name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + 
"$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + 
"$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + 
"name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + 
"base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": 
"2.0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json new file mode 100644 index 0000000000..8a0fecf786 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -0,0 +1,474 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..ac77931d61 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -0,0 +1,584 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + 
"subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3
AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json new file mode 100644 index 0000000000..5dcc09dca9 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -0,0 +1,571 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json new file mode 100644 index 0000000000..483e3d52e6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-DoublePrecision-Update.json @@ -0,0 +1,588 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + } + } 
+ ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json new file mode 100644 index 0000000000..6cd837c789 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Aggregate.json @@ -0,0 +1,484 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + 
"_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Correctness.json new file mode 100644 index 0000000000..9dc4e4e501 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Correctness.json @@ -0,0 +1,1644 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + 
"$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } 
+ }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + 
"encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json new file mode 100644 index 0000000000..b251db9157 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Delete.json @@ -0,0 +1,420 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { 
+ "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": 
"int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json new file mode 100644 index 0000000000..6e09b5ea2c --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -0,0 +1,488 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json new file mode 100644 index 0000000000..cbab7e7699 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-InsertFind.json @@ -0,0 +1,475 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + 
"$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json new file mode 100644 index 0000000000..cb6b223943 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Int-Update.json @@ -0,0 +1,492 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json new file mode 100644 index 0000000000..5c4bf10101 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Aggregate.json @@ -0,0 +1,484 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Aggregate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "aggregate" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + 
}, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Correctness.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Correctness.json new file mode 100644 index 0000000000..d81e0933f8 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Correctness.json @@ -0,0 +1,1644 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Find with $gt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + 
"local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + }, + "result": [] + } + ] + }, + { + "description": "Find with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "-1" + } + } + }, + "result": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + 
"$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + } + ] + }, + "result": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "0" + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "1" + } + } + } + ] + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + } + ] + }, + "result": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json new file mode 100644 index 0000000000..faf0c401b7 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Delete.json @@ -0,0 +1,420 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + 
"bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Delete.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": { + "deletedCount": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": 
"04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "delete" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + 
} + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json new file mode 100644 index 0000000000..b233b40b54 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -0,0 +1,488 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
FindOneAndUpdate.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + }, + "returnDocument": "Before" + }, + "result": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "findAndModify": "default", + "query": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "findAndModify" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json new file mode 100644 index 0000000000..1b787d4cb6 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-InsertFind.json @@ -0,0 +1,475 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Insert and Find.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "result": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + 
"encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json new file mode 100644 index 0000000000..07182bb5e2 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-Long-Update.json @@ -0,0 +1,492 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Update.", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command_name": "update", + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": 
"DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + } + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + "$db": "default" + } + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-WrongType.json b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-WrongType.json new file mode 100644 index 0000000000..6215604508 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Rangev2-WrongType.json @@ -0,0 +1,163 @@ +{ + "runOn": [ + { + "minServerVersion": "8.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ], + "maxServerVersion": "8.99.99" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberLong": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Wrong type: Insert Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "result": { + "errorContains": "cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "result": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-Update.json b/test/client-side-encryption/spec/legacy/fle2v2-Update.json new file mode 100644 index 0000000000..cb260edc0d --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-Update.json @@ -0,0 +1,570 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "encrypted_fields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ], + "tests": [ + { + "description": "Update can query an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + 
"encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "foo": "bar" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "update" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + } + }, + { + "description": "Update can modify an FLE2 indexed field", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + } + }, + "result": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "encryptedIndexed": "value456" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + 
"command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "command_name": "update" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..901c4dd841 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/fle2v2-validatorAndPartialFieldExpression.json @@ -0,0 +1,503 @@ +{ + "runOn": [ + { + "minServerVersion": "7.0.0", + "topology": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + 
"queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "collMod": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "database": "default", + "collection": "encryptedCollection", + "index": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": { + "$binary": { + "base64": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": 
"q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "operations": [ + { + "name": "dropCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "database", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encryptedIndexed": "foo" + } + } + ] + } + }, + "result": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/gcpKMS.json b/test/client-side-encryption/spec/legacy/gcpKMS.json index c2c08b8a23..65f12ec139 100644 --- a/test/client-side-encryption/spec/legacy/gcpKMS.json +++ b/test/client-side-encryption/spec/legacy/gcpKMS.json @@ -78,6 +78,17 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" diff --git a/test/client-side-encryption/spec/legacy/getMore.json b/test/client-side-encryption/spec/legacy/getMore.json index ee99bf7537..94e788ef61 100644 --- a/test/client-side-encryption/spec/legacy/getMore.json +++ b/test/client-side-encryption/spec/legacy/getMore.json @@ -216,7 +216,10 @@ "command_started_event": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "default", "batchSize": 2 diff --git a/test/client-side-encryption/spec/legacy/keyCache.json b/test/client-side-encryption/spec/legacy/keyCache.json new file mode 100644 index 0000000000..912ce80020 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/keyCache.json @@ -0,0 +1,270 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + 
"$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "Insert with deterministic encryption, then find it", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "keyExpirationMS": 1 + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 50 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/kmipKMS.json b/test/client-side-encryption/spec/legacy/kmipKMS.json index 5749d21ab8..349328b433 100644 --- a/test/client-side-encryption/spec/legacy/kmipKMS.json +++ b/test/client-side-encryption/spec/legacy/kmipKMS.json @@ -78,6 +78,17 @@ "bsonType": "string", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } } }, "bsonType": "object" @@ -117,6 +128,38 @@ "altname", "kmip_altname" ] + }, + { + "_id": { + "$uuid": 
"7411e9af-c688-4df7-8143-5e60ae96cba6" + }, + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": "11" + }, + "keyAltNames": [ + "delegated" + ] } ], "tests": [ @@ -218,6 +261,102 @@ ] } } + }, + { + "description": "Insert a document with auto encryption using KMIP delegated KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "kmip": {} + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip_delegated": "string0" + } + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ] + } + } } ] } diff --git a/test/client-side-encryption/spec/legacy/maxWireVersion.json b/test/client-side-encryption/spec/legacy/maxWireVersion.json index c1088a0ecf..f04f58dffd 100644 --- a/test/client-side-encryption/spec/legacy/maxWireVersion.json +++ b/test/client-side-encryption/spec/legacy/maxWireVersion.json @@ -1,7 +1,7 @@ { "runOn": [ { - "maxServerVersion": "4.0" + "maxServerVersion": "4.0.99" } ], "database_name": "default", diff --git a/test/client-side-encryption/spec/legacy/namedKMS.json b/test/client-side-encryption/spec/legacy/namedKMS.json new file mode 100644 index 0000000000..c859443585 --- /dev/null +++ b/test/client-side-encryption/spec/legacy/namedKMS.json @@ -0,0 +1,197 @@ +{ + "runOn": [ + { + "minServerVersion": "4.1.10" + } + ], + "database_name": "default", + "collection_name": "default", + "data": [], + "json_schema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ], + "tests": [ + { + "description": "Automatically encrypt and decrypt with a named KMS provider", + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "local:name2": { + "key": { + "$binary": { + "base64": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk", + "subType": "00" + } + } + } + } + } + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "result": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "command_name": "listCollections" + } + }, + { + "command_started_event": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "command_name": "find" + } + }, + { + "command_started_event": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "command_name": "insert" + } + }, + { + "command_started_event": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "command_name": "find" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ] + } + } + } + ] +} diff --git a/test/client-side-encryption/spec/legacy/timeoutMS.json b/test/client-side-encryption/spec/legacy/timeoutMS.json new file mode 100644 index 0000000000..b667767cfc --- /dev/null +++ b/test/client-side-encryption/spec/legacy/timeoutMS.json @@ -0,0 +1,200 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "cse-timeouts-db", + "collection_name": "cse-timeouts-coll", + "data": [], + "json_schema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "key_vault_data": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "command_name": "listCollections" + } + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + }, + "clientOptions": { + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + } + }, + "timeoutMS": 50 + }, + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "result": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json b/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json new file mode 100644 index 0000000000..24f33ab3ec --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-cleanupStructuredEncryptionData.json @@ -0,0 +1,219 @@ +{ + "description": "QE-Text-cleanupStructuredEncryptionData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "QE Text cleanupStructuredEncryptionData works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "cleanupStructuredEncryptionData": "coll" + }, + "commandName": "cleanupStructuredEncryptionData" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "cleanupStructuredEncryptionData": "coll", + "cleanupTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + }, + "commandName": "cleanupStructuredEncryptionData" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json b/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json new file mode 100644 index 0000000000..c7abfe2d4b --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-compactStructuredEncryptionData.json @@ -0,0 +1,261 @@ +{ + "description": "QE-Text-compactStructuredEncryptionData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { 
+ "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "QE Text compactStructuredEncryptionData works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "coll" + }, + "commandName": "compactStructuredEncryptionData" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "coll", + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "db.coll": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + 
], + "strEncodeVersion": { + "$numberInt": "1" + }, + "escCollection": "enxcol_.coll.esc", + "ecocCollection": "enxcol_.coll.ecoc" + } + } + }, + "compactionTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json b/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json new file mode 100644 index 0000000000..7279385743 --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-prefixPreview.json @@ -0,0 +1,338 @@ +{ + "description": "QE-Text-prefixPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "prefixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE prefixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": 
"q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrStartsWith": { + "input": "$encryptedText", + "prefix": "foo" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fmUMXTMV/XRiN0IL3VXxSEn6SQG9E6Po30kJKB8JJlQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vZIDMiFDgjmLNYVrrbnq1zT4hg7sGpe/PMtighSsnRc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "26Z5G+sHTzV3D7F8Y0m08389USZ2afinyFV3ez9UEBQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q/JEq8of7bE0QE5Id0XuOsNQ4qVpANYymcPQDUL2Ywk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Uvvv46LkfbgLoPqZ6xTBzpgoYRTM6FUgRdqZ9eaVojI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nMxdq2lladuBJA3lv3JC2MumIUtRJBNJVLp3PVE6nQk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hS3V0qq5CF/SkTl3ZWWWgXcAJ8G5yGtkY2RwcHNc5Oc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McgwYUxfKj5+4D0vskZymy4KA82s71MR25iV/Enutww=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ciqdk1b+t+Vrr6oIlFFk0Zdym5BPmwN3glQ0/VcsVdM=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrStartsWith": { + "input": "$encryptedText", + "prefix": "bar" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json b/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json new file mode 100644 index 0000000000..6a8f133eac --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-substringPreview.json @@ -0,0 +1,551 @@ +{ + "description": "QE-Text-substringPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + 
"database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "substringPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "10" + }, + "strMaxLength": { + "$numberLong": "20" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE suffixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrContains", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrContains": { + "input": "$encryptedText", + "substring": "oba" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IpY3x/jjm8j/74jAdUhgxdM5hk68zR0zv/lTKm/72Vg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G+ky260C6QiOfIxKz14FmaMbAxvui1BKJO/TnLOHlGk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7dv3gAKe9vwJMZmpB40pRCwRTmc7ds9UkGhxH8j084E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o0V+Efn6x8XQdE80F1tztNaT3qxHjcsd9DOQ47BtmQk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sJvrCjyVot7PIZFsdRehWFANKAj6fmBaj3FLbz/dZLE=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "e98auxFmu02h5MfBIARk29MI7hSmvN3F9DaQ0xjqoEM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US83krGNov/ezL6IhsY5eEOCxv1xUPDIEL/nmY0IKi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P2Aq5+OHZPG0CWIdmZvWq9c/18ZKVYW3vbxd+WU/TXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8AdPRPnSzcd5uhq4TZfNvNeF0XjLNVwAsJJMTtktw84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9O6u/G51I4ZHFLhL4ZLuudbr0s202A2QnPfThmOXPhI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N7AjYVyVlv6+lVSTM+cIxRL3SMgs3G5LgxSs+jrgDkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RbGF7dQbPGYQFd9DDO1hPz1UlLOJ77FAC6NsjGwJeos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m7srHMgKm6kZwsNx8rc45pmw0/9Qro6xuQ8lZS3+RYk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K75CNU3JyKFqZWPiIsVi4+n7DhYmcPl/nEhQ3d88mVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c7bwGpUZc/7JzEnMS7qQ/TPuXZyrmMihFaAV6zIqbZc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rDvEdUgEk8u4Srt3ETokWs2FXcnyJaRGQ+NbkFwi2rQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VcdZj9zfveRBRlpCR2OYWau2+GokOFb73TE3gpElNiU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOa9o2xfA6OgkbYUxd6wQJicaeN6guhy2V66W3ALsaA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1xGkJh+um70XiRd8lKLDtyHgDqrf7/59Mg7X0+KZh8k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OSvllqHxycbcZN4phR6NDujY3ttA59o7nQJ6V9eJpX0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZTX1pyk8Vdw0BSbJx7GeJNcQf3tGKxbrrNSTqBqUWkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cn7V05zb5iXwYrePGMHztC+GRq+Tj8IMpRDraauPhSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E9bV9KyrZxHJSUmMg0HrDK4gGN+75ruelAnrM6hXQgY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrssTNmdgXoTGpbaF0JLRCGH6cDQuz1XEFNTy98nrb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jZmyOJP35dsxQ/OY5U4ISpVRIYr8iedNfcwZiKt29Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d2mocORMbX9MX+/itAW8r1kxVw2/uii4vzXtc+2CIRQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JBnJy58eRPhDo3DuZvsHbvQDiHXxdtAx1Eif66k5SfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OjbDulC8s62v0pgweBSsQqtJjJBwH5JinfJpj7nVr+A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "85i7KT2GP9nSda3Gsil5LKubhq0LDtc22pxBxHpR+nE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "u9Fvsclwrs9lwIcMPV/fMZD7L3d5anSfJQVjQb9mgLg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LZ32ttmLJGOIw9oFaUCn3Sx5uHPTYJPSFpeGRWNqlUc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMsZvGEePTqtl0FJAL/jAdyWNQIlpwN61YIlZsSIZ6s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XZcu1a/ZGsIzAl3j4MXQlLo4v2p7kvIqRHtIQYFmL6k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Zse27LinlYCEnX6iTmJceI33mEJxFb0LdPxp0RiMOaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOv2Hgb2/sBpnX9XwFbIN6yDxhjchwlmczUf82W2tp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oQxZ9A6j3x5j6x1Jqw/N9tpP4rfWMjcV3y+a3PkrL7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/D7ew3EijyUnmT22awVFspcuyo3JChJcDeCPwpljzVM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BEmmwqyamt9X3bcWDld61P01zquy8fBHAXq3SHAPP0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"wygD9/kAo1KsRvtr1v+9/lvqoWdKwgh6gDHvAQfXPPk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pRTKgF/uksrF1c1AcfSTY6ZhqBKVud1vIztQ4/36SLs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C4iUo8oNJsjJ37BqnBgIgSQpf99X2Bb4W5MZEAmakHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "icoE53jIq6Fu/YGKUiSUTYyZ8xdiTQY9jJiGxVJObpw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oubCwk0V6G2RFWtcOnYDU4uUBoXBrhBRi4nZgrYj9JY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IyqhQ9nGhzEi5YW2W6v1kGU5DY2u2qSqbM/qXdLdWVU=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrContains", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrContains": { + "input": "$encryptedText", + "substring": "blah" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json b/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json new file mode 100644 index 0000000000..deec5e63b0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/QE-Text-suffixPreview.json @@ -0,0 +1,338 @@ +{ + "description": "QE-Text-suffixPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE suffixPreview", + 
"operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrEndsWith": { + "input": "$encryptedText", + "suffix": "bar" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uDCWsucUsJemUP7pmeb+Kd8B9qupVzI8wnLFqX1rkiU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "W3E1x4bHZ8SEHFz4zwXM0G5Z5WSwBhnxE8x5/qdP6JM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6g/TXVDDf6z+ntResIvTKWdmIy4ajQ1rhwdNZIiEG7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hU+u/T3D6dHDpT3d/v5AlgtRoAufCXCAyO2jQlgsnCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vrPnq0AtBIURNgNGA6HJL+5/p5SBWe+qz8505TRo/dE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "W5pylBxdv2soY2NcBfPiHDVLTS6tx+0ULkI8gysBeFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oWO3xX3x0bYUJGK2S1aPAmlU3Xtfsgb9lTZ6flGAlsg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SjZGucTEUbdpd86O8yj1pyMyBOOKxvAQ9C8ngZ9C5UE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CEaMZkxVDVbnXr+To0DOyvsva04UQkIYP3KtgYVVwf8=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrEndsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrEndsWith": { + "input": "$encryptedText", + "suffix": "foo" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/addKeyAltName.json b/test/client-side-encryption/spec/unified/addKeyAltName.json index 8b6c174cbc..f70bc572a8 100644 --- a/test/client-side-encryption/spec/unified/addKeyAltName.json +++ b/test/client-side-encryption/spec/unified/addKeyAltName.json @@ -98,7 +98,9 @@ }, "keyAltName": "new_key_alt_name" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/aggregate.json 
b/test/client-side-encryption/spec/unified/aggregate.json new file mode 100644 index 0000000000..d04ce49d28 --- /dev/null +++ b/test/client-side-encryption/spec/unified/aggregate.json @@ -0,0 +1,433 @@ +{ + "description": "aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with deterministic encryption", + "skipReason": "SERVER-39395", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encrypted_string": "457-55-5642" + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": 
[ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encrypted_string": "457-55-5642" + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Aggregate with empty pipeline", + "skipReason": "SERVER-40829 hides agg support behind enableTestCommands flag.", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [], + "cursor": {} + }, + "commandName": "aggregate" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Aggregate should fail with random encryption", + "skipReason": "SERVER-39395", + "operations": [ + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "random": "abc" + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + }, + { + "description": "Database aggregate should fail", + "operations": [ + { + "name": "aggregate", + "object": "db", + "arguments": { + "pipeline": [ + { + "$currentOp": { + "allUsers": false, + "idleConnections": false, + "localOps": true + } + }, + { + "$match": { + "command.aggregate": { + "$eq": 1 + } + } + }, + { + "$project": { + "command": 1 + } + }, + { + "$project": { + "command.lsid": 0 + } + } + ] + }, + "expectError": { + "errorContains": "non-collection command not supported for auto encryption: aggregate" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/awsTemporary.json 
b/test/client-side-encryption/spec/unified/awsTemporary.json new file mode 100644 index 0000000000..24b732a5eb --- /dev/null +++ b/test/client-side-encryption/spec/unified/awsTemporary.json @@ -0,0 +1,313 @@ +{ + "description": "awsTemporary", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": "bad" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + 
"description": "Insert a document with auto encryption using the AWS provider with temporary credentials", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Insert with invalid temporary credentials", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll1", + "expectError": { + "errorContains": "security token" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/azureKMS.json b/test/client-side-encryption/spec/unified/azureKMS.json new file mode 100644 index 0000000000..b70959217f --- /dev/null +++ b/test/client-side-encryption/spec/unified/azureKMS.json @@ -0,0 +1,293 @@ +{ + "description": "azureKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "azure": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + 
"base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "n+HWZ0ZSVOYA3cvQgP7inN4JSXfOH85IngmeQxRpQHjCCcqT3IFqEWNlrsVHiz3AELimHhX4HKqOLWMUeSIT6emUDDoQX9BAv8DR1+E1w4nGs/NyEneac78EYFkK3JysrFDOgl2ypCCTKAypkn9CkAx1if4cfgQE93LW4kczcyHdGiH36CIxrCDGv1UzAvERN5Qa47DVwsM6a+hWsF2AAAJVnF0wYLLJU07TuRHdMrrphPWXZsFgyV+lRqJ7DDpReKNO8nMPLV/mHqHBHGPGQiRdb9NoJo8CvokGz4+KE8oLwzKf6V24dtwZmRkrsDV4iOhvROAzz+Euo1ypSkL3mw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601573901680" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "azure_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using Azure KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_azure": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_azure": { + "$binary": { + "base64": "AQGVERPgAAAAAAAAAAAAAAAC5DbBSwPwfSlBrDtRuglvNvCXD1KzDuCKY2P+4bRFtHDjpTOE2XuytPAUaAbXf1orsPq59PVZmsbTZbt2CB8qaQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/badQueries.json 
b/test/client-side-encryption/spec/unified/badQueries.json new file mode 100644 index 0000000000..7a4f30d5b7 --- /dev/null +++ b/test/client-side-encryption/spec/unified/badQueries.json @@ -0,0 +1,1393 @@ +{ + "description": "badQueries", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "collection": { + "id": "coll_with_encrypted_id", + "database": "db", + "collectionName": "coll_with_encrypted_id" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "default", + "collectionName": "coll_with_encrypted_id", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "_id": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + } + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "$text unconditionally fails", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "$text": { + "$search": "search text" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Unsupported match expression operator for encryption" + } + } + ] + }, + { + "description": "$where unconditionally fails", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "$where": { + "$code": "function() { return true }" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Unsupported match expression operator for encryption" + } + } + ] + }, + { + "description": "$bit operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAllClear": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAllClear": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAllSet": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAllSet": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAnyClear": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAnyClear": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$bitsAnySet": 35 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$bitsAnySet": 35 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "geo operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$near": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "unable to find index" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$near": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$nearSphere": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "unable to find index" + } + }, + { + 
"name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$nearSphere": [ + 0, + 0 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$geoIntersects": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$geoIntersects": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$geoWithin": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$geoWithin": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 1, + 0 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ] + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "inequality operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$gt": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$gt": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$lt": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$lt": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$gte": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$gte": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$lte": 1 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$lte": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + } + ] + }, + { + "description": "other misc operators succeed on unencrypted, error on encrypted", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$mod": [ + 3, + 1 + ] + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$mod": [ + 3, + 1 + ] + } + } 
+ }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$regex": "pattern", + "$options": "" + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$regex": "pattern", + "$options": "" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$size": 2 + } + } + }, + "object": "coll", + "expectResult": [] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$size": 2 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Invalid match expression operator on encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$eq": null + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$eq": null + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Illegal equality to null predicate for encrypted field" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "unencrypted": { + "$in": [ + null + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + null + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Illegal equality to null inside $in against an encrypted field" + } + } + ] + }, + { + "description": "$addToSet succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$addToSet": { + "unencrypted": [ + "a" + ] + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$addToSet": { + "encrypted_string": [ + "a" + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$addToSet not allowed on encrypted values" + } + } + ] + }, + { + "description": "$inc succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$inc and $mul not allowed on encrypted values" + } + } + ] + }, + { + "description": "$mul succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$mul": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$mul": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + 
"expectError": { + "errorContains": "$inc and $mul not allowed on encrypted values" + } + } + ] + }, + { + "description": "$max succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$max": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$max": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$max and $min not allowed on encrypted values" + } + } + ] + }, + { + "description": "$min succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$min": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$min": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$max and $min not allowed on encrypted values" + } + } + ] + }, + { + "description": "$currentDate succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$currentDate": { + "unencrypted": true + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$currentDate": { + "encrypted_string": true + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$currentDate not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pop succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pop": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pop": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$pop not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pull succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pull": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pull": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$pull not allowed on encrypted values" + } + } + ] + }, + { + "description": "$pullAll succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pullAll": { + "unencrypted": [ + 1 + ] + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$pullAll": { + "encrypted_string": [ + 1 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$pullAll not allowed on encrypted values" + } + } + ] + }, + { + 
"description": "$push succeeds on unencrypted, error on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$push": { + "unencrypted": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$push": { + "encrypted_string": 1 + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$push not allowed on encrypted values" + } + } + ] + }, + { + "description": "array filters on encrypted fields does not error in mongocryptd, but errors in mongod", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_string.$[i].x": 1 + } + }, + "arrayFilters": [ + { + "i.x": 1 + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "Array update operations not allowed on encrypted values" + } + } + ] + }, + { + "description": "positional operator succeeds on unencrypted, errors on encrypted", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "unencrypted": 1 + }, + "update": { + "$set": { + "unencrypted.$": 1 + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "abc" + }, + "update": { + "$set": { + "encrypted_string.$": "abc" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt fields below '$' positional update operator" + } + } + ] + }, + { + "description": "an update that would produce an array on an encrypted field errors", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_string": [ + 1, + 2 + ] + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element of type" + } + } + ] + }, + { + "description": "an insert with encrypted field on _id errors", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + }, + "object": "coll_with_encrypted_id", + "expectError": { + "errorContains": "Invalid schema containing the 'encrypt' keyword." 
+ } + } + ] + }, + { + "description": "an insert with an array value for an encrypted field fails", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "encrypted_string": [ + "123", + "456" + ] + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element of type" + } + } + ] + }, + { + "description": "an insert with a Timestamp(0,0) value in the top-level fails", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "random": { + "$timestamp": { + "t": 0, + "i": 0 + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "A command that inserts cannot supply Timestamp(0, 0) for an encrypted" + } + } + ] + }, + { + "description": "distinct with the key referring to a field where the keyID is a JSON Pointer errors", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": {}, + "fieldName": "encrypted_w_altname" + }, + "object": "coll", + "expectError": { + "errorContains": "The distinct key is not allowed to be marked for encryption with a non-UUID keyId" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/badSchema.json b/test/client-side-encryption/spec/unified/badSchema.json new file mode 100644 index 0000000000..af93d659d4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/badSchema.json @@ -0,0 +1,393 @@ +{ + "description": "badSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + }, + "bsonType": "array" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll0", + "database": "db0", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "foo": { + "properties": { + "bar": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + } + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + 
"algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + }, + "bsonType": "object" + } + } + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll2", + "database": "db2", + "collectionName": "default" + } + }, + { + "client": { + "id": "client3", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "anyOf": [ + { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + ] + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db3", + "client": "client3", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll3", + "database": "db3", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Schema with an encrypted field in an array", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll0", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema without specifying parent object types", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll1", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema with siblings of encrypt document", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll2", + "expectError": { + "errorContains": "'encrypt' cannot be used in conjunction with 'bsonType'" + } + } + ], + "outcome": [ + { + 
"documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + }, + { + "description": "Schema with logical keywords", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll3", + "expectError": { + "errorContains": "Invalid schema" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/basic.json b/test/client-side-encryption/spec/unified/basic.json new file mode 100644 index 0000000000..5522f585da --- /dev/null +++ b/test/client-side-encryption/spec/unified/basic.json @@ -0,0 +1,431 @@ +{ + "description": "basic", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with deterministic encryption, then find it", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Insert with randomized encryption, then find it", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "random": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "random": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { 
+ "insert": "default", + "documents": [ + { + "_id": 1, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/bulk.json b/test/client-side-encryption/spec/unified/bulk.json new file mode 100644 index 0000000000..90922b88d0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bulk.json @@ -0,0 +1,407 @@ +{ + "description": "bulk", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + 
"$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Bulk write with encryption", + "operations": [ + { + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "encrypted_string": "string1" + } + } + }, + { + "updateOne": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + } + }, + { + "deleteOne": { + "filter": { + "$and": [ + { + "encrypted_string": "string1" + }, + { + "_id": 2 + } + ] + } + } + } + ], + "ordered": true + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "$and": [ + { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + { + "_id": { + "$eq": 2 + } + } + ] + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/client-side-encryption/spec/unified/bypassAutoEncryption.json b/test/client-side-encryption/spec/unified/bypassAutoEncryption.json new file mode 100644 index 0000000000..3254c43781 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bypassAutoEncryption.json @@ -0,0 +1,403 @@ +{ + "description": "bypassAutoEncryption", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "bypassAutoEncryption": true, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with bypassAutoEncryption", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string0" + }, + "bypassDocumentValidation": true + }, + 
"object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Insert with bypassAutoEncryption for local schema", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "encrypted_string": "string0" + }, + "bypassDocumentValidation": true + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 2, + "encrypted_string": "string0" + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": {} + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/bypassedCommand.json b/test/client-side-encryption/spec/unified/bypassedCommand.json new file mode 100644 index 0000000000..b0c4c56322 --- /dev/null +++ b/test/client-side-encryption/spec/unified/bypassedCommand.json @@ -0,0 +1,147 @@ +{ + "description": "bypassedCommand", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": 
"client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "ping is bypassed", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "kill op is not bypassed", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "killOp": 1, + "op": 1234 + }, + "commandName": "killOp" + }, + "expectError": { + "errorContains": "command not supported for auto encryption: killOp" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/count.json b/test/client-side-encryption/spec/unified/count.json new file mode 100644 index 0000000000..d44b3e827d --- /dev/null +++ b/test/client-side-encryption/spec/unified/count.json @@ -0,0 +1,293 @@ +{ + "description": "count", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + 
"encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Count with deterministic encryption", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "cursor": {}, + "pipeline": [ + { + "$match": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + { + "$group": { + "_id": { + "$const": 1 + }, + "n": { + "$sum": { + "$const": 1 + } + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "Count fails when filtering on a random 
encrypted field", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/countDocuments.json b/test/client-side-encryption/spec/unified/countDocuments.json new file mode 100644 index 0000000000..c0202258b8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/countDocuments.json @@ -0,0 +1,296 @@ +{ + "description": "countDocuments", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "countDocuments with deterministic encryption", + "skipReason": "waiting on SERVER-39395", + "operations": [ + { + "name": "countDocuments", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": 1 + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/create-and-createIndexes.json b/test/client-side-encryption/spec/unified/create-and-createIndexes.json new file mode 100644 index 0000000000..5debd15945 --- /dev/null +++ b/test/client-side-encryption/spec/unified/create-and-createIndexes.json @@ -0,0 +1,121 @@ +{ + "description": "create-and-createIndexes", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "unencryptedCollection" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": 
"unencryptedCollection", + "documents": [] + } + ], + "tests": [ + { + "description": "create is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "unencryptedCollection" + } + } + ] + }, + { + "description": "createIndexes is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "unencryptedCollection" + } + }, + { + "name": "createIndex", + "object": "coll", + "arguments": { + "keys": { + "x": 1 + }, + "name": "name" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "unencryptedCollection", + "indexName": "name" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json index 16cf6ca70d..2344a61a95 100644 --- a/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json +++ b/test/client-side-encryption/spec/unified/createDataKey-kms_providers-invalid.json @@ -1,5 +1,5 @@ { - "description": "createDataKey-provider-invalid", + "description": "createDataKey-kms_providers-invalid", "schemaVersion": "1.8", "runOnRequirements": [ { diff --git a/test/client-side-encryption/spec/unified/createDataKey.json b/test/client-side-encryption/spec/unified/createDataKey.json index 110c726f9a..f99fa3dbcf 100644 --- a/test/client-side-encryption/spec/unified/createDataKey.json +++ b/test/client-side-encryption/spec/unified/createDataKey.json @@ -337,6 +337,70 @@ } ] }, + { + "description": "create datakey with KMIP delegated KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip", + "opts": { + "masterKey": { + "delegated": true + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + }, + "delegated": true + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, { "description": "create datakey with local KMS provider", "operations": [ diff --git a/test/client-side-encryption/spec/unified/delete.json b/test/client-side-encryption/spec/unified/delete.json new file mode 100644 index 0000000000..242bcdba8c --- /dev/null +++ b/test/client-side-encryption/spec/unified/delete.json @@ -0,0 +1,396 @@ +{ + "description": "delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + 
"autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "deleteOne with deterministic encryption", + "operations": [ + { + "name": "deleteOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + 
{ + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "deleteMany with deterministic encryption", + "operations": [ + { + "name": "deleteMany", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/distinct.json b/test/client-side-encryption/spec/unified/distinct.json new file mode 100644 index 0000000000..a7ac0fc7f1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/distinct.json @@ -0,0 +1,325 @@ +{ + "description": "distinct", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + 
"databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "distinct with deterministic encryption", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "fieldName": "encrypted_string" + }, + "object": "coll", + "expectResult": [ + "string0" + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "default", + "key": "encrypted_string", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "Distinct fails when filtering on a random encrypted field", + "operations": [ + { + "name": "distinct", + "arguments": { + "filter": { + "random": "abc" + }, + "fieldName": "encrypted_string" + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/explain.json b/test/client-side-encryption/spec/unified/explain.json new file mode 100644 index 0000000000..667f921165 --- /dev/null +++ b/test/client-side-encryption/spec/unified/explain.json @@ -0,0 +1,293 @@ +{ + "description": "explain", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + 
"keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Explain a find with deterministic encryption", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "explain": { + "find": "default", + "filter": { + "encrypted_string": "string1" + } + } + }, + "commandName": "explain" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "explain": { + "find": "default", + "filter": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + }, + "verbosity": "allPlansExecution" + }, + "commandName": "explain" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/find.json b/test/client-side-encryption/spec/unified/find.json new file mode 100644 index 0000000000..7f358d9c08 --- /dev/null +++ b/test/client-side-encryption/spec/unified/find.json @@ 
-0,0 +1,458 @@ +{ + "description": "find", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Find with deterministic encryption", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": 
"string0" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Find with $in with deterministic encryption", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1", + "random": "abc" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$binary": { + "base64": "AgAAAAAAAAAAAAAAAAAAAAACyfp+lXvKOi7f5vh6ZsCijLEaXFKq1X06RmyS98ZvmMQGixTw8HM1f/bGxZjGwvYwjXOkIEb7Exgb8p2KCDI5TQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + 
"filter": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Find fails when filtering on a random encrypted field", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndDelete.json b/test/client-side-encryption/spec/unified/findOneAndDelete.json new file mode 100644 index 0000000000..ff1103cb9b --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndDelete.json @@ -0,0 +1,276 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": 
"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete with deterministic encryption", + "operations": [ + { + "name": "findOneAndDelete", + "arguments": { + "filter": { + "encrypted_string": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "remove": true + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndReplace.json b/test/client-side-encryption/spec/unified/findOneAndReplace.json new file mode 100644 index 0000000000..c1a89fd2f6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndReplace.json @@ -0,0 +1,282 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace with deterministic encryption", + "operations": [ + { + "name": "findOneAndReplace", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "replacement": { + "encrypted_string": "string1" + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encrypted_string": "string0" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + 
"base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "update": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/findOneAndUpdate.json b/test/client-side-encryption/spec/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..ffcb0e79e4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/findOneAndUpdate.json @@ -0,0 +1,286 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": 
"00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate with deterministic encryption", + "operations": [ + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encrypted_string": "string0" + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json b/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json new file mode 100644 index 0000000000..671413b83f --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-BypassQueryAnalysis.json @@ -0,0 +1,324 @@ +{ + "description": "fle2v2-BypassQueryAnalysis", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "bypassQueryAnalysis": true + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "unencryptedDB", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "unencryptedColl", + "database": "unencryptedDB", + 
"collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + ], + "tests": [ + { + "description": "BypassQueryAnalysis decrypts", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "object": "unencryptedColl", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": 
{ + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Compact.json b/test/client-side-encryption/spec/unified/fle2v2-Compact.json new file mode 100644 index 0000000000..07ebf4351b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Compact.json @@ -0,0 +1,312 @@ +{ + "description": "fle2v2-Compact", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + 
"creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + }, + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Compact works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedIndexed": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "encryptedUnindexed": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + }, + { + "commandSucceededEvent": { + "commandName": "compactStructuredEncryptionData", + "reply": { + "ok": 1 + } + } + } + ] + } + ] + }, + { + "description": "Compact errors on an unencrypted client", + "operations": [ + { + "name": "runCommand", + "object": "db1", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + }, + "expectError": { + "errorContains": "'compactStructuredEncryptionData.compactionTokens' is missing" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json new file mode 100644 index 0000000000..fc069d55b2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection-OldServer.json @@ -0,0 +1,127 @@ +{ + "description": "fle2v2-CreateCollection-OldServer", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "maxServerVersion": "6.3.99", + "topologies": [ + "replicaset", + "sharded", + 
"load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "tests": [ + { + "description": "driver returns an error if creating a QEv2 collection on unsupported server", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + }, + "expectError": { + "errorContains": "Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption." + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json new file mode 100644 index 0000000000..3dfb76c461 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-CreateCollection.json @@ -0,0 +1,1748 @@ +{ + "description": "fle2v2-CreateCollection", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": 
{ + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "escCollection": "invalid_esc_name", + "ecocCollection": "invalid_ecoc_name", + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "encryptedFieldsMap": {} + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + } + ], + "tests": [ + { + "description": "state collections and index are created", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + 
"commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "default state collection names are applied", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "drop removes all state collections", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" 
+ } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexNotExists", + "object": "db", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "CreateCollection without encryptedFields.", + "operations": [ + { + "name": "dropCollection", 
+ "object": "db", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "plaintextCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "plaintextCollection" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "plaintextCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "plaintextCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "plaintextCollection" + }, + "databaseName": "default", + "commandName": "create" + } + } + ] + } + ] + }, + { + "description": "CreateCollection from encryptedFieldsMap.", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": 
"encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "CreateCollection from encryptedFields.", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + 
"commandName": "createIndexes" + } + } + ] + } + ] + }, + { + "description": "DropCollection from encryptedFieldsMap", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "DropCollection from encryptedFields", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": 
"default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "DropCollection from remote encryptedFields", + "operations": [ + { + "name": "dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "createCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "__safeContent___1" + } + }, + { + "name": 
"dropCollection", + "object": "db2", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.esc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "enxcol_.encryptedCollection.ecoc" + } + }, + { + "name": "assertCollectionNotExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.esc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "enxcol_.encryptedCollection.ecoc", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "encryptedCollection", + "encryptedFields": { + "fields": [ + { + "path": "firstName", + "bsonType": "string", + "keyId": { + "$binary": { + "subType": "04", + "base64": "AAAAAAAAAAAAAAAAAAAAAA==" + } + } + } + ] + } + }, + "databaseName": "default", + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "__safeContent___1", + "key": { + "__safeContent__": 1 + } + } + ] + }, + "databaseName": "default", + "commandName": "createIndexes" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "encryptedCollection" + } + }, + "databaseName": "default", + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.esc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "enxcol_.encryptedCollection.ecoc" + }, + "databaseName": "default", + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "drop": "encryptedCollection" + }, + "databaseName": "default", + "commandName": "drop" + } + } + ] + } + ] + }, + { + "description": "encryptedFields are consulted for metadata collection names", + "operations": [ + { + "name": "dropCollection", + "object": "db1", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db1", + "arguments": { + "collection": "encryptedCollection" + }, + "expectError": { + "errorContains": "Encrypted State Collection name should follow" + } + } + ] + } + ] +} diff --git 
a/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json b/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json new file mode 100644 index 0000000000..b171c78c00 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-DecryptExistingData.json @@ -0,0 +1,186 @@ +{ + "description": "fle2v2-DecryptExistingData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 decrypt of existing data succeeds", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Delete.json new file mode 100644 index 0000000000..305f642ae1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Delete.json @@ -0,0 +1,326 @@ +{ + "description": "fle2v2-Delete", + "schemaVersion": 
"1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Delete can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 0000000000..7a6957db0a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,258 @@ +{ + "description": "fle2v2-EncryptedFields-vs-EncryptedFieldsMap", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": 
"0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "default", + "commandName": "insert", + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "databaseName": "default", + "commandName": "find", + "command": { + "find": "default", + "filter": { + "_id": 1 + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "commandName": "find", + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "default", + "databaseName": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json new file mode 100644 index 0000000000..af24e9b369 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFields-vs-jsonSchema.json @@ -0,0 +1,367 @@ +{ + "description": "fle2v2-EncryptedFields-vs-jsonSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + 
"observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + }, + "validator": { + "$jsonSchema": { + "properties": {}, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "encryptedFields is preferred over jsonSchema", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json new file mode 100644 index 0000000000..3727e43147 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-EncryptedFieldsMap-defaults.json @@ -0,0 +1,139 @@ +{ + "description": "fle2v2-EncryptedFieldsMap-defaults", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "default state collections are applied to encryptionInformation", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + 
"base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": { + "$binary": { + "base64": "BYkAAAAFZAAgAAAAAE8KGPgq7h3n9nH5lfHcia8wtOTLwGkZNLBesb6PULqbBXMAIAAAAACq0558QyD3c3jkR5k0Zc9UpQK8ByhXhtn2d1xVQnuJ3AVjACAAAAAA1003zUWGwD4zVZ0KeihnZOthS3V6CEHUfnJZcIYHefISY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + ], + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [] + } + } + }, + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json new file mode 100644 index 0000000000..5131dc9fef --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-FindOneAndUpdate.json @@ -0,0 +1,622 @@ +{ + "description": "fle2v2-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": 
"00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "foo": "bar" + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } 
+ } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "findOneAndUpdate can modify an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "encryptedIndexed": "value456" + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "encryptedIndexed": "value456" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + 
} + } + }, + "commandName": "findAndModify" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json new file mode 100644 index 0000000000..8155797583 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Indexed.json @@ -0,0 +1,361 @@ +{ + "description": "fle2v2-InsertFind-Indexed", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert and find FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + 
"_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPGmZcUzdE/FPILvRSyAScGvZparGI2y9rJ/vSBxgCujBXMAIAAAAACi1RjmndKqgnXy7xb22RzUbnZl1sOZRXPOC0KcJkAxmQVsACAAAAAApJtKPW4+o9B7gAynNLL26jtlB4+hq5TXResijcYet8USY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json new file mode 100644 index 0000000000..a6410bb9d8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-InsertFind-Unindexed.json @@ -0,0 +1,301 @@ +{ + "description": "fle2v2-InsertFind-Unindexed", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + 
}, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert and find FLE2 unindexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "Query with an unindexed field fails", + 
"operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedUnindexed": "value123" + } + }, + "object": "coll", + "expectError": { + "errorContains": "encrypt" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json b/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json new file mode 100644 index 0000000000..dc8ffc57b2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-MissingKey.json @@ -0,0 +1,137 @@ +{ + "description": "fle2v2-MissingKey", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "FLE2 encrypt fails with missing key", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "123" + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + }, + { + "description": "FLE2 decrypt fails with missing key", + "operations": [ + { + "name": "find", + "arguments": { + "filter": {} + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json b/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json new file mode 100644 index 0000000000..4036fe5edd --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-NoEncryption.json @@ -0,0 +1,123 @@ +{ + "description": "fle2v2-NoEncryption", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + 
"autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [] + } + } + } + ], + "tests": [ + { + "description": "insert with no encryption succeeds", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "foo": "bar" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "foo": "bar" + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json new file mode 100644 index 0000000000..8ccbcafc24 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Compact.json @@ -0,0 +1,358 @@ +{ + "description": "fle2v2-Rangev2-Compact", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Compact works with 'range' fields", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "default" + }, + "commandName": "compactStructuredEncryptionData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "reply": { + "ok": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "default", + "compactionTokens": { + "encryptedInt": { + "ecoc": { + "$binary": { + "base64": "noN+05JsuO1oDg59yypIGj45i+eFH6HOTXOPpeZ//Mk=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "QxKJD2If48p0l8NAXf2Kr0aleMd/dATSjBK6hTpNMyc=", + "subType": "00" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + 
"$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + }, + { + "commandSucceededEvent": { + "commandName": "compactStructuredEncryptionData", + "reply": { + "ok": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json new file mode 100644 index 0000000000..7933cc5600 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Aggregate.json @@ -0,0 +1,574 @@ +{ + "description": "fle2v2-Rangev2-Date-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": 
"enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json new file mode 100644 index 0000000000..9ed541fa8e --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Correctness.json @@ -0,0 +1,1610 @@ +{ + "description": "fle2v2-Rangev2-Date-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": 
"coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": 
"find", + "arguments": { + "filter": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + 
"$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "-1" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 200, + "encryptedDate": { + "$date": { + "$numberLong": "200" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { 
+ "$lt": { + "$date": { + "$numberLong": "1" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lte": { + "$date": { + "$numberLong": "1" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$lt": { + "$date": { + "$numberLong": "0" + } + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "200" + } + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + }, + "$lt": { + "$date": { + "$numberLong": "2" + } + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + 
"$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$gte": { + "$date": { + "$numberLong": "0" + } + }, + "$lte": { + "$date": { + "$numberLong": "200" + } + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + }, + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDate": { + "$in": [ + { + "$date": { + "$numberLong": "0" + } + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value type is a date" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json new file mode 100644 index 0000000000..ad05dd4e17 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Delete.json @@ -0,0 +1,505 @@ +{ + "description": "fle2v2-Rangev2-Date-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": 
"keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": 
"encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json new file mode 100644 index 0000000000..55db0279c2 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-FindOneAndUpdate.json @@ -0,0 +1,577 @@ +{ + "description": 
"fle2v2-Rangev2-Date-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, 
+ "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } 
+ } + }, + "update": { + "$set": { + "encryptedDate": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json new file mode 100644 index 0000000000..1fd1edf191 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-InsertFind.json @@ -0,0 +1,562 @@ +{ + "description": "fle2v2-Rangev2-Date-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + 
"provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 
1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + 
"type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json new file mode 100644 index 0000000000..d5153270d5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Date-Update.json @@ -0,0 +1,581 @@ +{ + "description": "fle2v2-Rangev2-Date-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Date. 
Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDate": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDate": { + "$date": { + "$numberLong": "1" + } + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDate": { + "$gt": { + "$date": { + "$numberLong": "0" + } + } + } + }, + "update": { + "$set": { + "encryptedDate": { + "$date": { + "$numberLong": "2" + } + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDate": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDate": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAJbW4AAAAAAAAAAAAJbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { 
+ "encryptedDate": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDate", + "bsonType": "date", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$date": { + "$numberLong": "0" + } + }, + "max": { + "$date": { + "$numberLong": "200" + } + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json new file mode 100644 index 0000000000..712a68be32 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Aggregate.json @@ -0,0 +1,1965 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + 
}, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, 
+ "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json new file mode 100644 index 0000000000..edca7724a7 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Correctness.json @@ -0,0 +1,1016 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + 
}, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { 
+ "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] 
+ } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + 
"$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalNoPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json new file mode 100644 index 0000000000..4b0121ac22 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Delete.json @@ -0,0 +1,1179 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json new file mode 100644 index 0000000000..2697549f6a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-FindOneAndUpdate.json @@ -0,0 +1,1969 @@ +{ + "description": "fle2v2-Rangev2-Decimal-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", 
+ "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { 
+ "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + 
"bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAK
VfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hug
Igo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U
1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5
rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa
0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwC
PgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTA
qg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfV
xzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANti
fY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json new file mode 100644 index 0000000000..e3d52f5d04 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-InsertFind.json @@ -0,0 +1,1956 @@ +{ + "description": "fle2v2-Rangev2-Decimal-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + 
"minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. 
Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RGTjNVEsNJb+DG7DpPOam8rQWD5HZAMpRyiTQaw7tk8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RlQWwhU+uVv0a+9IB5cUkEfvHBvOw3B1Sx6WfPWMqes=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubb81XTC7U+4tcNzf1oYvOY6gR5hC2Izqx54f4GuJ0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6M4Q5NMQ9TqNnjzGOxIkiUIY8TEL0I3XD1QnhefQUqU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BtInzk9t2FFMCEY6AQ7zN8jwrrZEs2irSv6q0Q4NaIw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vxXfETu9cuBIpRBo3jUUU04mJIH/aAhLX8K6VI5Xv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXPCdS+q23zi1bkPnaVG2j0PsVtxdeSLJ//h6J1x8RU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KY3KkfBAsN2l80wbpj41G0gwBR5KmmFnZcagg7D3ENk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI8NFAxXCX4VOnY5X73K6KI/Yspd3aR94KV39MhJlAw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nFxH0UC3mATKA6Vboz+QX/hAjj19kF/SH6H5Cne7qC0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q8hYqIYaIi7nOdG/7qQZYnz8Bsacfi66M1nVku4SH08=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4saA92R4arp4anvD9xFtze+sNcQqTEhPHyl1h70A8NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DbIziOBRRyeQS6RtBR09E37LV+CTKrEjGoRMLSpG6eE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Fv80Plp/7w2gnVqrwawLd6qhJ10G4NCDm3re67cNq4Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "T/T2oiQCBBES4YN7EodzPRdabZSFlYIClHBym+bQUZE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQgHD3l46Ujqtbnj1VbbeM29C9wJzOhz+yZ/7XdSrxk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ltlFKzWvyZvHxDFOYDd/XXJ6kUiJj0ln2HTCEz2o4Z4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "flW8A7bltC1u8bzx0WJtxosGJdOVsJFfbx33jxnpFGg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SXO+92QbMKwUSG2t27ciunV1c3VvFkUuDmSczpRe008=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+KioGs1GM+xRBzFE67ePTWj04KMSE5/Y6qUF7nJ5kvU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L3xNVbh6YH+RzqABN+5Jgb7T234Efpn766DmUvxIxgg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hPF+60mBYPjh21dEmPlBhKgyc9S2qLtTkypYvnqP2Fc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EletRsETy2HcjaPIm2c8CkT7ch/P3pJJDC8hasepcSU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "r5bMXUaNKqLPxZ+TG9HYTG4aSDgcpim27rN8rQFkM0w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Q7Erdr8+/S0wUEDDIqlS5XjBVWvhZY65K0uUDb6+Ns=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xEcnhXy35hbXNVBPOOt3TUHbxvKfQ48KjA9b6/rbMqQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"T8bEpiQNgsEudXvyKE9SZlSvbpV/LUaslsdqgSFltyo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hIoiaF2YjnxDbODfhFEB+JGZ5nf8suD3Shck5bwQ3N0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qnA6qzejeRJ0rsZaZ0zOvKAaXyxt5lpscKQNYFZNl4k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "anAKCL2DN/le2VaP0n2ucYSEH/DaaEH/8Sa4OqTZsRA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JCZlBJaFm618oWYSnT9Jr1MtwFVw4BZjOzO+5yWgR90=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yxyk4n9762WzcDVGnTn4jCqUnSMIVCrLDIjCX1QVj34=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fDI6fdKvDJwim5/CQwWZEzcrXE3LHgy7FTtffcC7tXE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Vex+gcz5T+WkzsVZQrkqUR2ryyZbnaOGuWpYvjN0zCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8TLEXz+Gbbp6llHpZXVjLsdlYY9f6hrKpHVpyfDe0RY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fTyt5BrunypS65TfOzFW2E2qdIuT4SLeDeGlbQoJCs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8fKGrkqN0/KuSjyXgDBmRauDKrSa//JBKRWHEB9xBf4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s4codmG7uN4ss6P357jL21lazEe90M9GOK5WrOknSV0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RkSpua8XF+NUdxVDU90EbLUTTyZFX3tt3atBTroFaRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LnTCuCDyAHK5B9KXzjtwGmWB+qergQk2OCjnIx9MI2A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cBFh0virAX4pVXf/udIGI2951i0+0aZAdJcBVGtYnT4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G54X6myQXWZ5fw/G31en3QbdgfXzL9+hFTtJpnWMqDI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EdsiiuezcsFJFnYIyGjCOhnqMj1BOwTB5EFxN+ERUkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dVH9MXLtk0WTwGQ3xmrhOqfropMUkDW3o6paNPGl3NU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sB3HqXKWY3pKbuEH8BTbfNIGfbY+7/ZbOc3XC+JRNNI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WHyDk62Xhqbo4/iie2aLIM4x2uuAjv6102dJSHI58oM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pNUFuHpeNRDUZ/NrtII2c6sNc9eGR1lIUlIyXKERA+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UPa+pdCqnN0bfAptdzldQOSd01gidrDKy8KhWrpSKAI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l+7dOAlo+HUffMqFYXL6pgUFeTbwOM9CjKQLxEoLtc4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SRnDXV/rN6C8xwMutv9E1luv3DOUio3VkgPr8Cpm7Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QcH6gl+gX7xZ7OWhUNQMbndJy0Piz49pDo6RsnLkVSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "t+uL4DnfsI/Zll/KXWW1cOKX3Hu8WIkm3pt9efCVSAQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "myutHDctku/+Uug/nD8gRbYvmx/IovtoAAC2/fz2oHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6C+cjD0e0nSCP6cPqQYbNG7SlOd6Mfvi8hyfm7Ng+D8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zg01JSoOj9oBKT0S1ldJucXzY5AKgreS+h2xJreWTOs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7qQ80/FjodHl1m1py/Oii0/9C/xWbLdhaRXQ+kkCP10=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YwWMNH07vL6c5Nhg+MRnVByhzUunu8y0VLM9z/XvR5U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Dle8bU98+fudAbc14SToZFkwvV3tcYVsjDug0NWljpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "J+eKL1vPJmlzltvhI6Li5Fz/TJmi3Ng+ehRTcs46API=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB3XzfFygLwC3WHkj0up+VbEd25KKoce1vOpG/5bwK4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "vnVnmOnL+z2pqwE+A6cVKS0Iwy4F4/2IiElJca9bUQM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+lG5r/Fpqry3BtFuvY67+RntmHAMDoLVOSGc6ZoXPb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L5MXQertqc6uj7ADe8aWKbd1sYHPCE7P1VYVg9Zc3VI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "imKONuZgopt0bhM3GMX2WVPwQYMTobuUUEdhcLfHs4c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOkU1J1uVbiVFWBerbXsSIVcF2nqiicTkFy4x7kFHB8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gI0uDhXeoH/UatDQKEf4qo8FHzWZDhb/wuWTqbq/ID4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cOkd5Aa3btYhtojE/smsF/PJnULqQ4NNqTkU6KXTFmo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AWNJMs1MTe294oFipp8Y6P0CjpkZ4qCZoClQF3XcHq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6gJtlzXOFhGYrVbTuRMmvMlDTwXdNtR9aGBlHZPwIMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LEmwVGA/xsEG7UrcOoYLFu6KCXgijzFznenknuDacm8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mIRFPTXRrGaPtp/Ydij2jgkRe4uoUvAKxW2d8b9zYL0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+Uv2u48WALOO0L311z+eryjYQzKJVMfdHMZPhOAFmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "INXXp0wDyVCq+NtfIrrC2ciETmyW/dWB/48/u4yLEZ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "se7DGo8XrlrQDLEcco1tZrQt9kDe+0RTyl2bw/quG4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vr0m2+Zk9lbN6UgWCyn8xJWJOokU3IDYab5U5q1+CgQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XI+eJ8Gy2JktG1gICgoj1qpsfy1tKmH0kglWbaQH6DA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A+UCuNnuAUqnQzspA6TVqUPRmtZmpSex5HFw7THRxs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaH2Ehfljd19uo0Fvb3iwkdaiWEVQd2YPoitgEPkhSM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S/iZBJGcc8+qZxyMtab65MMBoSglybwk3x58Nb86gnY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w14ZE5qqY5YgkS4Zcs9YNbrQbY1XfGOOHNn9bOYnFVQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0MhGd/jEF1vjkKGp+ZMn9SjLK54jkp9W4Hg+Sp/oxaI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92QZ73e/NRTYgCm4aifaKth6aAsKnLLccBc0zx/qUTY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WOjzemCgFJOiGIp81RSVh/tFlzSTj9eFWcBnsiv2Ycs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DrsP9CmfKPjw5yLL8bnSeAxfNzAwlb+Z8OqCiKgBY7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lMogqg8veBv6mri3/drMe9afJiKMvevkmGcw9BedfLo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TxqwNcY8Tg2MPpNdkPBwvfpuTttSYRHU26DGECKYQ9o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "l0u1b4b4vYACWIwfnB7PZac4oDEgjQZCzHruNPTgAIY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iVSGQ+cCfhbWIrY/v/WBORK92elu9gfRKyGhr6r/k00=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yK1forG50diEXte8ECzjfpHeYsPyuQ/dgxbxn/nzY5k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gIfTLCD3VwnOwkC0zPXWTqaITxX6ZplA69PO2a6zolc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O/Zxlgh3WqpzJ7+Sd8XWMVID4/GXJUUWaSqfgDUi3b0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZQ6yv368zwahUqSUYH/StL0Qgz/TwS1CzlMjVDvCciI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m2rPEYkjwyiKdonMrKlcF7hya4lFOAUwEePJ3SgrNx8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"Mq0yl5iVKlq71bT/dT/fXOWf2n90bTnXFnOdGDN0JOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6qDGMXipPLC2O6EAAMjO2F9xx4rdqZso4IkPpH2304U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jvQHRQQa2RIszE2LX2Hv2LbRhYawJ6qmtRt8HZzFQXg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ovJXQrkZlpeHRciKyE/WWNm5O389gRgzx1W+Dw596X4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "a4kgRNvYctGYqyQv9qScL/WkljTYVylJ9pE9KDULlxU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qV4Q48vPiCJMTjljotzYKI/zfExWpkKOSHGcAjGyDig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jtI7zbBF+QW/aYYTkn90zzyHLXLgmy7l1bzgMb2oqic=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q0KmJl9txPdn962UNvnfe6UFhdk9YaFZuTm33F+csso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ULNdEqeZJgtmNOhN/Y9INzsE9AnxWYwOMn+pIbRXIFs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "R4oz9+wkdjpKe5tE1jpG7IURAnfvS5fLP4LrD5cZfTE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qG5Z7VhwSu/HT/YFTgDzyAAzJKq51xPw2HeEV5btYC4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OM/1DmIIZ5Qyhtq8TGkHTBEMVKjAnKRZMRXYtTG8ctc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2R5vZbljLXnDFA99YfGuRB7pAdPJVKsT25zLNMC0fUk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OMbavF2EmdAz1fHkLV3ctFEUDfriKhoT2gidwHZ9z1o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MWT4Zrw3/vVvTYMa1Is5Pjr3wEwnBfnEAPPUAHKQhNU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tBkRPfG9yxfKocQx5pAJX0oEHKPL0Tgtr+0UYe09InE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lqxpnDR/H0YgH7RcfKoNoaaRhe1SIazIeMbQ1fu9y3Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "utT1UdR22PWOTrOkZauztX613lAplV4eh/ejTRb7ZSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S+Y2yFyKi/a6FXhih4yGo29X8I8OT6/zwEoX6NMKT4o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QSjVppg29x6oS5yBg8OFjrFt0tuTpWCuKxfIy0k8YnE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y3r6/Xsfvsl3HksXlVYkJgHUqpQGfICxg3x9f8Zw1qM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BSltHzEwDjFN4du9rDHAPvl22atlcTioEtt+gC5L1tk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0arGXjSN0006UnXbrWsGqhvBair569DeFDUME3Df3rA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s/DumaMad08S+PBUUcrS+v42K0z8HgcdiQtrFAEu2Qs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EzJ8Y8N0OQBTlnvrK82PdevDNZZO4E6CNgYVu8Cj6Ks=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VA4vr8jBPI5QdiPrULzzZjBMIUbG3V7Slg5zm0bFcKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YAOvEB2ZLtq9LQiFViBHWaxxWVVonC2rNYj9tN9s3L0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hgaHMo9aAGS+nBwvqnTjZO+YkiQPY1c1XcIYeaYKHyI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YvaoLt3ZpH0atB0tNzwMjpoxRYJXl0DqSjisMJiGVBE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EMmW6CptFsiLoPOi5/uAJQ2FmeLg6mCpuVLLrRWk7Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1jQsNMarSnarlYmXEuoFokeBMg/090qUD9wqo1Zn8Gs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hupXNKhRpJxpyDAAP1TgJ5JMZh9lhbMk6s7D7dMS3C8=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, 
+ "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWz
dqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAvtpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV
5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6vek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIoz
Dcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7u
icTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEA
vqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxMjIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsA
CAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZyPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXr
ubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAA
AAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvXkmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea
5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json new file mode 100644 index 0000000000..8ade3593e6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Decimal-Update.json @@ -0,0 +1,1975 @@ +{ + "description": "fle2v2-Rangev2-Decimal-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + 
"database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Decimal. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalNoPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rbf3AeBEv4wWFAKknqDxRW5cLNkFvbIs6iJjc6LShQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + 
}, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0l86Ag5OszXpa78SlOUV3K9nff5iC1p0mRXtLg9M1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hn6yuxFHodeyu7ISlhYrbSf9pTiH4TDEvbYLWjTwFO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zdf4y2etKBuIpkEU1zMwoCkCsdisfXZCh8QPamm+drY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rOQ9oMdiK5xxGH+jPzOvwVqdGGnF3+HkJXxn81s6hp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "61aKKsE3+BJHHWYvs3xSIBvlRmKswmaOo5rygQJguUg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KuDb/GIzqDM8wv7m7m8AECiWJbae5EKKtJRugZx7kR0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Q+t8t2TmNUiCIorVr9F3AlVnX+Mpt2ZYvN+s8UGict8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJRZIpKxUgHyL83kW8cvfjkxN3z6WoNnUg+SQw+LK+k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnUsYjip8SvW0+m9mR5WWTkpK+p6uwJ6yBUAlBnFKMk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PArHlz+yPRYDycAP/PgnI/AkP8Wgmfg++Vf4UG1Bf0E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wnIh53Q3jeK8jEBe1n8kJLa89/H0BxO26ZU8SRIAs9Q=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "4F8U59gzBLGhq58PEWQk2nch+R0Va7eTUoxMneReUIA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ihKagIW3uT1dm22ROr/g5QaCpxZVj2+Fs/YSdM2Noco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EJtUOOwjkrPUi9mavYAi+Gom9Y2DuFll7aDwo4mq0M0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dIkr8dbaVRQFskAVT6B286BbcBBt1pZPEOcTZqk4ZcI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aYVAcZYkH/Tieoa1XOjE/zCy5AJcVTHjS0NG2QB7muA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sBidL6y8TenseetpioIAAtn0lK/7C8MoW4JXpVYi3z8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0Dd2klU/t4R86c2WJcJDAd57k/N7OjvYSO5Vf8KH8sw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I3jZ92WEVmZmgaIkLbuWhBxl7EM6bEjiEttgBJunArA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aGHoQMlgJoGvArjfIbc3nnkoc8SWBxcrN7hSmjMRzos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bpiWPnF/KVBQr5F6MEwc5ZZayzIRvQOLDAm4ntwOi8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tI7QVKbE6avWgDD9h4QKyFlnTxFCwd2iLySKakxNR/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XGsge0CnoaXgE3rcpKm8AEeku5QVfokS3kcI+JKV1lk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JQxlryW2Q5WOwfrjAnaZxDvC83Dg6sjRVP5zegf2WiM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YFuHKJOfoqp1iGVxoFjx7bLYgVdsN4GuUFxEgO9HJ5s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z6vUdiCR18ylKomf08uxcQHeRtmyav7/Ecvzz4av3k4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SPGo1Ib5AiP/tSllL7Z5PAypvnKdwJLzt8imfIMSEJQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m94Nh6PFFQFLIib9Cu5LAKavhXnagSHG6F5EF8lD96I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pfEkQI98mB+gm1+JbmVurPAODMFPJ4E8DnqfVyUWbSo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DNj3OVRLbr43s0vd+rgWghOL3FqeO/60npdojC8Ry/M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kAYIQrjHVu49W8FTxyxJeiLVRWWjC9fPcBn+Hx1F+Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "aCSO7UVOpoQvu/iridarxkxV1SVxU1i9HVSYXUAeXk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Gh6hTP/yj1IKlXQ+Q69KTfMlGZjEcXoRLGbQHNFo/1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/gDgIFQ4tAlJk3GN48IS5Qa5IPmErwGk8CHxAbp6gs0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PICyimwPjxpusyKxNssOOwUotAUbygpyEtORsVGXT8g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4lu+cBHyAUvuxC6JUNyHLzHsCogGSWFFnUCkDwfQdgI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pSndkmoNUJwXjgkbkgOrT5f9nSvuoMEZOkwAN9ElRaE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tyW+D4i26QihNM5MuBM+wnt5AdWGSJaJ4X5ydc9iWTU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9Syjr8RoxUgPKr+O5rsCu07AvcebA4P8IVKyS1NVLWc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "67tPfDYnK2tmrioI51fOBG0ygajcV0pLo5+Zm/rEW7U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "y0EiPRxYTuS1eVTIaPQUQBBxwkyxNckbePvKgChwd0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NWd+2veAaeXQgR3vCvzlI4R1WW67D5YsVLdoXfdb8qg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PY5RQqKQsL2GqBBSPNOEVpojNFRX/NijCghIpxD6CZk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lcvwTyEjFlssCJtdjRpdN6oY+C7bxZY+WA+QAqzj9zg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"CWE7XRNylvTwO/9Fv56dNqUaQWMmESNS/GNIwgBaEI0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ijwlrUeS8nRYqK1F8kiCYF0mNDolEZS+/lJO1Lg93C8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8KzV+qYGYuIjoNj8eEpnTuHrMYuhzphl80rS6wrODuU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wDyTLjSEFF895hSQsHvmoEQVS6KIkZOtq1c9dVogm9I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SGrtPuMYCjUrfKF0Pq/thdaQzmGBMUvlwN3ORIu9tHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KySHON3hIoUk4xWcwTqk6IL0kgjzjxgMBObVIkCGvk4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hBIdS9j0XJPeT4ot73ngELkpUoSixvRBvdOL9z48jY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Tx6um0q9HjS5ZvlFhvukpI6ORnyrXMWVW1OoxvgqII0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zFKlyfX5H81+d4A4J3FKn4T5JfG+OWtR06ddyX4Mxas=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cGgCDuPV7MeMMYEDpgOupqyNP4BQ4H7rBnd2QygumgM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IPaUoy98v11EoglTpJ4kBlEawoZ8y7BPwzjLYBpkvHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Pfo4Am6tOWAyZNn8G9W5HWWGC3ZWmX0igI/RRB870Ro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fnTSjd7bC1Udoq6iM7UDnHAC/lsIXSHp/Gy332qw+/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fApBgVRrTDyEumkeWs5p3ag9KB48SbU4Si0dl7Ns9rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QxudfBItgoCnUj5NXVnSmWH3HK76YtKkMmzn4lyyUYY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sSOvwhKa29Wq94bZ5jGIiJQGbG1uBrKSBfOYBz/oZeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FdaMgwwJ0NKsqmPZLC5oE+/0D74Dfpvig3LaI5yW5Fs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sRWBy12IERN43BSZIrnBfC9+zFBUdvjTlkqIH81NGt4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/4tIRpxKhoOwnXAiFn1Z7Xmric4USOIfKvTYQXk3QTc=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Mr/laWHUijZT5VT3x2a7crb7wgd/UXOGz8jr8BVqBpM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VDCpBYsJIxTfcI6Zgf7FTmKMxUffQv+Ys8zt5dlK76I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zYDslUwOUVNwTYkETfjceH/PU3bac9X3UuQyYJ19qK0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rAOmHSz18Jx107xpbv9fYcPOmh/KPAqge0PAtuhIRnc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BFOB1OGVUen7VsOuS0g8Ti7oDsTt2Yj/k/7ta8YAdGM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fckE5SPs0GU+akDkUEM6mm0EtcV3WDE/sQsnTtodlk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mi9+aNjuwIvaMpSHENvKzKRAmX9cYguo2mXLvOoftHQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K6TWn4VcWWkz/gkUkLmbtwkG7SNeABICmLDnoYJFlLU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z+2/cEtGU0Fq7QJFNGA/0y4aWAsw0ncG6X0LYRqwS3c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rrSIf+lgcNZFbbUkS9BmE045jRWBpcBJXHzfMVEFuzE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KlHL3Kyje1/LMIfgbCqw1SolxffJvvgsYBV5y77wxuA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hzJ1YBoETmYeCh352dBmG8d8Wse/bUcqojTWpWQlgsc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"lSdcllDXx8MA+s0GULjDA1lQkcV0L8/aHtZ6dM2pZ2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "HGr7JLTTA7ksAnlmjSIwwdBVvgr3fv46/FTdiCPYpos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMr25v1VwOEVZ8xaNUTHJCcsYqV+kwK6RzGYilxPtJ4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "129hJbziPJzNo0IoTU3bECdge0FtaPW8dm4dyNVNwYU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "doiLJ96qoo+v7NqIAZLq6BI5axV8Id8gT5vyJ1ZZ0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cW/Lcul3xYmfyvI/0x/+ybN78aQmBK1XIGs1EEU09N8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1aVIwzu9N5EJV9yEES+/g6hOTH7cA2NTcLIc59cu0wU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kw5tyl7Ew0r1wFyrN1mB9FiVW2hK2BxxxUuJDNWjyjQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ADAY2YBrm6RJBDY/eLLcfNxmSJku+mefz74gH66oyco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8gkqB1LojzPrstpFG7RHYmWxXpIlPDTqWnNsXH7XDRU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TESfVQMDQjfTZmHmUeYUE2XrokJ6CcrsKx/GmypGjOw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qFM+HFVQ539S0Ouynd1fBHoemFxtU9PRxE5+Dq7Ljy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jPiFgUZteSmOg4wf3bsEKCZzcnxmMoILsgp/GaZD+dM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YaWUgJhYgPNN7TkFK16H8SsQS226JguaVhOIQxZwQNQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x90/Qk3AgyaFsvWf2KUCu5XF3j76WFSjt/GrnG01060=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZGWybWL/xlEdMYRFCZDUoz10sywTf7U/7wufsb78lH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8l4ganN66jIcdxfHAdYLaym/mdzUUQ8TViw3MDRySPc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c8p5XEGTqxqvRGVlR+nkxw9uUdoqDqTB0jlYQ361qMA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZGFLlpQBcU3zIUg8MmgWwFKVz/SaA7eSYFrfe3Hb70=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "34529174M77rHr3Ftn9r8jU4a5ztYtyVhMn1wryZSkU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YkQ4pxFWzc49MS0vZM6S8mNo4wAwo21rePBeF3C+9mI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MhOf4mYY00KKVhptOcXf0bXB7WfuuM801MRJg4vXPgc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7pbbD8ihNIYIBJ3tAUPGzHpFPpIeCTAk5L88qCB0/9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C9Q5PoNJTQo6pmNzXEEXUEqH22//UUWY1gqILcIywec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AqGVk1QjDNDLYWGRBX/nv9QdGR2SEgXZEhF0EWBAiSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/sGI3VCbJUKATULJmhTayPOeVW+5MjWSvVCqS77sRbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yOtbL0ih7gsuoxVtRrACMz+4N5uo7jIR7zzmtih2Beo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uA6dkb2Iyg9Su8UNDvZzkPx33kPZtWr/CCuEY+XgzUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1DoSFPdHIplqZk+DyWAmEPckWwXw/GdB25NLmzeEZhk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OfDVS0T3ZuIXI/LNbTp6C9UbPIWLKiMy6Wx+9tqNl+g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3PZjHXbmG6GtPz+iapKtQ3yY4PoFFgjIy+fV2xQv1YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kaoLN0BoBWsmqE7kKkJQejATmLShd8qffcAmlhsxsGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpiw9KgQdegGmp7IJnSGX2miujRLU0xzs0ITTqbPW7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NuXFf7xGUefYjIUTuMxNUTCfVHrF8oL0AT7dPv5Plk4=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "8Tz53LxtfEBJ9eR+d2690kwNsqPV6XyKo2PlqZCbUrc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e6zsOmHSyV8tyQtSX6BSwui6wK9v1xG3giY/IILJQ2w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2fedFMCxa2DzmIpfbDKGXhQg0PPwbUv6vIWdwwlvhms=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yEJKMFnWXTC8tJUfzCInzQRByNEPjHxpw4L4m8No91Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YbFuWwOiFuQyOzIJXDbOkCWC2DyrG+248TBuVCa1pXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "w7IkwGdrguwDrar5+w0Z3va5wXyZ4VXJkDMISyRjPGo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YmJUoILTRJPhyIyWyXJTsQ6KSZHHbEpwPVup6Ldm/Ko=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FvMjcwVZJmfh6FP/yBg2wgskK+KHD8YVUY6WtrE8xbg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4HCtD4HyYz0nci49IVAa10Z4NJD/FHnRMV4sRX6qro=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nC7BpXCmym+a0Is2kReM9cYN2M1Eh5rVo8fjms14Oiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1qtVWaeVo649ZZZtN8gXbwLgMWGLhz8beODbvru0I7Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ej+mC0QFyMNIiSjR939S+iGBm7dm+1xObu5IcF/OpbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UQ8LbUG3cMegbr9yKfKanAPQE1EfPkFciVDrNqZ5GHY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4iI3mXIDjnX+ralk1HhJY43mZx2uTJM7hsv9MQzTX7E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0WQCcs3rvsasgohERHHCaBM4Iy6yomS4qJ5To3/yYiw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qDCTVPoue1/DOAGNAlUstdA9Sid8MgEY4e5EzHcVHRk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9F9Mus0UnlzHb8E8ImxgXtz6SU98YXD0JqswOKw/Bzs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pctHpHKVBBcsahQ6TNh6/1V1ZrqOtKSAPtATV6BJqh0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vfR3C/4cPkVdxtNaqtF/v635ONbhTf5WbwJM6s4EXNE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ejP43xUBIex6szDcqExAFpx1IE/Ksi5ywJ84GKDFRrs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jbP4AWYd3S2f3ejmMG7dS5IbrFol48UUoT+ve3JLN6U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CiDifI7958sUjNqJUBQULeyF7x0Up3loPWvYKw9uAuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e2dQFsiHqd2BFHNhlSxocjd+cPs4wkcUW/CnCz4KNuM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PJFckVmzBipqaEqsuP2mkjhJE4qhw36NhfQ9DcOHyEU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "S3MeuJhET/B8VcfZYDR9fvX0nscDj416jdDekhmK11s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CGVHZRXpuNtQviDB2Kj03Q8uvs4w3RwTgV847R7GwPw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yUGgmgyLrxbEpDVy89XN3c2cmFpZXWWmuJ/35zVZ+Jw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "inb6Q97mL1a9onfNTT8v9wsoi/fz7KXKq3p8j90AU9c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CCyYx/4npq9xGO1lsCo8ZJhFO9/tN7DB+/DTE778rYg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LNnYw4fwbiAZu0kBdAHPEm/OFnreS+oArdB5O/l/I98=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P006SxmUS/RjiQJVYPdMFnNo3827GIEmSzagggkg05Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oyvwY+WsnYV6UHuPki1o0ILJ2jN4uyXf9yaUNtZJyBA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "36Lk3RHWh1wmtCWC/Yj6jNIo17U5y6SofAgQjzjVxD8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"vOOo8FqeHnuO9mqOYjIb4vgwIwVyXZ5Y+bY5d9tGFUM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bJiDJjwQRNxqxlGjRm5lLziFhcfTDCnQ/qU1V85qcRg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2Qgrm1n0wUELAQnpkEiIHB856yv76q8jLbpiucetcm0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5ciPOYxTK0WDwwYyfs7yiVymwtYQXDELLxmM4JLl4/o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "31dC2WUSIOKQc4jwT6PikfeYTwi80mTlh7P31T5KNQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YluTV2Mu53EGCKLcWfHZb0BM/IPW2xJdG3vYlDMEsM4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dh/8lGo2Ek6KukSwutH6Q35iy8TgV0FN0SJqe0ZVHN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EVw6HpIs3BKen2qY2gz4y5dw1JpXilfh07msZfQqJpc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FYolLla9L8EZMROEdWetozroU40Dnmwwx2jIMrr7c1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8M6k4QIutSIj6CM41vvkQtuFsaGrjoR9SZJVSLbfGKQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9LM0VoddDNHway442MqY+Z7vohB2UHau/cddshhzf40=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66i8Ytco4Yq/FMl6pIRZazz3CZlu8fO2OI6Pne0pvHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2a/HgX+MjZxjXtSvHgF1yEpHMJBkl8Caee8XrJtn0WM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "frhBM662c4ZVG7mWP8K/HhRjd01lydW/cPcHnDjifqc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6k1T7Q1t668PBqv6fwpVnT1HWh7Am5LtbKvwPJKcpGU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UlJ5Edfusp8S/Pyhw6KTglIejmbr1HO0zUeHn/qFETA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jsxsB+1ECB3assUdoC333do9tYH+LglHmVSJHy4N8Hg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2nzIQxGYF7j3bGsIesECEOqhObKs/9ywknPHeJ3yges=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xJYKtuWrX90JrJVoYtnwP7Ce59XQGFYoalxpNfBXEH0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NLI5lriBTleGCELcHBtNnmnvwSRkHHaLOX4cKboMgTw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hUOQV0RmE5aJdJww1AR9rirJG4zOYPo+6cCkgn/BGvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "h4G2Of76AgxcUziBwCyH+ayMOpdBWzg4yFrTfehSC2c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VuamM75RzGfQpj2/Y1jSVuQLrhy6OAwlZxjuQLB/9Ss=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn9+hLq7hvw02xr9vrplOCDXKBTuFhfbX7d5v/l85Pg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fAiGqKyLZpGngBYFbtYUYt8LUrJ49vYafiboifTDjxs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BxRILymgfVJCczqjUIWXcfrfSgrrYkxTM5VTg0HkZLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CrFY/PzfPU2zsFkGLu/dI6mEeizZzCR+uYgjZBAHro0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "AEbrIuwvXLTtYgMjOqnGQ8y8axUn5Ukrn7UZRSyfQVw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ouWeVH3PEFg+dKWlXc6BmqirJOaVWjJbMzZbCsce4dA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+hd6xFB+EG+kVP7WH4uMd1CLaWMnt5xJRaY/Guuga9Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zmpGalfAOL3gmcUMJYcLYIRT/2VDO/1Dw4KdYZoNcng=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2PbHAoM/46J2UIZ/vyksKzmVVfxA7YUyIxWeL/N/vBk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7fD9x+zk5MVFesb59Klqiwwmve7P5ON/5COURXj5smE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tlrNQ4jaq051iaWonuv1sSrYhKkL1LtNZuHsvATha3s=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "fBodm28iClNpvlRyVq0dOdXQ08S7/N3aDwid+PdWvRo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "O+/nnRqT3Zv7yMMGug8GhKHaWy6u7BfRGtZoj0sdN1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5AZZ/RTMY4Photnm/cpXZr/HnFRi3eljacMsipkJLHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oFVyo/kgoMxBIk2VE52ySSimeyU+Gr0EfCwapXnTpKA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Z8v59DfcnviA0mzvnUk+URVO0UuqAWvtarEgJva/n1c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P64GOntZ+zBJEHkigoh9FSxSO+rJTqR20z5aiGQ9an4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xMbSuDPfWuO/Dm7wuVl06GnzG9uzTlJJX9vFy7boGlY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kXPB19mRClxdH2UsHwlttS6lLU2uHvzuZgZz7kC45jU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NDVjVYXAw4k0w4tFzvs7QDq39aaU3HQor4I2XMKKnCk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uKw/+ErVfpTO1dGUfd3T/eWfZW3nUxXCdBGdjvHtZ88=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "av0uxEzWkizYWm0QUM/MN1hLibnxPvCWJKwjOV4yVQY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ERwUC47dvgOBzIsEESMIioLYbFOxOe8PtJTnmDkKuHM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2gseKlG5Le12fS/vj4eaED4lturF16kAgJ1TpW3HxEE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7Cvg0Y3j/5i2F1TeXxlMmU7xwif5dCmwkZAOrVC5K2Y=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + 
"sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalNoPrecision": { + "$gt": { + "$binary": { + "base64": "DR1jAAADcGF5bG9hZACxYgAABGcAnWIAAAMwAH0AAAAFZAAgAAAAAJu2KgiI8vM+kz9qD3ZQzFQY5qbgYqCqHG5R4jAlnlwXBXMAIAAAAAAAUXxFXsz764T79sGCdhxvNd5b6E/9p61FonsHyEIhogVsACAAAAAAt19RL3Oo5ni5L8kcvgOJYLgVYyXJExwP8pkuzLG7f/kAAzEAfQAAAAVkACAAAAAAPQPvL0ARjujSv2Rkm8r7spVsgeC1K3FWcskGGZ3OdDIFcwAgAAAAACgNn660GmefR8jLqzgR1u5O+Uocx9GyEHiBqVGko5FZBWwAIAAAAADflr+fsnZngm6KRWYgHa9JzK+bXogWl9evBU9sQUHPHQADMgB9AAAABWQAIAAAAAD2Zi6kcxmaD2mY3VWrP+wYJMPg6cSBIYPapxaFQxYFdQVzACAAAAAAM/cV36BLBY3xFBXsXJY8M9EHHOc/qrmdc2CJmj3M89gFbAAgAAAAAOpydOrKxx6m2gquSDV2Vv3w10GocmNCFeOo/fRhRH9JAAMzAH0AAAAFZAAgAAAAAOaNqI9srQ/mI9gwbk+VkizGBBH/PPWOVusgnfPk3tY1BXMAIAAAAAAc96O/pwKCmHCagT6T/QV/wz4vqO+R22GsZ1dse2Vg6QVsACAAAAAAgzIak+Q3UFLTHXPmJ+MuEklFtR3eLtvM+jdKkmGCV/YAAzQAfQAAAAVkACAAAAAA0XlQgy/Yu97EQOjronl9b3dcR1DFn3deuVhtTLbJZHkFcwAgAAAAACoMnpVl6EFJak8A+t5N4RFnQhkQEBnNAx8wDqmq5U/dBWwAIAAAAACR26FJif673qpwF1J1FEkQGJ1Ywcr/ZW6JQ7meGqzt1QADNQB9AAAABWQAIAAAAAAOtpNexRxfv0yRFvZO9DhlkpU4mDuAb8ykdLnE5Vf1VAVzACAAAAAAeblFKm/30orP16uQpZslvsoS8s0xfNPIBlw3VkHeekYFbAAgAAAAAPEoHj87sYE+nBut52/LPvleWQBzB/uaJFnosxp4NRO2AAM2AH0AAAAFZAAgAAAAAIr8xAFm1zPmrvW4Vy5Ct0W8FxMmyPmFzdWVzesBhAJFBXMAIAAAAABYeeXjJEzTHwxab6pUiCRiZjxgtN59a1y8Szy3hfkg+gVsACAAAAAAJuoY4rF8mbI+nKb+5XbZShJ8191o/e8ZCRHE0O4Ey8MAAzcAfQAAAAVkACAAAAAAl+ibLk0/+EwoqeC8S8cGgAtjtpQWGEZDsybMPnrrkwEFcwAgAAAAAHPPBudWgQ+HUorLDpJMqhS9VBF2VF5aLcxgrM1s+yU7BWwAIAAAAAAcCcBR2Vyv5pAFbaOU97yovuOi1+ATDnLLcAUqHecXcAADOAB9AAAABWQAIAAAAACR9erwLTb+tcWFZgJ2MEfM0PKI9uuwIjDTHADRFgD+SQVzACAAAAAAcOop8TXsGUVQoKhzUllMYWxL93xCOkwtIpV8Q6hiSYYFbAAgAAAAAKXKmh4V8veYwob1H03Q3p3PN8SRAaQwDT34KlNVUjiDAAM5AH0AAAAFZAAgAAAAALv0vCPgh7QpmM8Ug6ad5ioZJCh7pLMdT8FYyQioBQ6KBXMAIAAAAADsCPyIG8t6ApQkRk1fX/sfc1kpuWCWP8gAEpnYoBSHrQVsACAAAAAAJe/r67N6d8uTiogvfoR9rEXbIDjyLb9EVdqkayFFGaYAAzEwAH0AAAAFZAAgAAAAAIW4AxJgYoM0pcNTwk1RSbyjZGIqgKL1hcTJmNrnZmoPBXMAIAAAAAAZpfx3EFO0vY0f1eHnE0PazgqeNDTaj+pPJMUNW8lFrAVsACAAAAAAP+Um2vwW6Bj6vuz9DKz6+6aWkoKoEmFNoiz/xXm7lOsAAzExAH0AAAAFZAAgAAAAAKliO6L9zgeuufjj174hvmQGNRbmYYs9yAirL7OxwEW3BXMAIAAAAAAqU7vs3DWUQ95Eq8OejwWnD0GuXd+ASi/uD6S0l8MM1QVsACAAAAAAb9legYzsfctBPpHyl7YWpPmLr5QiNZFND/50N1vv2MUAAzEyAH0AAAAFZAAgAAAAAOGQcCBkk+j/Kzjt/Cs6g3BZPJG81wIHBS8JewHGpgk+BXMAIAAAAABjrxZXWCkdzrExwCgyHaafuPSQ4V4x2k9kUCAqUaYKDQVsACAAAAAADBU6KefT0v8zSmseaMNmQxKjJar72y7MojLFhkEHqrUAAzEzAH0AAAAFZAAgAAAAAPmCNEt4t97waOSd5hNi2fNCdWEkmcFJ37LI9k4Az4/5BXMAIAAAAABX7DuDPNg+duvELf3NbLWkPMFw2HGLgWGHyVWcPvSNCAVsACAAAAAAS7El1FtZ5STh8Q1FguvieyYX9b2DF1DFVsb9hzxXYRsAAzE0AH0AAAAFZAAgAAAAAD4vtVUYRNB+FD9yoQ2FVJH3nMeJeKbi6eZfth638YqbBXMAIAAAAAANCuUB4OdmuD6LaDK2f3vaqfgYYvg40wDXOBbcFjTqLwVsACAAAAAA9hqC2VoJBjwR7hcQ45xO8ZVojwC83jiRacCaDj6Px2gAAzE1AH0AAAAFZAAgAAAAAJPIRzjmTjbdIvshG6UslbEOd797ZSIdjGAhGWxVQvK1BXMAIAAAAABgmJ0Jh8WLs9IYs/a7DBjDWd8J3thW/AGJK7zDnMeYOAVsACAAAAAAi9zAsyAuou2oiCUHGc6QefLUkACa9IgeBhGu9W/r0X8AAzE2AH0AAAAFZAAgAAAAAABQyKQPoW8wGPIqnsTv69+DzIdRkohRhOhDmyVHkw9WBXMAIAAAAAAqWA2X4tB/h3O1Xlawtz6ndI6WaTwgU1QYflL35opu5gVsACAAAAAAWI/Gj5aZMwDIxztqmVL0g5LBcI8EdKEc2UA28pnekQoAAzE3AH0AAAAFZAAgAAAAACB7NOyGQ1Id3MYnxtBXqyZ5Ul/lHH6p1b10U63DfT6bBXMAIAAAAADpOryIcndxztkHSfLN3Kzq29sD8djS0PspDSqERMqokQVsACAAAAAADatsMW4ezgnyi1PiP7xk+gA4AFIN/fb5uJqfVkjg4UoAAzE4AH0AAAAFZAAgAAAAAKVfXLfs8XA14CRTB56oZwV+bFJN5BHraTXbqEXZDmTkBXMAIAAAAAASRWTsfGOpqdffiOodoqIgBzG/yzFyjR5CfUsIUIWGpgVsACAAAAAAkgCHbCwyX640/0Ni8+
MoYxeHUiC+FSU4Mn9jTLYtgZgAAzE5AH0AAAAFZAAgAAAAAH/aZr4EuS0/noQR9rcF8vwoaxnxrwgOsSJ0ys8PkHhGBXMAIAAAAACd7ObGQW7qfddcvyxRTkPuvq/PHu7+6I5dxwS1Lzy5XAVsACAAAAAA3q0eKdV7KeU3pc+CtfypKR7BPxwaf30yu0j9FXeOOboAAzIwAH0AAAAFZAAgAAAAAKvlcpFFNq0oA+urq3w6d80PK1HHHw0H0yVWvU9aHijXBXMAIAAAAADWnAHQ5Fhlcjawki7kWzdqjM2f6IdGJblojrYElWjsZgVsACAAAAAAO0wvY66l24gx8nRxyVGC0QcTztIi81Kx3ndRhuZr6W4AAzIxAH0AAAAFZAAgAAAAAH/2aMezEOddrq+dNOkDrdqf13h2ttOnexZsJxG1G6PNBXMAIAAAAABNtgnibjC4VKy5poYjvdsBBnVvDTF/4mmEAxsXVgZVKgVsACAAAAAAqvadzJFLqQbs8WxgZ2D2X+XnaPSDMLCVVgWxx5jnLcYAAzIyAH0AAAAFZAAgAAAAAF2wZoDL6/V59QqO8vdRZWDpXpkV4h4KOCSn5e7x7nmzBXMAIAAAAADLZBu7LCYjbThaVUqMK14H/elrVOYIKJQCx4C9Yjw37gVsACAAAAAAEh6Vs81jLU204aGpL90fmYTm5i5R8/RT1uIbg6VU3HwAAzIzAH0AAAAFZAAgAAAAAH27yYaLn9zh2CpvaoomUPercSfJRUmBY6XFqmhcXi9QBXMAIAAAAAAUwumVlIYIs9JhDhSj0R0+59psCMsFk94E62VxkPt42QVsACAAAAAAT5x2hCCd2bpmpnyWaxas8nSxTc8e4C9DfKaqr0ABEysAAzI0AH0AAAAFZAAgAAAAALMg2kNAO4AFFs/mW3In04yFeN4AP6Vo0klyUoT06RquBXMAIAAAAAAgGWJbeIdwlpqXCyVIYSs0dt54Rfc8JF4b8uYc+YUj0AVsACAAAAAAWHeWxIkyvXTOWvfZzqtPXjfGaWWKjGSIQENTU3zBCrsAAzI1AH0AAAAFZAAgAAAAALas/i1T2DFCEmrrLEi7O2ngJZyFHialOoedVXS+OjenBXMAIAAAAAA1kK0QxY4REcGxHeMkgumyF7iwlsRFtw9MlbSSoQY7uAVsACAAAAAAUNlpMJZs1p4HfsD4Q4WZ4TBEi6Oc2fX34rzyynqWCdwAAzI2AH0AAAAFZAAgAAAAAP1TejmWg1CEuNSMt6NUgeQ5lT+oBoeyF7d2l5xQrbXWBXMAIAAAAABPX0kj6obggdJShmqtVfueKHplH4ZrXusiwrRDHMOKeQVsACAAAAAAIYOsNwC3DA7fLcOzqdr0bOFdHCfmK8tLwPoaE9uKOosAAzI3AH0AAAAFZAAgAAAAAMrKn+QPa/NxYezNhlOX9nyEkN1kE/gW7EuZkVqYl0b8BXMAIAAAAABUoZMSPUywRGfX2EEencJEKH5x/P9ySUVrhStAwgR/LgVsACAAAAAAMgZFH6lQIIDrgHnFeslv3ld20ynwQjQJt3cAp4GgrFkAAzI4AH0AAAAFZAAgAAAAAMmD1+a+oVbiUZd1HuZqdgtdVsVKwuWAn3/M1B6QGBM3BXMAIAAAAACLyytOYuZ9WEsIrrtJbXUx4QgipbaAbmlJvSZVkGi0CAVsACAAAAAA4v1lSp5H9BB+HYJ4bH43tC8aeuPZMf78Ng1JOhJh190AAzI5AH0AAAAFZAAgAAAAAOVKV7IuFwmYP1qVv8h0NvJmfPICu8yQhzjG7oJdTLDoBXMAIAAAAABL70XLfQLKRsw1deJ2MUvxSWKxpF/Ez73jqtbLvqbuogVsACAAAAAAvfgzIorXxE91dDt4nQxYfntTsx0M8Gzdsao5naQqcRUAAzMwAH0AAAAFZAAgAAAAAKS/1RSAQma+xV9rz04IcdzmavtrBDjOKPM+Z2NEyYfPBXMAIAAAAAAOJDWGORDgfRv8+w5nunh41wXb2hCA0MRzwnLnQtIqPgVsACAAAAAAf42C1+T7xdHEFF83+c2mF5S8PuuL22ogXXELnRAZ4boAAzMxAH0AAAAFZAAgAAAAAFeq8o82uNY1X8cH6OhdTzHNBUnCChsEDs5tm0kPBz3qBXMAIAAAAABaxMBbsaeEj/EDtr8nZfrhhhirBRPJwVamDo5WwbgvTQVsACAAAAAAMbH453A+BYAaDOTo5kdhV1VdND1avNwvshEG/4MIJjQAAzMyAH0AAAAFZAAgAAAAAI8IKIfDrohHh2cjspJHCovqroSr5N3QyVtNzFvT5+FzBXMAIAAAAABXHXteKG0DoOMmECKp6ro1MZNQvXGzqTDdZ0DUc8QfFAVsACAAAAAA/w5s++XYmO+9TWTbtGc3n3ndV4T9JUribIbF4jmDLSMAAzMzAH0AAAAFZAAgAAAAAJkHvm15kIu1OtAiaByj5ieWqzxiu/epK6c/9+KYIrB0BXMAIAAAAACzg5TcyANk0nes/wCJudd1BwlkWWF6zw3nGclq5v3SJQVsACAAAAAAvruXHTT3irPJLyWpI1j/Xwf2FeIE/IV+6Z49pqRzISoAAzM0AH0AAAAFZAAgAAAAAAYSOvEWWuSg1Aym7EssNLR+xsY7e9BcwsX4JKlnSHJcBXMAIAAAAABT48eY3PXVDOjw7JpNjOe1j2JyI3LjDnQoqZ8Je5B2KgVsACAAAAAAU2815RR57TQ9uDg0XjWjBkAKvf8yssxDMzrM4+FqP6AAAzM1AH0AAAAFZAAgAAAAAGQxC9L1e9DfO5XZvX1yvc3hTLtQEdKO9FPMkyg0Y9ZABXMAIAAAAADtmcMNJwdWLxQEArMGZQyzpnu+Z5yMmPAkvgq4eAKwNQVsACAAAAAAJ88zt4Y/Hoqh+zrf6KCOiUwHbOzCxSfp6k/qsZaYGEgAAzM2AH0AAAAFZAAgAAAAADLHK2LNCNRO0pv8n4fAsxwtUqCNnVK8rRgNiQfXpHSdBXMAIAAAAACf16EBIHRKD3SzjRW+LMOl+47QXA3CJhMzlcqyFRW22AVsACAAAAAAMGz4fAOa0EoVv90fUffwLjBrQhHATf+NdlgCR65vujAAAzM3AH0AAAAFZAAgAAAAAHiZJiXKNF8bbukQGsdYkEi95I+FSBHy1I5/hK2uEZruBXMAIAAAAADE+lZBa8HDUJPN+bF6xI9x4N7GF9pj3vBR7y0BcfFhBAVsACAAAAAAGIEN6sfqq30nyxW4dxDgXr/jz5HmvA9T1jx/pKCn4zgAAzM4AH0AAAAFZAAgAAAAAI1oa2OIw5TvhT14tYCGmhanUoYcCZtNbrVbeoMldHNZBXMAIAAAAAAx2nS0Ipblf2XOgBiUOuJFBupBhe7nb6QPLZlA4aMPCgVsACAAAAAA9xu828hugIgo0E3de9dZD+gTpVUGlwtDba+tw/WcbUoAAzM5AH0AAAAFZAAgAAAAABgTWS3Yap7Q59hii/uPPimHWXsr+DUmsqfwt/X73qsOBXMAIAAAAACKK05liW5KrmEAv
tpCB1WUltruzUylDDpjea//UlWoOAVsACAAAAAAcgN4P/wakJ5aJK5c1bvJBqpVGND221dli2YicPFfuAYAAzQwAH0AAAAFZAAgAAAAABOAnBPXDp6i9TISQXvcNKwGDLepZTu3cKrB4vKnSCjBBXMAIAAAAADjjzZO7UowAAvpwyG8BNOVqLCccMFk3aDK4unUeft5ywVsACAAAAAA4zkCd4k9gvfXoD1C7vwTjNcdVJwEARh8h/cxZ4PNMfgAAzQxAH0AAAAFZAAgAAAAAHN8hyvT1lYrAsdiV5GBdd5jhtrAYE/KnSjw2Ka9hjz9BXMAIAAAAAD794JK7EeXBs+D7yOVK7nWF8SbZ/7U8gZ7nnT9JFNwTAVsACAAAAAAg8Wt1HO3NhByq2ggux2a4Lo6Gryr24rEFIqh2acrwWMAAzQyAH0AAAAFZAAgAAAAAO93bPrq8bsnp1AtNd9ETnXIz0lH/2HYN/vuw9wA3fyFBXMAIAAAAABHlls5fbaF2oAGqptC481XQ4eYxInTC29aElfmVZgDUgVsACAAAAAANoQXEWpXJpgrSNK/cKi/m7oYhuSRlp1IZBF0bqTEATcAAzQzAH0AAAAFZAAgAAAAAL1YsAZm1SA0ztU6ySIrQgCCA74V6rr0/4iIygCcaJL6BXMAIAAAAADTXWTHWovGmUR1Zg9l/Aqq9H5mOCJQQrb/Dfae7e3wKAVsACAAAAAA5dunyJK6/SVfDD0t9QlNBcFqoZnf9legRjHaLSKAoQMAAzQ0AH0AAAAFZAAgAAAAAEoFAeHk0RZ9kD+cJRD3j7PcE5gzWKnyBrF1I/MDNp5mBXMAIAAAAACgHtc2hMBRSZjKw8RAdDHK+Pi1HeyjiBuAslGVNcW5tAVsACAAAAAAXzBLfq+GxRtX4Wa9fazA49DBLG6AjZm2XODStJKH8D0AAzQ1AH0AAAAFZAAgAAAAAAW+7DmSN/LX+/0uBVJDHIc2dhxAGz4+ehyyz8fAnNGoBXMAIAAAAAA6Ilw42EvvfLJ3Eq8Afd+FjPoPcQutZO6ltmCLEr8kxQVsACAAAAAAbbZalyo07BbFjPFlYmbmv0z023eT9eLkHqeVUnfUAUAAAzQ2AH0AAAAFZAAgAAAAANBdV7M7kuYO3EMoQItAbXv4t2cIhfaT9V6+s4cg9djlBXMAIAAAAABvz4MIvZWxxrcJCL5qxLfFhXiUYB1OLHdKEjco94SgDgVsACAAAAAAK2GVGvyPIKolF/ECcmfmkVcf1/IZNcaTv96N92yGrkEAAzQ3AH0AAAAFZAAgAAAAAMoAoiAn1kc79j5oPZtlMWHMhhgwNhLUnvqkqIFvcH1NBXMAIAAAAADcJTW7WiCyW0Z9YDUYwppXhLj4Ac1povpJvcAq+i48MQVsACAAAAAAIGxGDzoeB3PTmudl4+j6piQB++e33EEzuzAiXcqGxvUAAzQ4AH0AAAAFZAAgAAAAACI3j5QP7dWHpcT6WO/OhsWwRJNASBYqIBDNzW8IorEyBXMAIAAAAABxUpBSjXwCKDdGP9hYU+RvyR+96kChfvyyRC4jZmztqAVsACAAAAAAvBCHguWswb4X0xdcAryCvZgQuthXzt7597bJ5VxAMdgAAzQ5AH0AAAAFZAAgAAAAAKsbycEuQSeNrF8Qnxqw3x3og8JmQabwGqnDbqzFRVrrBXMAIAAAAACno/3ef2JZJS93SVVzmOZSN+jjJHT8s0XYq2M46d2sLAVsACAAAAAAAt5zLJG+/j4K8rnkFtAn8IvdUVNefe6utJ3rdzgwudIAAzUwAH0AAAAFZAAgAAAAAPXIcoO8TiULqlxzb74NFg+I8kWX5uXIDUPnh2DobIoMBXMAIAAAAADR6/drkdTpnr9g1XNvKDwtBRBdKn7c2c4ZNUVK5CThdQVsACAAAAAAJqOA1c6KVog3F4Hb/GfDb3jCxXDRTqpXWSbMH4ePIJsAAzUxAH0AAAAFZAAgAAAAAEa03ZOJmfHT6/nVadvIw71jVxEuIloyvxXraYEW7u7pBXMAIAAAAADzRlBJK75FLiKjz3djqcgjCLo/e3yntI3MnPS48OORhgVsACAAAAAAnQhx4Rnyj081XrLRLD5NLpWmRWCsd0M9Hl7Jl19R0h8AAzUyAH0AAAAFZAAgAAAAAKx8NLSZUU04pSSGmHa5fh2oLHsEN5mmNMNHL95/tuC9BXMAIAAAAAA59hcXVaN3MNdHoo11OcH1aPRzHCwpVjO9mGfMz4xh3QVsACAAAAAAYIPdjV2XbPj7dBeHPwnwhVU7zMuJ+xtMUW5mIOYtmdAAAzUzAH0AAAAFZAAgAAAAAHNKAUxUqBFNS9Ea9NgCZoXMWgwhP4x0/OvoaPRWMquXBXMAIAAAAABUZ551mnP4ZjX+PXU9ttomzuOpo427MVynpkyq+nsYCQVsACAAAAAALnVK5p2tTTeZEh1zYt4iqKIQT9Z0si//Hy1L85oF+5IAAzU0AH0AAAAFZAAgAAAAALfGXDlyDVcGaqtyHkLT0qpuRhJQLgCxtznazhFtuyn/BXMAIAAAAABipxlXDq14C62pXhwAeen5+syA+/C6bN4rtZYcO4zKwAVsACAAAAAAXUf0pzUq0NhLYagWDap4uEiwq5rLpcx29rWbt1NYMsMAAzU1AH0AAAAFZAAgAAAAANoEr8sheJjg4UCfBkuUzarU9NFoy1xwbXjs5ifVDeA9BXMAIAAAAABPoyTf6M+xeZVGES4aNzVlq7LgjqZXJ/QunjYVusGUEAVsACAAAAAA1hA2gMeZZPUNytk9K+lB1RCqWRudRr7GtadJlExJf8oAAzU2AH0AAAAFZAAgAAAAAKvDiK+xjlBe1uQ3SZTNQl2lClIIvpP/5CHwY6Kb3WlgBXMAIAAAAAANnxImq5MFbWaRBHdJp+yD09bVlcFtiFDYsy1eDZj+iQVsACAAAAAAWtsyO+FxMPSIezwsV1TJD8ZrXAdRnQM6DJ+f+1V3qEkAAzU3AH0AAAAFZAAgAAAAAF49IlFH9RmSUSvUQpEPUedEksrQUcjsOv44nMkwXhjzBXMAIAAAAADJtWGbk0bZzmk20obz+mNsp86UCu/nLLlbg7ppxYn7PgVsACAAAAAA3k0Tj/XgPQtcYijH8cIlQoe/VXf15q1nrZNmg7yWYEgAAzU4AH0AAAAFZAAgAAAAAOuSJyuvz50lp3BzXlFKnq62QkN2quNU1Gq1IDsnFoJCBXMAIAAAAAAqavH1d93XV3IzshWlMnzznucadBF0ND092/2ApI1AcAVsACAAAAAAzUrK4kpoKCmcpdZlZNI13fddjdoAseVe67jaX1LobIIAAzU5AH0AAAAFZAAgAAAAALtgC4Whb4ZdkCiI30zY6fwlsxSa7lEaOAU3SfUXr02XBXMAIAAAAACgdZ6U1ZVgUaZZwbIaCdlANpCw6TZV0bwg3DS1NC/mnAVsACAAAAAAzI49hdpp0PbO7S2KexISxC16sE73EUAEyuqUFAC/J48AAzYwAH0AAAAFZAAgAAAAAF6PfplcGp6v
ek1ThwenMHVkbZgrc/dHgdsgx1VdPqZ5BXMAIAAAAACha3qhWkqmuwJSEXPozDO8y1ZdRLyzt9Crt2vjGnT7AAVsACAAAAAA7nvcU59+LwxGupSF21jAeAE0x7JE94tjRkJfgM1yKU8AAzYxAH0AAAAFZAAgAAAAAKoLEhLvLjKc7lhOJfx+VrGJCx9tXlOSa9bxQzGR6rfbBXMAIAAAAAAIDK5wNnjRMBzET7x/KAMExL/zi1IumJM92XTgXfoPoAVsACAAAAAAFkUYWFwNr815dEdFqp+TiIozDcq5IBNVkyMoDjharDQAAzYyAH0AAAAFZAAgAAAAADoQv6lutRmh5scQFvIW6K5JBquLxszuygM1tzBiGknIBXMAIAAAAADAD+JjW7FoBQ76/rsECmmcL76bmyfXpUU/awqIsZdO+wVsACAAAAAAPFHdLw3jssmEXsgtvl/RBNaUCRA1kgSwsofG364VOvQAAzYzAH0AAAAFZAAgAAAAAJNHUGAgn56KekghO19d11nai3lAh0JAlWfeP+6w4lJBBXMAIAAAAAD9XGJlvz59msJvA6St9fKW9CG4JoHV61rlWWnkdBRLzwVsACAAAAAAxwP/X/InJJHmrjznvahIMgj6pQR30B62UtHCthSjrP0AAzY0AH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzY1AH0AAAAFZAAgAAAAANpIljbxHOM7pydY877gpRQvYY2TGK7igqgGsavqGPBABXMAIAAAAAAqHyEu9gpurPOulApPnr0x9wrygY/7mXe9rAC+tPK80wVsACAAAAAA7gkPzNsS3gCxdFBWbSW9tkBjoR5ib+saDvpGSB3A3ogAAzY2AH0AAAAFZAAgAAAAAGR+gEaZTeGNgG9BuM1bX2R9ed4FCxBA9F9QvdQDAjZwBXMAIAAAAABSkrYFQ6pf8MZ1flgmeIRkxaSh/Eep4Btdx4QYnGGnwAVsACAAAAAApRovMiV00hm/pEcT4XBsyPNw0eo8RLAX/fuabjdU+uwAAzY3AH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzY4AH0AAAAFZAAgAAAAADgyPqQdqQrgfmJjRFAILTHzXbdw5kpKyfeoEcy6YYG/BXMAIAAAAAAE+3XsBQ8VAxAkN81au+f3FDeCD/s7KoZD+fnM1MJSSAVsACAAAAAAhRnjrXecwV0yeCWKJ5J/x12Xx4qVJahsCEVHB/1U2rcAAzY5AH0AAAAFZAAgAAAAAI0CT7JNngTCTUSei1Arw7eHWCD0jumv2rb7imjWIlWABXMAIAAAAABSP8t6ya0SyCphXMwnru6ZUDXWElN0NfBvEOhDvW9bJQVsACAAAAAAGWeGmBNDRaMtvm7Rv+8TJ2sJ4WNXKcp3tqpv5Se9Ut4AAzcwAH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcxAH0AAAAFZAAgAAAAAHIkVuNDkSS1cHIThKc/O0r2/ubaABTOi8Q1r/dvBAsEBXMAIAAAAADdHYqchEiJLM340c3Q4vJABmmth3+MKzwLYlsG6GS7sQVsACAAAAAADa+KP/pdTiG22l+ZWd30P1iHjnBF4zSNRdFm0oEK82kAAzcyAH0AAAAFZAAgAAAAAJmoDILNhC6kn3masElfnjIjP1VjsjRavGk1gSUIjh1NBXMAIAAAAAD97Ilvp3XF8T6MmVVcxMPcdL80RgQ09UoC6PnoOvZ1IQVsACAAAAAA2RK3Xng6v8kpvfVW9tkVXjpE+BSnx9/+Fw85Evs+kUEAAzczAH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzc0AH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzc1AH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzc2AH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzc3AH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzc4AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzc5AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzgwAH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUs
STf6Ubk7L5/eoFOJk12MtZAoAAzgxAH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzgyAH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzgzAH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzg0AH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzg1AH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzg2AH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzg3AH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzg4AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzg5AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzkwAH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzkxAH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzkyAH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzkzAH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzk0AH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzk1AH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzk2AH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzk3AH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzk4AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzk5AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzEwMAB9AAAABWQAIAAAAADJDdC9aEFl4Y8J/awHbnXGHjfP+VXQilPHJg7ewaJI7AVzACAAAAAAE+tqRl6EcBMXvbr4GDiNIYObTsYpa1n6BJk9EjIJVicFbAAgAAAAAJVc+HYYqa0m1Hq6OiRX8c0iRnJYOt6AJAJoG0sG3GMSAAMxMDEAfQAAAAVkACAAAAAA3F9rjEKhpoHuTULVGgfUsGGwJs3bISrXkFP1v6KoQLgFcwAgAAAAAIBf0tXw96Z/Ds
0XSIHX/zk3MzUR/7WZR/J6FpxRWChtBWwAIAAAAABWrjGlvKYuTS2s8L9rYy8Hf0juFGJfwQmxVIjkTmFIGQADMTAyAH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzEwMwB9AAAABWQAIAAAAACMtPm12YtdEAvqu6Eji1yuRXnu1RJP6h0l7pH3lSH4MwVzACAAAAAAENyCFfyUAh1veQBGx+cxiB7Sasrj41jzCGflZkB5cRMFbAAgAAAAAKdI2LMqISr/T5vuJPg6ZRBm5fVi2aQCc4ra3A4+AjbDAAMxMDQAfQAAAAVkACAAAAAAvlI4lDcs6GB1cnm/Tzo014CXWqidCdyE5t2lknWQd4QFcwAgAAAAAD60SpNc4O2KT7J0llKdSpcX1/Xxs97N715a1HsTFkmBBWwAIAAAAABuuRkJWAH1CynggBt1/5sPh9PoGiqTlS24D/OE2uHXLQADMTA1AH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzEwNgB9AAAABWQAIAAAAABb6LXDWqCp1beQgQjj8I3sRTtFhlrmiBi+h/+ikmrvugVzACAAAAAA9stpgTecT7uTyaGNs3K9Bp0A7R0QaIAOfscyMXHBPX8FbAAgAAAAAHUt+McyXrJ1H8SwnHNVO181Ki8vDAM1f7XI26mg95ZDAAMxMDcAfQAAAAVkACAAAAAA97NTT+81PhDhgptNtp4epzA0tP4iNb9j1AWkiiiKGM8FcwAgAAAAAKPbHg7ise16vxmdPCzksA/2Mn/qST0L9Xe8vnQugVkcBWwAIAAAAABB0EMXfvju4JU/mUH/OvxWbPEl9NJkcEp4iCbkXI41fAADMTA4AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzEwOQB9AAAABWQAIAAAAADQnslvt6Hm2kJPmqsTVYQHE/wWeZ4bE1XSkt7TKy0r1gVzACAAAAAA8URTA4ZMrhHPvlp53TH6FDCzS+0+61qHm5XK6UiOrKEFbAAgAAAAAHQbgTCdZcbdA0avaTmZXUKnIS7Nwf1tNrcXDCw+PdBRAAMxMTAAfQAAAAVkACAAAAAAhujlgFPFczsdCGXtQ/002Ck8YWQHHzvWvUHrkbjv4rwFcwAgAAAAALbV0lLGcSGfE7mDM3n/fgEvi+ifjl7WZ5b3aqjDNvx9BWwAIAAAAACbceTZy8E3QA1pHmPN5kTlOx3EO8kJM5PUjTVftw1VpgADMTExAH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzExMgB9AAAABWQAIAAAAACfw9/te4GkHZAapC9sDMHHHZgmlTrccyJDPFciOMSOcwVzACAAAAAAIIC1ZpHObvmMwUfqDRPl4C1aeuHwujM1G/yJbvybMNAFbAAgAAAAAAs9x1SnVpMfNv5Bm1aXGwHmbbI9keWa9HRD35XuCBK5AAMxMTMAfQAAAAVkACAAAAAAkxHJRbnShpPOylLoDdNShfILeA1hChKFQY9qQyZ5VmsFcwAgAAAAAKidrY+rC3hTY+YWu2a7fuMH2RD/XaiTIBW1hrxNCQOJBWwAIAAAAACW0kkqMIzIFMn7g+R0MI8l15fr3k/w/mHtY5n6SYTEwAADMTE0AH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzExNQB9AAAABWQAIAAAAABxMy7X5hf7AXGDz3Y/POu1ZpkMlNcSvSP92NOO/Gs7wAVzACAAAAAAHJshWo2T5wU2zvqCyJzcJQKQaHFHpCpMc9oWBXkpUPoFbAAgAAAAAGeiJKzlUXAvL0gOlW+Hz1mSa2HsV4RGmyLmCHlzbAkoAAMxMTYAfQAAAAVkACAAAAAAlqbslixl7Zw3bRlibZbe/WmKw23k8uKeIzPKYEtbIy0FcwAgAAAAAHEKwpUxkxOfef5HYvulXPmdbzTivwdwrSYIHDeNRcpcBWwAIAAAAADuPckac21Hrg/h0kt5ShJwVEZ9rx6SOHd2+HDjqxEWTQADMTE3AH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzExOAB9AAAABWQAIAAAAAAm83FA9yDUpwkbKTihe7m53u+DivS9BU2b4vQMtCVQ2AVzACAAAAAAz3m1UB/AbZPa4QSKFDnUgHaT78+6iGOFAtouiBorEgEFbAAgAAAAAIgbpyYtJj5513Z5XYqviH/HXG/5+mqR52iBbfqMmDtZAAMxMTkAfQAAAAVkACAAAAAAJRzYK0PUwr9RPG2/7yID0WgcTJPB2Xjccp5LAPDYunkFcwAgAAAAAIIh24h3DrltAzNFhF+MEmPrZtzr1PhCofhChZqfCW+jBWwAIAAAAAAzRNXtL5o9VXMk5D5ylI0odPDJDSZZry1wfN+TedH70gADMTIwAH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzEyMQB9AAAABWQAIAAAAAAC/I4TQRtCl12YZmdGz17X4GqSQgfwCPgRBwdHmdwu+QVzACAAAAAAx8f3z2ut/RAZhleari4vCEE+tNIn4ikjoUwzitfQ588FbAAgAAAAAJci0w1ZB8W2spJQ+kMpod6HSCtSR2jrabOH+B0fj3A4AAMxM
jIAfQAAAAVkACAAAAAADGB5yU2XT0fse/MPWgvBvZikVxrl5pf3S5K1hceKWooFcwAgAAAAAIxTmlLHMjNaVDEfJbXvRez0SEPWFREBJCT6qTHsrljoBWwAIAAAAAAlswzAl81+0DteibwHD+CG5mZJrfHXa9NnEFRtXybzzwADMTIzAH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzEyNAB9AAAABWQAIAAAAAAfPUoy7QyZKhIIURso+mkP9qr1izbjETqF5s22GwjCjAVzACAAAAAAvLMsIDQ/go4VUxeh50UHmsvMvfx51cwyONnRD2odvC0FbAAgAAAAAKMb+1CodEalAFnDrEL1Ndt8ztamZ+9134m9Kp3GQgd+AAMxMjUAfQAAAAVkACAAAAAAE3ZqUar0Bq2zWbARE0bAv98jBlK9UJ73/xcwdMWWlSkFcwAgAAAAAK4M+MmC+9sFiFsumMyJZQKxWmmJiuG9H7IzKw083xxkBWwAIAAAAAAqkAONzhvMhkyL1D/6h7QQxEkdhC3p2WjXH+VGq5qCqQADMTI2AH0AAAAFZAAgAAAAAMo8FJiOq63cAmyk2O7eI7GcbQh/1j4RrMTqly3rexftBXMAIAAAAADjVmpd0WiRGTw/gAqEgGolt2EI7Csv14vKdmYoMD0aAgVsACAAAAAA07XQBzBUQMNw7F2/YxJjZNuPVpHTTgbLd1oGk77+bygAAzEyNwB9AAAABWQAIAAAAACu5IGaIx7A3Jvly/kzlCsSA4s3iJwuIl8jEdRH0k93NwVzACAAAAAA9NRUyxYE+t0Xyosyt6vIfMFW/vBoYg6sR+jBNs4JAxIFbAAgAAAAAAzyZ91dx+0oMlOVAjRGiMrPySikY/U9eMEB4WJb3uWtAAMxMjgAfQAAAAVkACAAAAAALkRy0GJInXYLA+cgjs6Myb0a+Gu9hgXhHvhLNoGWfckFcwAgAAAAANbALyt9zCSvwnLaWCd2/y2eoB7qkWTvv1Ldu8r40JPuBWwAIAAAAAD4Fl5bV5sz4isIE9bX+lmAp+aAKaZgVYVZeVfrItkCZAADMTI5AH0AAAAFZAAgAAAAAGoUK/DSWhT8LZhszSUqDbTrp8cSA7rdqmADKL+MILtTBXMAIAAAAABHnEE9bVa6lvhfhEMkkV2kzSSxH/sMW/FIJuw3CzWs6wVsACAAAAAAanavcBdqZxgRGKvEK95wTmeL1K1CeDSXZsXUAs81uOgAAzEzMAB9AAAABWQAIAAAAAC922ZDQE3h2fQKibGMZ9hV0WNlmrPYYSdtaSyYxsWYqgVzACAAAAAAagMovciKK6WVjIc2cCj8nK5O/gVOFFVeVAJpRp89tmQFbAAgAAAAAKcTFfPQzaFiAtSFhqbN02sCE1BKWJSrRfGN5L6oZwzkAAMxMzEAfQAAAAVkACAAAAAAtK+JqX3K/z2txjAU15DgX4y90DS2YLfIJFolCOkJJJwFcwAgAAAAAMnR5V7gfX7MNqqUdL5AkWlkhyFXaBRVNej+Rcn8lrQkBWwAIAAAAAA2cDNRXZuiC241TGRvdFyctJnrNcdbZOP9zHio81tkngADMTMyAH0AAAAFZAAgAAAAAAeGrIMK/bac6kPczxbvRYqKMkcpeI2FjdMpD91FDWIvBXMAIAAAAAAix62z1LeS8yvSXCl5gHSIomjyx76fF3S1lp9k900hygVsACAAAAAAiYwzf2m71aWFD5ajcXyW2JX2EzQOkBroTGMg29nLPYIAAzEzMwB9AAAABWQAIAAAAACphf298InM0Us4HT8o1W1MGw0D/02vd7Jh+U0h7qaFaQVzACAAAAAAFXtk7YpqsOJxsqGWSIL+YcBE96G3Zz9D31gPqDW94y8FbAAgAAAAAAOrS1KVA94rjB1jZ1pPocpCeBG+B14RzWoHqVDpp7JbAAMxMzQAfQAAAAVkACAAAAAATLDS2cuDVM3yDMuWNgk2iGKBTzPpfJMbvxVOSY39ZfcFcwAgAAAAAPT5wRi2cLHIUflXzm6EQB/m7xdThP80ir1VV/JBBqvxBWwAIAAAAAB9lEtZS0aXCFbCtSbhnis27S5IPcfWGygHW8AHn3QqzwADMTM1AH0AAAAFZAAgAAAAAJNjExiZVX7jfFGfYpQu16qxLN0YPqVU/5CQ/Y67YSinBXMAIAAAAABMpm2+6KrkRUlXzQoMPHrQmIO6dkQz66tYdfTeA3dKqQVsACAAAAAAFXobHiMLvNZuEPr8jtewCX2J93EZG3JNeyVg92fue6YAAzEzNgB9AAAABWQAIAAAAABlFkYtLCx901X6QVVMkSn6Z7k30UF4xHaA0OZJJ9bdyQVzACAAAAAATez+F9GHcGzTp7jjv4feboUNb8JCkIp4EqcPFisnq7MFbAAgAAAAACE7JvOpBgMoZ7kRd4QbxIhxukPTUxXpzhjnBHiR7XoRAAMxMzcAfQAAAAVkACAAAAAA8NJKN0IxZnruhswGQkiruv8Ih0EMwDcSZx/Xasup9dkFcwAgAAAAAKaJZRxzA+Igeydvuk6cSwUHXcrmT4PjhuPu//FslpdnBWwAIAAAAAD53Rok1Vq/PMAnXmarqoHJ0PEyYUBmVESa9hIpCv/G9QADMTM4AH0AAAAFZAAgAAAAABHxHdEClz7hbSSgE58+dWLlSMJnoPz+jFxp4bB1GmLQBXMAIAAAAAD3nSvT6aGD+A110J/NwEfp0nPutlmuB5B+wA3CC3noGAVsACAAAAAA3Apjd+TapONB7k5wBVwTWgn8t+Sq2oyyU5/+as109RcAAzEzOQB9AAAABWQAIAAAAAC/o8qW/ifk3KuJ01VFkyNLgQafxB5/bGs2G5VyyVafOwVzACAAAAAA1bMqAFGDHSl6BYNLbxApvkAv2K1/oafywiX0MDz1dGUFbAAgAAAAAHJXLlId3edFoniLD/9K2A5973MeP2Ro31flDyqm3l5QAAMxNDAAfQAAAAVkACAAAAAAY2V8I1bz3a1AxTtmED6UhdhA09huFkuuEX8R+d/WDPUFcwAgAAAAAPTVoNRiI76tcRKqd+JBBVyy4+YcKST42p0QX2BtmQ2VBWwAIAAAAACcxt9hg14WqPNiDv1MkqVljM2e2KJEv53lA17LhV6ZigADMTQxAH0AAAAFZAAgAAAAAO2kSsW0WGN9AOtK4xK2SHrGhWiaAbMEKT4iZkRpaDN/BXMAIAAAAABKGzQcPM8LT2dwOggxoWjv/1imYWabbG/G4kBw8OWaxAVsACAAAAAAC9hLK1dScQTAqg+YAG3ObdPzg2Xet57HmOFpGmyUR9UAAzE0MgB9AAAABWQAIAAAAAAiCwzNEEaH/mDam68IdDftnhthyUFdb+ZCNSBQ91WlHQVzACAAAAAA7tHyHcxCzmbJeFYZ
yPm4mEgkTGKOvwY4MX82OvH0Jn8FbAAgAAAAAAb5IAbZ1hXCNegQ+S+C9i/Z8y6sS8KeU04V6hXa2ml6AAMxNDMAfQAAAAVkACAAAAAAGuCHVNJSuoVkpPOnS5s89GuA+BLi2IPBUr2Bg1sWEPIFcwAgAAAAAEl1gncS5/xO7bQ/KQSstRV3rOT2SW6nV92ZANeG2SR6BWwAIAAAAAA9LOcKmhek8F2wAh8yvT/vjp2gaouuO+Hmv10lwAeWPAADMTQ0AH0AAAAFZAAgAAAAAMfxz7gEaoCdPvXrubDhCZUS0ARLZc1svgbXgMDlVBPgBXMAIAAAAAB6a5dDA3fuT5Vz2KvAcbUEFX/+B7Nw2p1QqbPoQ5TTuAVsACAAAAAAcf/y75UOuI62A6vWH7bYr/5Jz+nirZVYK/81trN6XOQAAzE0NQB9AAAABWQAIAAAAACnYsqF/VzmjIImC9+dqrHO1TM6lJ6fRwM0mM6Wf6paOwVzACAAAAAA5tgZzch8uDCR1ky3SllVaKVpxAlbrhvlNDTazZZRZOAFbAAgAAAAALeGiLJS4z2zhgVpxzyPdRYyACP9QzQBOob34YrIZumCAAMxNDYAfQAAAAVkACAAAAAAEC0sIVmadtW4YMuRXH7RpAhXclsd+3bmqGXCMeaT014FcwAgAAAAABPpXh0uzpsJJB+IRUNajmMB9WGwswfpw5T9xk3Xj6ANBWwAIAAAAAAmf+NYh9TZ/QRu3w/GQz66n7DtfbJijN3G7KzeL8lstAADMTQ3AH0AAAAFZAAgAAAAABaIB3n49Xm9cOafSrQsE0WCcYp8rMIO/qVwIlMF5YLRBXMAIAAAAAC9EyWJV3xOu9bzgdJ/yX+ko7qLf1u3AxNMataW2C9EzQVsACAAAAAAvVbDkLxXx2DcMLifIQ3K0IIJcLcAG9DUrNfI6aoUjNcAAzE0OAB9AAAABWQAIAAAAAA5rZItA/cocRnngYqcJ3nBXQ+l688aKz3EQyLbYYunPAVzACAAAAAAwKyA+L7TgxztPClLrIMk2JXR+w7c04N3ZOqPgjvrIvsFbAAgAAAAACzvZ33h6aWEe8hmo+1f6OXJ72FY5hvWaUuha64ZV3KFAAMxNDkAfQAAAAVkACAAAAAA3htn7oHJ0YYpIrs+Mzyh85Ys67HwAdv5LQl1mCdoMWkFcwAgAAAAAEHjCtNNLenHuSIYux6ezAHsXDaj2DlTF67ToDhDDe6HBWwAIAAAAAD+P4H0sk9jOd+7vOANt2/1Ectb+4ZRGPE8GkHWNXW3MgADMTUwAH0AAAAFZAAgAAAAAEnt18Km/nqggfIJWxzTr9r3hnXNaueG6XO9A5G11LnGBXMAIAAAAAD7QxzGMN/ard5TfFLecE6uusMmXG2+RBsBR+/NCQHUwAVsACAAAAAAQEZ1ZZ8GC8rdbg7s87OM5Gr9qkTXS9+P5DuAZxj5Gl4AAzE1MQB9AAAABWQAIAAAAAAVAKK/GoY8AACu/hyMpO4hdLq6JnEyWNzkyci9sbaD/wVzACAAAAAA2HmeqpMlvvBpV2zQTYIRmsc4MFlfHRwLof0ycJgMg/MFbAAgAAAAACdltCeWi5E/q1Li1eXLChpM2D9QQSGLBZ82NklQSc0oAAMxNTIAfQAAAAVkACAAAAAAhHyq1GQC/GiMwpYjcsfkNxolJ10ARKjIjfkW1Wipzi0FcwAgAAAAAD/uaGWxTDq87F8XZ6CrFI+RNa8yMqfSZdqK00Kj833BBWwAIAAAAAD6aEdOO0CsQGagioOCvANPCEHSpJ8BSixlPBq5ERhB7AADMTUzAH0AAAAFZAAgAAAAABAJJxHoZD+MQBWqm9UM9Dd3z5ZohIZGWRaRVRsMptKQBXMAIAAAAADrE/ca+gqj/SH4oao4wE4qn2ovoTydzcMbDbrfnUs3zAVsACAAAAAAeNCIQN6hVnGJinytQRFGlQ2ocoprXNqpia+BSxzl+uwAAzE1NAB9AAAABWQAIAAAAAAv01wz7VG9mTepjXQi6Zma+7b/OVBaKVkWNbgDLr1mFgVzACAAAAAA0I5sxz8r6wkCp5Tgvr+iL4p6MxSOq5d3e1kZG+0b7NkFbAAgAAAAAIA32v6oGkAOS96HexGouNTex+tLahtx9QF2dgGClk6WAAMxNTUAfQAAAAVkACAAAAAAWXecRwxSon68xaa9THXnRDw5ZfzARKnvvjTjtbae6T0FcwAgAAAAAPh0UfUMEo7eILCMv2tiJQe1bF9qtXq7GJtC6H5Va4fIBWwAIAAAAADqFr1ThRrTXNgIOrJWScO9mk86Ufi95IDu5gi4vP+HWQADMTU2AH0AAAAFZAAgAAAAAEY5WL8/LpX36iAB1wlQrMO/xHVjoO9BePVzbUlBYo+bBXMAIAAAAABoKcpadDXUARedDvTmzUzWPe1jTuvD0z9oIcZmKuiSXwVsACAAAAAAJuJbwuaMrAFoI+jU/IYr+k4RzAqITrOjAd3HWCpJHqEAAzE1NwB9AAAABWQAIAAAAADnJnWqsfx0xqNnqfFGCxIplVu8mXjaHTViJT9+y2RuTgVzACAAAAAAWAaSCwIXDwdYxWf2NZTly/iKVfG/KDjHUcA1BokN5sMFbAAgAAAAAJVxavipE0H4/JQvhagdytXBZ8qGooeXpkbPQ1RfYMVHAAMxNTgAfQAAAAVkACAAAAAAsPG7LaIpJvcwqcbtfFUpIjj+vpNj70Zjaw3eV9T+QYsFcwAgAAAAAJQ71zi0NlCyY8ZQs3IasJ4gB1PmWx57HpnlCf3+hmhqBWwAIAAAAACD58TO6d+71GaOoS+r73rAxliAO9GMs4Uc8JbOTmC0OwADMTU5AH0AAAAFZAAgAAAAAAGiSqKaQDakMi1W87rFAhkogfRAevnwQ41onWNUJKtuBXMAIAAAAAASgiDpXfGh7E47KkOD8MAcX8+BnDShlnU5JAGdnPdqOAVsACAAAAAAI+2TTQIgbFq4Yr3lkzGwhG/tqChP7hRAx2W0fNaH6jcAAzE2MAB9AAAABWQAIAAAAAB7L4EnhjKA5xJD3ORhH2wOA1BvpnQ+7IjRYi+jjVEaJAVzACAAAAAAuhBIm0nL3FJnVJId+7CKDASEo+l2E89Z9/5aWSITK4AFbAAgAAAAALtSICOzQDfV9d+gZuYxpEj6cCeHnKTT+2G3ceP2H65kAAMxNjEAfQAAAAVkACAAAAAAaROn1NaDZFOGEWw724dsXBAm6bgmL5i0cki6QZQNrOoFcwAgAAAAANVT8R6UvhrAlyqYlxtmnvkR4uYK/hlvyQmBu/LP6/3ZBWwAIAAAAAD+aHNMP/X+jcRHyUtrCNkk1KfMtoD3GTmShS8pWGLt+AADMTYyAH0AAAAFZAAgAAAAADqSR5e0/Th59LrauDA7OnGD1Xr3H3NokfVxzDWOFaN7BXMAIAAAAACt30faNwTWRbvmykDpiDYUOCwA6QDbBBYBFWS7rdOB4AVsACAAAAAAF7SvnjjRk5v2flFOKaBAEDvjXaL1cpjsQLtK2fv9zdQAAzE2MwB
9AAAABWQAIAAAAADmtb1ZgpZjSeodPG/hIVlsnS8hoRRwRbrTVx89VwL62AVzACAAAAAAi38e1g6sEyVfSDkzZbaZXGxKI/zKNbMasOl2LYoWrq8FbAAgAAAAAALACk0KcCDN/Kv8WuazY8ORtUGkOZ5Dsm0ys1oOppp/AAMxNjQAfQAAAAVkACAAAAAAf/f7AWVgBxoKjr7YsEQ4w/fqSvuQWV2HMiA3rQ7ur0sFcwAgAAAAADkkeJozP6FFhUdRIN74H4UhIHue+eVbOs1NvbdWYFQrBWwAIAAAAAB55FlHAkmTzAYj/TWrGkRJw2EhrVWUnZXDoMYjyfB/ZwADMTY1AH0AAAAFZAAgAAAAAI2WEOymtuFpdKi4ctanPLnlQud+yMKKb8p/nfKmIy56BXMAIAAAAADVKrJmhjr1rfF3p+T+tl7UFd1B7+BfJRk0e7a4im7ozgVsACAAAAAA5E7Ti3PnFiBQoCcb/DN7V1uM3Xd6VKiexPKntssFL7kAAzE2NgB9AAAABWQAIAAAAAAuHU9Qd79hjyvKOujGanSGDIQlxzsql8JytTZhEnPw+AVzACAAAAAAjF2gV/4+sOHVgDd/oR5wDi9zL7NGpGD+NsEpGXy/a4QFbAAgAAAAAJzMoyojYV6Ed/LpVN5zge93Odv3U7JgP7wxeRaJZGTdAAMxNjcAfQAAAAVkACAAAAAA7dQDkt3iyWYCT94d7yqUtPPwp4qkC0ddu+HFdHgVKEkFcwAgAAAAANuYvtvZBTEq4Rm9+5eb7VuFopowkrAuv86PGP8Q8/QvBWwAIAAAAACeqXoAOQOE4j0zRMlkVd8plaW0RX1npsFvB38Xmzv7sAADMTY4AH0AAAAFZAAgAAAAAAwnZSDhL4tNGYxlHPhKYB8s28dY5ScSwiKZm3UhT8U3BXMAIAAAAABDoY6dhivufTURQExyC9Gx3ocpl09bgbbQLChj3qVGbgVsACAAAAAAF+1nS7O0v85s3CCy+9HkdeoEfm2C6ZiNbPMMnSfsMHUAAzE2OQB9AAAABWQAIAAAAAC2VuRdaC4ZJmLdNOvD6R2tnvkyARteqXouJmI46V306QVzACAAAAAAMn1Z6B35wFTX9mEYAPM+IiJ5hauEwfD0CyIvBrxHg7IFbAAgAAAAAOG6DvDZkT9B/xZWmjao2AevN7MMbs3Oh9YJeSd/hZ+hAAMxNzAAfQAAAAVkACAAAAAAVerb7qVNy457rNOHOgDSKyWl5ojun7iWrv1uHPXrIZQFcwAgAAAAAIDcYS9j5z+gx0xdJj09L7876r/vjvKTi/d3bXDE3PhyBWwAIAAAAADuhVLqb1Bkrx8aNymS+bx2cL8GvLFNH4SAi690DUgnWQADMTcxAH0AAAAFZAAgAAAAAH/E44yLxKCJjuSmU9A8SEhbmkDOx1PqqtYcZtgOzJdrBXMAIAAAAABgLh9v2HjBbogrRoQ82LS6KjZQnzjxyJH4PH+F3jupSAVsACAAAAAAIlO46ehXp4TqpDV0t6op++KO+uWBFh8iFORZjmx2IjkAAzE3MgB9AAAABWQAIAAAAAAlNUdDL+f/SSQ5074mrq0JNh7CTXwTbbhsQyDwWeDVMwVzACAAAAAANIH2IlSNG0kUw4qz0budjcWn8mNR9cJlYUqPYdonucAFbAAgAAAAAJMrOUOyiu5Y3sV76zwEFct8L7+i8WGlQI2+8z2W2kzaAAMxNzMAfQAAAAVkACAAAAAASZ+CvUDtlk/R4HAQ3a+PHrKeY/8ifAfh0oXYFqliu80FcwAgAAAAAJelpzPgM65OZFt/mvGGpwibclQ49wH+1gbUGzd9OindBWwAIAAAAAD9qeDchteEpVXWcycmD9kl9449C1dOw0r60TBm5jK+cQADMTc0AH0AAAAFZAAgAAAAAN9fkoUVbvFV2vMNMAkak4gYfEnzwKI3eDM3pnDK5q3lBXMAIAAAAACnDkgVNVNUlbQ9RhR6Aot2nVy+U4km6+GHPkLr631jEAVsACAAAAAANzg/BnkvkmvOr8nS4omF+q9EG/4oisB+ul4YHi938hwAAzE3NQB9AAAABWQAIAAAAAASyK3b1nmNCMptVEGOjwoxYLLS9fYWm/Zxilqea0jpEQVzACAAAAAADDHsGrbqlKGEpxlvfyqOJKQJjwJrzsrB7k3HG0AUJbkFbAAgAAAAAKwx3S4XfDZh4+LuI9jf7XgUh5qiefNv87JD4qvVRfPSAAMxNzYAfQAAAAVkACAAAAAAlSP9iK31GlcG9MKGbLmq+VXMslURr+As736rrVNXcsUFcwAgAAAAAAvbj0zfq9zzi8XReheKFbCB+h9IsOLgXPPpI5vrEJNZBWwAIAAAAABXvoZhaQE7ogWjeBjceVkp03N20cKYP3TA8vuNsgpfAgADMTc3AH0AAAAFZAAgAAAAAOJNORH8Bev97gVU7y6bznOxJ+E6Qoykur1QP76hG1/7BXMAIAAAAAC+C1PtOOrSZgzBAGhr+dPe/kR0JUw9GTwLVNr61xC1aAVsACAAAAAAeA/L8MQIXkamaObtMPLpoDoi5FypA5WAPtMeMrgi0eQAAzE3OAB9AAAABWQAIAAAAAAKcHzLUomavInN6upPkyWhAqYQACP/vdVCIYpiy6U6HgVzACAAAAAATsR4KItY6R2+U7Gg6sJdaEcf58gjd1OulyWovIqfxKcFbAAgAAAAAFbm10ko67ahboAejQdAV0U2uA5OhZYdb8XUFJ8OL46LAAMxNzkAfQAAAAVkACAAAAAAqTOLiMpCdR59tLZzzIPqJvbCNvz2XQL9ust0qYaehtcFcwAgAAAAAArefox/3k5xGOeiw2m6NUdzuGxmPwcu5IFcj+jMwHgHBWwAIAAAAADLZGFJ7MQd5JXMgMXjqZO5LDLxcFClcXPlnRMWRn+1oAADMTgwAH0AAAAFZAAgAAAAAIPSqSeVzSRgNVNmrPYHmUMgykCY27NbdDUNhE5kx/SgBXMAIAAAAAAhX90nNfxyXmZe/+btZ7q6xMX4PFyj0paM1ccJ/5IUUQVsACAAAAAA419oHmD2W0SYoOMwhrhrp8jf68fg9hTkaRdCuVd3CN0AAzE4MQB9AAAABWQAIAAAAACLn5DxiqAosHGXIAY96FwFKjeqrzXWf3VJIQMwx1fl4gVzACAAAAAAindvU27nveutopdvuHmzdENBbeGFtI3Qcsr07jxmvm8FbAAgAAAAAPvl9pBStQvP4OGkN5v0MghUY6djm9n7XdKKfrW0l1sMAAMxODIAfQAAAAVkACAAAAAA7i2S6rHRSPBwZEn59yxaS7HiYBOmObIkeyCcFU42kf8FcwAgAAAAAGb3RSEyBmgarkTvyLWtOLJcPwCKbCRkESG4RZjVmY4iBWwAIAAAAADB2/wo5CSHR4ANtifY6ZRXNTO5+O8qP82DfAiAeanpZwADMTgzAH0AAAAFZAAgAAAAAFz+M+H/Z94mdPW5oP51B4HWptp1rxcMWAjnlHvWJDWrBXMAIAAAAACBFEOQyL7ZHu4Cq33QvX
kmKuH5ibG/Md3RaED9CtG5HwVsACAAAAAAfggtJTprQ/yZzj7y5z9KvXsdeXMWP0yUXMMJqpOwI88AAzE4NAB9AAAABWQAIAAAAAAE7c2x3Z3aM1XGfLNk/XQ9jCazNRbGhVm7H8c2NjS5ywVzACAAAAAARJ9h8fdcwA19velF3L/Wcvi2rCzewlKZ2nA0p8bT9uwFbAAgAAAAAJtWe6b4wK2Hae2dZm/OEpYQnvoZjz4Sz5IgJC2wInecAAMxODUAfQAAAAVkACAAAAAAVoRt9B9dNVvIMGN+ea5TzRzQC+lqSZ8dd/170zU5o9cFcwAgAAAAAEwM95XZin5mv2yhCI8+ugtKuvRVmNgzzIQN0yi1+9aIBWwAIAAAAAAMGBq72n00rox3uqhxSB98mkenTGCdbbUF1gXrgottzgADMTg2AH0AAAAFZAAgAAAAAKRDkjyWv/etlYT4GyoXrmBED2FgZHnhc+l9Wsl06cH2BXMAIAAAAABohlpm3K850Vndf3NmNE0hHqDlNbSR8/IvMidQ3LnIZAVsACAAAAAAW42nGHa6q2MCAaaPVwaIDfr8QLyQwjKq23onZJYsqVsAAzE4NwB9AAAABWQAIAAAAAC3DFh5oklLCNLY90bgWm68dFXz65JpAZSp1K99MBTPAQVzACAAAAAAQgZecmxEUZVHoptEQClDwAf8smI3WynQ/i+JBP0g+kQFbAAgAAAAAEUSQGVnAPISD6voD0DiBUqyWKgt2rta0tjmoe+LNt6IAAMxODgAfQAAAAVkACAAAAAAQ5WKvWSB503qeNlOI2Tpjd5blheNr6OBO8pfJfPNstcFcwAgAAAAAKwHgQLSDJ5NwLBQbY5OnblQIsVDpGV7q3RCbFLD1U4/BWwAIAAAAACQ5nED99LnpbqXZuUOUjnO2HTphEAFBjLD4OZeDEYybgADMTg5AH0AAAAFZAAgAAAAAGfhFY3RGRm5ZgWRQef1tXxHBq5Y6fXaLAR4yJhrTBplBXMAIAAAAACKEF0ApLoB6lP2UqTFsTQYNc9OdDrs/vziPGzttGVLKQVsACAAAAAArOO6FyfNRyBi0sPT5iye7M8d16MTLcwRfodZq4uCYKEAAzE5MAB9AAAABWQAIAAAAAAIM73gPcgzgotYHLeMa2zAU4mFsr7CbILUZWfnuKSwagVzACAAAAAAJCSu98uV8xv88f2BIOWzt6p+6EjQStMBdkGPUkgN79cFbAAgAAAAAMGqPGMPxXbmYbVfSa/japvUljht1zZT33TY7ZjAiuPfAAMxOTEAfQAAAAVkACAAAAAAkWmHCUsiMy1pwZTHxVPBzPTrWFBUDqHNrVqcyyt7nO8FcwAgAAAAAMv2CebFRG/br7USELR98sIdgE9OQCRBGV5JZCO+uPMgBWwAIAAAAABt7qSmn3gxJu7aswsbUiwvO+G6lXj/Xhx+J/zQyZxzLAADMTkyAH0AAAAFZAAgAAAAAGInUYv0lP/rK7McM8taEHXRefk8Q2AunrvWqdfSV7UaBXMAIAAAAACE+WPxJ3gan7iRTbIxXXx+bKVcaf8kP4JD8DcwU0aL7wVsACAAAAAAUC4eTprX4DUZn2X+UXYU6QjtiXk+u57yoOPBbPQUmDkAAzE5MwB9AAAABWQAIAAAAACmHlg2ud3cplXlTsNTpvNnY6Qm1Fce0m899COamoDjaQVzACAAAAAArtJQeJIlepBWRU2aYar7+YGYVQ7dfDc1oxgTmA8r9q0FbAAgAAAAAOk45vg5VqZHAFCO3i0Z52SZi5RADf8NXwf68T5yad/DAAMxOTQAfQAAAAVkACAAAAAApzcWSAbZWV/Rq+ylRNqqlJqNVR4fhXrz4633/MQOQgcFcwAgAAAAAN/jz/bsEleiuCl+li83EWlG6UMHA8CyaOMRKCkXkSCPBWwAIAAAAAC3Sd+Qg+uFDKpGZHbrQgokXHQ1az1aFl4YK343OB6hcQAAEmNtAAAAAAAAAAAAABBwYXlsb2FkSWQAAAAAABBmaXJzdE9wZXJhdG9yAAEAAAASc3AAAQAAAAAAAAAQdGYAAQAAABNtbgD/////Y46NN8CHrb4J7f/fE214AP////9jjo03wIetvgnt/18A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalNoPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalNoPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json new file mode 100644 index 0000000000..41ba49112b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Aggregate.json @@ -0,0 +1,647 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": 
"1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": 
"DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json new file mode 100644 index 0000000000..bc4e1f4508 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Correctness.json @@ -0,0 +1,1418 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, 
+ "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 200, + "encryptedDecimalPrecision": { + "$numberDecimal": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": 
"aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lte": { + "$numberDecimal": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$lt": { + "$numberDecimal": "0.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "200.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0.0" + }, + "$lt": { + "$numberDecimal": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": 
{ + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberDecimal": "0.0" + }, + "$lte": { + "$numberDecimal": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDecimalPrecision": { + "$in": [ + { + "$numberDecimal": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json new file mode 100644 index 0000000000..1912f68ee5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Delete.json @@ -0,0 +1,539 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + 
"minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + 
"queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIA
AAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..9cf4488622 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate.json @@ -0,0 +1,651 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": 
"coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": { + "$numberInt": "0" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + 
"trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGP
SMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json new file mode 100644 index 0000000000..a9c3a8a46a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-InsertFind.json @@ -0,0 +1,634 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + 
"collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + 
}, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpP
mVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json new file mode 100644 index 0000000000..7f8ea38ae0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DecimalPrecision-Update.json @@ -0,0 +1,653 @@ +{ + "description": "fle2v2-Rangev2-DecimalPrecision-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + 
"queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DecimalPrecision. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDecimalPrecision": { + "$numberDecimal": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDecimalPrecision": { + "$numberDecimal": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDecimalPrecision": { + "$gt": { + "$numberDecimal": "0" + } + } + }, + "update": { + "$set": { + "encryptedDecimalPrecision": { + "$numberDecimal": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDecimalPrecision": { + "$gt": { + "$binary": { + "base64": "DRYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvd
Q2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAABNtbgAAAAAAAAAAAAAAAAAAAD4wE214ANAHAAAAAAAAAAAAAAAAPjAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDecimalPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDecimalPrecision", + "bsonType": "decimal", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDecimal": "0.0" + }, + "max": { + "$numberDecimal": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json new file mode 100644 index 0000000000..cdbd169676 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Defaults.json @@ -0,0 +1,444 @@ +{ + "description": "fle2v2-Rangev2-Defaults", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + 
"databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range applies defaults for trimFactor and sparsity", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + } + ] + }, + { + "_id": { + "$numberInt": "1" + }, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + 
"$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DRgbAAADcGF5bG9hZADEGgAABGcAsBoAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAA30oqY6NKy1KWDWf6Z36DtA2QsL9JRALvHX6smxz8cb4FcwAgAAAAADIhM0hCHwFGH+k7kPGuZlO+v5TjV6RRwA5FqUKM60o0BWwAIAAAAABTMPNUweBKrILSCxc5gcgjn9pTkkKX7KqWXgNMk4q7XgADMgB9AAAABWQAIAAAAACnCDvYEbgR9fWeQ8SatKNX43p0XIXTyFfzc7/395V2swVzACAAAAAAp8pkn2wJrZRBLlD18oE1ZRRiujmtFtuHYTZDzdGNE4kFbAAgAAAAAE2eptD2Jp126h5cd7S6k8IjRB6QJhuuWzPU/SEynDXTAAMzAH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzQAfQAAAAVkACAAAAAA8Ci9z02yMVsDNyHvLStLAHR25LO22UO5P/gbUG/IStQFcwAgAAAAAOdfFhaFVq1JPr3dIeLm1EYKWgceZ7hZ5FJT5u/lL/I+BWwAIAAAAADqUyU1hSFDLCmqsz2dhPhefzCShUV/Z2x+4P9xcGw8rwADNQB9AAAABWQAIAAAAAD3g2atCWYVOXW0YbCbvIturqNIAsy210bkL9KmqVMlAAVzACAAAAAAVGEb7L0QCjV/PBTAvUyhlddo467ToKjlMdwI9hsjuE4FbAAgAAAAAJe0bDhUH1sZldnDGWn0xMa1CQuN6cgv/i/6XqnpPS39AAM2AH0AAAAFZAAgAAAAANQOKUE9FOmCoMva2IYg45LZXJX0cMpUR1OvIwFmjLDYBXMAIAAAAAB6dyIKkQ86l/8j8zeWcDYeVGRYKd0USz6To3LbOBAKsAVsACAAAAAAELK0ExI0g4/WxNs+mf+Ua+mie3MuMO3daPGukA23VUYAAzcAfQAAAAVkACAAAAAARQp+fGA08v1bhcnYbfsP0ubXl9yg18QmYMfh2sd8EdEFcwAgAAAAABhe79wEznE298tt02xyRF7bk7a2NH9kwVg1TPY5/lT1BWwAIAAAAAADiGV5f/RRPkwpSrZMGHNBSarmwyqV+SYXI73QW/PmnwADOAB9AAAABWQAIAAAAABnW3CpmSFTglPNKYHJHhJHC/vd5BMWQpztIXQBL0sCngVzACAAAAAAC21qRBu2Px7VUz1lW95Dfn/0tw2yq9AVBtka34HijLgFbAAgAAAAAP8S1s5OA5cJT6ILpA94LanuLsSl9BsRCWHBtufFTMVrAAM5AH0AAAAFZAAgAAAAAJRIWu6DI2LR+2Pi09OaBZEmS2FInyBnGs9wf9Jf2wiIBXMAIAAAAABoDqKzj11qyOfXl4dcfkmGHqZxXyAsnGlgA9wsJRWWUQVsACAAAAAAIsDousyo/D8e4BCwUqvFhrKtOnpcGCSqpN94oFtWaC0AAzEwAH0AAAAFZAAgAAAAAE0h7vfdciFBeqIk1N14ZXw/jzFT0bLfXcNyiPRsg4W4BXMAIAAAAAB0Kbvm3VLBphtd8/OpgNuJtJaJJLhHBCKZJJeK+GcthAVsACAAAAAAKfjHp8xww1JDjzyjTnfamOvjFDc1Z3Hp/v/ZuQnFOOEAAzExAH0AAAAFZAAgAAAAACL9+rQRyywIXa5Pr7g2SnB0s0EjIct7PQtzjEkA69acBXMAIAAAAADz54imCCbu/qQkYP9wW2f5pHoBS+EyCe+xuDwC0UTiYgVsACAAAAAAKv602j4c3Bpn2t10qGl68eAD/fQsIH5lKMj8ANwrf7oAAzEyAH0AAAAFZAAgAAAAAKTK0NLhQ/+Y/HMxjRwBlXpXJAhAmCoWf1fReTegPnVpBXMAIAAAAAD7AlW+P4FfQS4r8d7EEvPVEP1diSbrVDBqg8ZvNl1XRAVsACAAAAAATTSEkff+/JMBjNwUciY2RQ6M66uMQMAtwU+UidDv1y4AAzEzAH0AAAAFZAAgAAAAAGMbgPxi2Wu1AlqoDKTgyBnCZlnCjHm2naxRcizkIbYJBXMAIAAAAADMvSM3VZzVyRFCfUvcLXAXQFRIxlhm0t0dUsnaRZG4hgVsACAAAAAAI7uGriMAQc4A/a70Yi1Y7IAC7o/mfNYf7/FvwELYf80AAzE0AH0AAAAFZAAgAAAAAPnZ1bdmrcX0fsSxliuSqvDbRqwIiVg0tYp0PViRX0nOBXMAIAAAAAAqBdZGg9O74mnwyQF+lILtyzHdLOErDjPSf9sM8EqCugVsACAAAAAAwhuDsz+fCtqY8mW8QvEVQERjDChwrYTw4y7dinlCCOMAAzE1AH0AAAAFZAAgAAAAAJ40Dmb5BUT1AlWjfXB43nIbJgDn9rBg9FAeYR80WK0vBXMAIAAAAAAMPqLMDdNmnKzA3Hq49/NkJfs+/cjnyjSAbmiOFUE5FgVsACAAAAAAxbi7ql49Y4pduqWlLJqpwimRzrEnC7w5fWaMBiinHL8AAzE2AH0AAAAFZAAgAAAAAGelnhqWM2gUVy4P5QE/2Zfd7s
9BugPqB/tcnSsFg5X0BXMAIAAAAAAWUhif3G+NMvZ3YPLB5OMuIhfPEu6U8KR9gTvJFz5uIwVsACAAAAAADEs8/aVSj2sJjxjv1K7o/aH8vZzt1bga73YiIKUx5DYAAzE3AH0AAAAFZAAgAAAAAD1xX2wCyf1aK1MoXnBAPfWLeBxsJI2i06tWbuiYKgElBXMAIAAAAACW1NW4RibvY0JRUzPvCmKnVbEy8AIS70fmsY08WgJOEgVsACAAAAAAQq9eIVoLcd4WxXUC3vub+EnxmcI2uP/yUWr3cz0jv9EAAzE4AH0AAAAFZAAgAAAAAHwU1LYeJmTch640sTu3VRRRdQg4YZ7S9IRfVXWHEWU8BXMAIAAAAACozWKD2YlqbQiBVVwJKptfAVM+R2FPJPtXkxVFAhHNXQVsACAAAAAAn7LS0QzTv9sOJzxH0ZqxsLYBYoArEo/PIXkU/zTnpM0AAzE5AH0AAAAFZAAgAAAAAHKaToAsILpmJyCE02I1iwmF/FibqaOb4b5nteuwOayfBXMAIAAAAABPxYjSK5DKgsdUZrZ+hM6ikejPCUK6Rqa0leoN7KOM0QVsACAAAAAAH9rPq5vvOIe9nTAcM1W1dVhQZ+gSkBohgoWLPcZnQXcAAzIwAH0AAAAFZAAgAAAAANTGiHqJVq28n7mMZsJD6gHxVQp1A6z8wgZVW+xV/lhmBXMAIAAAAABCR4BfdNVy7WE+IyQ312vYuIW0aGcXxr2II/MbNz8ZdAVsACAAAAAAng0GYpYJTypRLQUd5tIXWaAjZX5na04T/BypmwwrXPoAAzIxAH0AAAAFZAAgAAAAABooumzjEqp9Hvvd+sn1L82NI2iUGRl0nXQNJTHM7oyVBXMAIAAAAADgjz5L2ursK4C+pXXsJ6XHABhyallj9s/vSUgxXvjiiwVsACAAAAAAPjlAM0tbO6EUmLAeIZt57YMkMsuQfuC3T3d9vtnxgjwAAzIyAH0AAAAFZAAgAAAAAMA4jmE8U2uGkYUeKoYSlb22tfrRq2VlhV1Jq1kn4hV9BXMAIAAAAADG4fLeJUcINPSb1pMfAASJkuYsgS/59Eq/51mET/Y7RQVsACAAAAAAmwwcWOnzvpxm4pROXOL+BlxjEG/7v7hIautb2ubFT44AAzIzAH0AAAAFZAAgAAAAAK8/E3VHzHM6Kjp39GjFy+ci1IiUG5oxh0W6elV+oiX2BXMAIAAAAAA4/F4Q94xxb2TvZcMcji/DVTFrZlH8BL/HzD86RRmqNAVsACAAAAAAif3HPf6B1dTX/W+Vlp6ohadEQk/GAmHYzXfJia2zHeIAAzI0AH0AAAAFZAAgAAAAAGUX9ttLN1cCrOjlzsl/E6jEzQottNDw8Zo94nbO1133BXMAIAAAAAA7uVthFvXH+pbBrgQmnkPcpiHFEVCAi0WA7sAt9tlt3gVsACAAAAAAznaMStSbtGXU1Pb5z9KDTvEd79s6gmWYCKOKdzeijpEAAzI1AH0AAAAFZAAgAAAAAKnT/qg8N85Q9EQvpH7FBqUooxHFgrIjqLlIDheva2QSBXMAIAAAAABGAKkFMKoSIrvClWF7filoYM6fI9xSqOJVNS3dv4lxYwVsACAAAAAAgITE31hQA4ZOxpUFYSYv0mzWbd/6RKgbUXiUY96fBQEAAzI2AH0AAAAFZAAgAAAAAHRDRDT2hJrJ8X9zB9ELT28q8ZsfkYr92chaZYakiLlqBXMAIAAAAAAT0Le67ObldDta/Qb17dYfdslPsJTfGj3bWAgC0JIingVsACAAAAAAMGDrqys8iJ3fCT2Cj+zXIuXtsf4OAXWJl5HoPUMlbNoAAzI3AH0AAAAFZAAgAAAAAOOJcUjYOE0KqcYS1yZ363zglQXfr3XSD+R5fWLSivDoBXMAIAAAAABjeLe+tg37lNa+DdVxtlCtY77tV9PqfJ5X4XEKrfwu0AVsACAAAAAAlbpHiQAPLLTvSF+u58RBCLnYQKB5wciIQmANV9bkzsoAAzI4AH0AAAAFZAAgAAAAAMwWOOaWDDYUusdA1nyoaEB3C4/9GRpFNGags95Ddp4LBXMAIAAAAACLrsQXGWK15fW4mPEUXJ/90by13aG+727qWJep8QJ/WgVsACAAAAAAuThwsAsKUB56QAXC0MjJsZ9736atbiHPlK2tE0urf9QAAzI5AH0AAAAFZAAgAAAAABPRXBK0z8UANcvMDWntBjN9yF7iGMPLbhbaKrvHwcplBXMAIAAAAACZlqWsYPIb+ydmH03BxD3TqSGsSNoI7EVCy0VgW0TpYgVsACAAAAAAD2uaBv8oc7l4EeC5PWx5sfeyGZoas0JdFJ33M3jjgjMAAzMwAH0AAAAFZAAgAAAAAOn9/6pbzjIxFEApugaVOvVKXq23sDCJELv5UtLPDZI3BXMAIAAAAACHIwSDTlof0vFoigF4drbeM/8rdlj/4U386zQsNLtPGwVsACAAAAAAsYt/rXnpL55J9rlWSFRA4seaU6ggix7RgxbrJPu6gO4AAzMxAH0AAAAFZAAgAAAAAIMCESykv5b5d6mYjU5DlnO709lOFCaNoJBLtzBIqmg4BXMAIAAAAADs1Bfuaun4Es3nQ4kr29BzheLRDcFv+9a0gOGkSEcrDgVsACAAAAAA5kW6i/jOBSdoGAsZEZxVNRvt6miv86bP8JfUT+1KJg8AAzMyAH0AAAAFZAAgAAAAAFSPmr27XgKhUkbEvvC6Br5K1w7280NZrrhdzfYF+YGjBXMAIAAAAADv2h+Xq6kM7MHYTLMACRwbe2MzGHu4sdB67FGzDR6H4QVsACAAAAAAKII0MMC7o6GKVfGo2qBW/p35NupBp7MI6Gp0zXYwJOcAAzMzAH0AAAAFZAAgAAAAAPSV9qprvlNZK6OSQZNxKhJmBMs6QCKFESB/oeIvAS0iBXMAIAAAAAA835Jh22/pvZgKoYH6KjE+RRpYkaM1G35TWq6uplk/rgVsACAAAAAA162IdSb079yVlS7GkuSdHU3dOw03a+NS55ZPVBxbD08AAzM0AH0AAAAFZAAgAAAAAGsadEBJFax/UltPXB86G/YPxo6h353ZT+rC62iGy7qqBXMAIAAAAADs9TP3h91f6bTuG8QCQMA3atAVGs8k0ZjVzX3pM8HNAgVsACAAAAAA2ed4R4wYD6DT0P+N6o3gDJPE0DjljbRAv5vme3jb42sAAzM1AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzM2AH0AAAAFZAAgAAAAAKJY+8+7psFzJb5T+Mg9UWb6gA9Y8NN9j/ML2jZkNDNPBXMAIAAAAAA2R/nCtSYfCim89BzdUPS+DTQGwYDk+2ihFPEBS8h+ygVsACAAAAAAaEQra7xyvA3JS0BasIpRVrz7ZXsp6RpH7
OpfJBFzFG8AAzM3AH0AAAAFZAAgAAAAAI4qr+sJiRaqwZRhnenAzD7tTKq+jP1aaLyAln3w1HQuBXMAIAAAAADNYpqV73NpwN+Ta0ms1SRiu+6WNOOdGT+syghL+JAFhQVsACAAAAAAN07Fo9SK+fXp5Odk1J806pyVWc2WHXCtb1gJQknTgqsAAzM4AH0AAAAFZAAgAAAAAISgN1Hid7IWvDESN/3tywFZiBsZPYapOUx9/QjDDxLfBXMAIAAAAAA7lxpEz3+CGdv6/WKIAlIwRYURREKgn7+StwNoVekkDwVsACAAAAAAx+Oa2v1e1R7VomfsvcKO8VkY4eTl7LzjNQQL6Cj6GBQAAzM5AH0AAAAFZAAgAAAAAOTLdk1RIUzCsvK7xCXy+LxGhJf87fEL406U9QKta3JRBXMAIAAAAAD8+6UnUn8sN6AgQuuf7uFxW+2ZJNpZLgp3eKVtjbo9ewVsACAAAAAAQN3mZHmaDM0ZbUnk2O/+wCUjiCs4bnshfHjd/4ygLXcAAzQwAH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzQxAH0AAAAFZAAgAAAAAPLX4XT1eMfokMvj73G6loHEotbdivVFM6cpMbU0zIOmBXMAIAAAAABuTqwm6E60kVBN5iClzLnMBozIQRYjMozzRNKVhixkEAVsACAAAAAAjvY9G0Of8EQcZ4GVfSEVz7jrNn7i4qps2r82jJmngKoAAzQyAH0AAAAFZAAgAAAAAGzGJAUZBcVKRb4bCSNaRxtcDH2TqIgHqMElD9RL7SzDBXMAIAAAAABbJfrLwBrqZ2Ylm9QfL7nkW+GJ8vTlaeMUDT5620ebaAVsACAAAAAASiaS1IlBls5Tan57XqqbR1cuvyOcoSibJJQGREzm4c0AAzQzAH0AAAAFZAAgAAAAAC028abAppwE/ApZHU5RbzZZ8OPD5eJ8/6+NgiSFf4d+BXMAIAAAAAD3THvDUYWULR+AVLuRRPPAMVMeZ2ldWpBYSODboszWbQVsACAAAAAAATOaeYj+kx3MTDeNUcKGbUxLZDeMjC8JrWnlHmWTamQAAzQ0AH0AAAAFZAAgAAAAAHWr8wQYIKLiKeb3wd8kZQuXD/GUHDqXj12K/EQWV11CBXMAIAAAAADo3aFHDuyfls9tcWCxlFqJn4zDXd3WT9CIFYFjJnTYswVsACAAAAAAeMbIatR7DgefzuvF4WyNVDjJxP8KPA6U/rmMQIBvpM0AAzQ1AH0AAAAFZAAgAAAAAMdRi6AAjF1Z9ucMqYl2Ud1PLUGOlOPJFgSrPTjs27u8BXMAIAAAAAAqOdI7+P8srvqCTFadwMM3iggaVOGcf1BB0EjBYeV6RAVsACAAAAAAU+V2GrqgxJYs9mxuak/8JMFICXwQ2vksrBdOvSwWFpoAAzQ2AH0AAAAFZAAgAAAAADKKe++fqh4sn0a8Bb+w3QMFnOqSE5hDI3zGQTcmJGcOBXMAIAAAAAC8ebHa++JmxVISv6LzjuMgEZqzKSZlJyujnSV9syRD9AVsACAAAAAAQcVNSjyetScLu78IrAYaAigerY4kWtnbctmIyb19Wa4AAzQ3AH0AAAAFZAAgAAAAAMKoHwhZcocaQy7asIuRG8+P1qPENgFAwzc3X1gZWYnJBXMAIAAAAAB+R01s+WdJjLa5p7STuEylradWr+2JDxsWx9bKDgXNDQVsACAAAAAADeXTBHsm+FH2pQVoqOBPPIJiTJLqrzGisNnQ3S3xYJAAAzQ4AH0AAAAFZAAgAAAAAF41XuyBvREKcxjDl+wbnillseykpAjCKHmwIu+RNvM7BXMAIAAAAAC2Wzq+2mfO7howoOZxquqvOuH1D2WdlzA1nK+LUp0FMgVsACAAAAAARha+D6DVeDxSjNyXXO5DMY+W70EGyfc7gxR4TjzcYusAAzQ5AH0AAAAFZAAgAAAAAAfONgdhLPEjvsMxTY9K4//7WjREuRmZ6Bpcf3yvdMf3BXMAIAAAAABCy/zjmzucxQkbJ96l5vS5x6SeyHE0Z+Aqp9oZgBcC6QVsACAAAAAAasG/uN4DnWHZLkLhH4cMzXk5F/HL2D+72WH+1jjgH8UAAzUwAH0AAAAFZAAgAAAAAA5ZsebFm5NrSGs2E17+fUt4qkzsVmy4IJA5nGehtSBVBXMAIAAAAAAOzteKfp+YGPqn1fi8u/lKXP7E2Zgouwgt6KAADHX9AQVsACAAAAAA2+FaAbl8JZogfNCI0FFbmZZPy/KLF1u16FGrPspSbEIAAzUxAH0AAAAFZAAgAAAAAHf6LIjrvy6I31w/8b910U9qU8cBIYiWn9mW55NYZF8VBXMAIAAAAACONPisRtnFG9vV2mTQ3hRR/hGuVRA9dGd9Lt9JqDoM8wVsACAAAAAA+h7V/jIYJcd0ALIvFBlwxkFqWxBVlkqT9wFkmumr4QcAAzUyAH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAIAAAAAAAAAEHRmAAYAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json new file mode 100644 index 0000000000..c0211a1a34 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Aggregate.json @@ -0,0 +1,1195 @@ +{ + "description": "fle2v2-Rangev2-Double-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json new file mode 100644 index 0000000000..3bffc95191 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Correctness.json @@ -0,0 +1,1018 @@ +{ + "description": "fle2v2-Rangev2-Double-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + 
"client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + 
"arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + 
"object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" 
+ }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoubleNoPrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json new file mode 100644 index 0000000000..ac82c52b14 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Delete.json @@ -0,0 +1,795 @@ +{ + "description": "fle2v2-Rangev2-Double-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + 
"$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json new file mode 100644 index 0000000000..ce1be99a3a --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-FindOneAndUpdate.json @@ -0,0 +1,1199 @@ +{ + "description": "fle2v2-Rangev2-Double-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { 
+ "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + 
"$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAA
AvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgA
AAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV
7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" 
+ } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json new file mode 100644 index 0000000000..cac8bcafea --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-InsertFind.json @@ -0,0 +1,1186 @@ +{ + "description": "fle2v2-Rangev2-Double-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. 
Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "2FIZh/9N+NeJEQwxYIX5ikQT85xJzulBNReXk8PnG/s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I93Md7QNPGmEEGYU1+VVCqBPBEvXdqHPtTJtMOn06Yk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "GecBFQ1PemlECWZWCl7f74vmsL6eB6mzQ9n6tK6FYfs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QpjhZl+O1ORifgtCZuWAdcP6OKL7IZ2cA46v8FJcV28=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FWXI/yZ1M+2fIboeMCDMlp+I2NwPQDtoM/wWselOPYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uk26nvN/LdRLaBphiBgIZzT0sSpoO1z0RdDWRm/xrSA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hiiYSH1KZovAULc7rlmEU74wCjzDR+mm6ZnsgvFQjMw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hRzvMvWPX0sJme+wck67lwbKDFaWOa+Eyef+JSdc1s4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PSx5D+zqC9c295dguX4+EobT4IEzfffdfjzC8DWpB5Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QzfXQCVTjPQv2h21v95HYPq8uCsVJ2tPnjv79gAaM9M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XcGDO/dlTcEMLqwcm55UmOqK+KpBmbzZO1LIzX7GPaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Lf+o4E7YB5ynzUPC6KTyW0lj6Cg9oLIu1Sdd1ODHctA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wAuVn02LAVo5Y+TUocvkoenFYWzpu38k0NmGZOsAjS4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yJGDtveLbbo/0HtCtiTSsvVI/0agg/U1bFaQ0yhK12o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KsEy0zgYcmkM+O/fWF9z3aJGIk22XCk+Aw96HB6JU68=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "p+AnMI5ZxdJMSIEJmXXya+FeH5yubmOdViwUO89j0Rc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/jLix56jzeywBtNuGw55lCXyebQoSIhbful0hOKxKDY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fvDvSPomtJsl1S3+8/tzFCE8scHIdJY5hB9CdTEsoFo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oV5hOJzPXxfTuRdKIlF4uYEoMDuqH+G7/3qgndDr0PM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3ALwcvLj3VOfgD6OqXAO13h1ZkOv46R6+Oy6SUKh53I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gxaB9FJj0IM+InhvAjwWaex3UIZ9SAnDiUd5WHSY/l0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "66NPvDygJzKJqddfNuDuNOpvGajjFRtvhkwfUkiYmXw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1dWcQIocRAcO9XnXYqbhl83jc0RgjQpsrWd8dC27trg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "npos0Uf1DT3ztSCjPVY9EImlRnTHB1KLrvmVSqBQ/8E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "TEI9qBx/tK1l1H0v1scMG8Srmtwo5VxWHADPBSlWrXk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3wUN2ypQKoj+5ASkeIK9ycxhahVxyTmGopigoUAlyYs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o/oksSnUS+nIq6ozWTbB5bJh+NoaPj8deAA23uxiWCk=", + "subType": "00" + } + }, + { + "$binary": { + 
"base64": "KExYPruhA31e8xuSwvfUfDcyY/H2Va6taUd0k4yFgLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/x+dNfxdd/lkx8Z8VZVfoYl7LPoaZ/iKEzZXBrAtIJc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DE4cmjFLPqZlmRomO0qQiruUBtzoCe8ZdNRcfNH92pU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M6EKNcLPw/iojAChgYUSieaBYWcbsjKtB94SaHOr8vk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+qP49lDPeyhaduTvXJgtJEqHNEYANVu9Bg3Bxz7Td9w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ruMrC2VIS+VKbJwCFb3bfkaLTju9nE+yPONV9s0M0Vo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EbjDlSB5JKnDKff4d8hOmaOwJ7B9Q6NQFisLj+DPC+0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C/yYOTB94edyqAbiQNu8/H7FoG3yRRjHDkMykz4+Mv0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CBxqrejG+qQQq2YTd6iP/06kiu2CxxzBFaZK3Ofb1CM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2ZOQ/fpho+AbDENWBZaln7wRoepIRdhyT648dr8O5cU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "EghIgEPz01+myPgj8oid+PgncvobvC7vjvG3THEEQ0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "92CysZYNF8riwAMhdrIPKxfODw9p07cKQy/Snn8XmVY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VO0LeTBQmsEf7sCHzTnZwUPNTqRZ49R8V5E9XnZ/5N4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "exs8BQMJq7U6ZXYgIizT7XN+X/hOmmn4YEuzev9zgSI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qHpS4k1I+gPniNp4CA8TY8lLN36vBYmgbKMFpbYMEqg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+7lWKCKAWFw6gPZdHE6E8KIfI14/fSvtWUmllb5WLi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YiH/US0q6679hWblFDDKNqUjCgggoU8sUCssTIF1QbU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YgwkKElEubNfvXL9hJxzqQUQtHiXN/OCGxNL1MUZZlM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hZFST4INZTTuhvJlGJeMwlUAK270UCOTCDeBAnN4a7g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "24I1Zw35AuGnK3CqJhbCwYb0IPuu5sCRrM5iyeITOLc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vgD12JB4Q1S/kGPSQ1KOgp386KnG1GbM/5+60oRGcGw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+wNE+OL+CB9d4AUJdVxd56jUJCAXmmk9fapuB2TAc4g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uhQh1B2Pe4RkNw/kPEcgaLenuikKoRf1iyfZhpXdodc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eu8gjAUIp8ybO204AgeOq5v1neI1yljqy5v3I6lo1lM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7QG6oVbASBAjrnCPxzzUNnuFSFNlKhbuBafkF8pr7Is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "PUS1xb2oHSDTdYltutoSSxBiJ1NjxH3l2kA4P1CZLEs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XPMh/JDC/O93gJJCwwgJDb8ssWZvRvezNmKmyn3nIfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jWz+KGwMk/GOvFAK2rOxF3OjxeZAWfmUQ1HGJ7icw4A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o7XbW68pc6flYigf3LW4WAGUWxpeqxaQLkHUhUR9RZ8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nqR+g60+5U0okbqJadSqGgnC+j1JcP8rwMcfzOs2ACI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Hz43qVK95tSfbYFtaE/8fE97XMk1RiO8XpWjwZHB80o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "noZUWlZ8M6KXU5rkifyo8/duw5IL7/fXbJvT7bNmW9k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WONVHCuPSanXDRQQ/3tmyJ0Vq+Lu/4hRaMUf0g0kSuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"UEaj6vQRoIghE8Movd8AGXhtwIOXlP4cBsECIUvE5Y8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "D3n2YcO8+PB4C8brDo7kxKjF9Y844rVkdRMLTgsQkrw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C+YA0G9KjxZVaWwOMuh/dcnHnHAlYnbFrRl0IEpmsY0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rUnmbmQanxrbFPYYrwyQ53x66OSt27yAvF+s48ezKDc=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": 
"DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsEw+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFV
skRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rAdudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/
WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37yfQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJ
HYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json new file mode 100644 index 0000000000..938657c91c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Double-Update.json @@ -0,0 +1,1205 @@ +{ + "description": "fle2v2-Rangev2-Double-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + 
} + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Double. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoubleNoPrecision": { + "$numberDouble": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "6YrBn2ofIw1b5ooakrLOwF41BWrps8OO0H9WH4/rtlE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "n+XAuFnP8Dov9TnhGFxNx0K/MnVM9WbJ7RouEu0ndO0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yRXojuVdn5GQtD97qYlaCL6cOLmZ7Cvcb3wFjkLUIdM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DuIkdRPITRs55I4SZmgomAHCIsDQmXRhW8+MOznkzSk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SsBk+Et1lTbU+QRPx+xyJ/jMkmfG+QCvQEpip2YYrzA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "crCIzOd8KhHvvUlX7M1v9bhvU4pLdTc+X2SuqoKU5Ek=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "YOWdCw4UrqnxkAaVjqmC4sKQDMVMHEpFGnlxpxdaU6E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "M3SShp81Ff8tQ632qKbv9MUcN6wjDaBReI0VXNu6Xh4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gzHlSPxpM0hT75kQvWFzGlOxKvDoiKQZOr19V6l2zXI=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "s3JnppOGYw9SL2Q1kMAZs948v2F5PrpXjGei/HioDWs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cG6+3Gk/zEH68P/uuuwiAUVCuyJwa1LeV+t29FlPPAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dupdvR3AyJtM+g9NDKiaLVOtGca387JQp8w+V03m7Ig=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JqEQc5svj2jTvZ6LLA5ivE+kTb/0aRemSEmxk4G7Zrg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "szcXXXKnob+p3SoM4yED2R920LeJ7cVsclPMFTe4CeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o1QoGVXmuBdHwHm7aCtGMlMVKrjFdYvJXpoq6uhIAZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Jfm5wPlqqLCJRGQIqRq2NGmpn7s0Vrih2H3YAOoI2YU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zMHLb8ARbsYo8Ld05bqnGFf1Usha6EGb8QKwdSAyps0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yQdtq9lh5pugL7/i0Bj/PuZUUBUIzf+7wj1rl5y736w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wGWVZdO7qIuyDg/BqDgqjgoQ02h5YYgwXQB1oCin2NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "by9HMLj6NTEpgztZ5HSN6GxImkXPcaFINYDzgZY33X8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tWo0vbasi7bXmn/MsOx13VC1IsWtpx/nYp0uj4iMzdA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tQQpndUYd5O87lOtrGjH3wl9VsOK0ray7RMasL90sBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cQjXEDCMsOpKLLf+vlTgIHA+cbSJdzqhbSX9Wvh95aA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7yMpU48IxK9SzP2cx3VnTownGEwFmeFofuuFT97SuuY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kSOx1kz0CmBgzKQHZlo65ZUY1DIv9A99JRm+Us2y6Ew=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ubQpdPBe6/xvtr+AcXdfYLSvYCR4ot0tivehkCsupb4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xal+iCJ6FTefRQToyoNksc9NCZShyn04NDGi4IYrcoM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d7jU4iOK50xHxlkSifcxlZFCM46TSgQzoYivxG3HNLY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tJvl2nsBLBVzL3pp6sKWCL4UXeh3q/roYBJjSb74ve0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OIUCaKRvIx9t1w6Hxlz1IcQTdPNCfdRNwnnTm10W+X0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A9tvzsiElotOUVIB4CqfQp9mAwqvTM35YkmAR170aHA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lI8gpK7hpb7c9x4RQugsxMnQay5LZJmwslZdvMx/dcE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dNCzh40U0XvdKnSDi3HRQOWQftEsDVqc4uUvsVFGoq8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IP+iwEBWBwVVZIdpaMu8k5+soFCz+TZkYn3drKZ9grE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pnqyh6e0y5svHkJDShlN9CHV0WvMBE4QbtJpQw5ZCXc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "elEl42tbVDoRTLjAhZUFEtXiut4b3PVhg/1ZLZSQdtE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vHuu2FxwclMHqyE6JBYbTYgbEkB0dqb/JuaxsvfwsmY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xTf7NCe3Gf8QpE78HR5OknlLTKfs9J+RN9UZpH6fnso=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XiWSasRnJAulGR6+LCVD3mwRObXylqYWR9jvpywq12c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MZMxEQ5ikx0PG1YFIExv0UnTZogsvgeOEZTpzvBDn4w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yZMyMZBDrWbAhvnic7vvIYhmO9m5H2iuv0c8KNZrBzY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xxM14hTPY5j0vvcK2C7YAEjzdsfUTFHozHC0hEo1bxI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"+01rqR1xVwkpGXcstbk1ItJqFVjH6Q8MGxEN3Cm9Y1A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xOpLV0Z2VTRJ3iWtnWZcsyjXubTIkYWo31cO+HV1o1k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BWUOLqgLBqc5NwxVlSV5H3KFQPXbCp7mdo+jF+8cJqY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fuQb1S6xZDGlrEbK+kI23aL53PP1PVNwqICnZNt9Yzg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfscnoibFttahLdPVC4Ee+47ewGFKpDSU7M6HX19bKE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rpSW2awybNVeKtat91VFxqbINoTfNhPfQAu+d73Xtf8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9M/CP9ccOIIj2LLFmE0GFDO0Ban2wsNalEXfM6+h+1s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrEMG49l1ye4MhXs5ZS9tz8P6h+hDvthIg/2wW9ne1Q=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ImNhbfeyfH8qIEeA5ic0s3dAQBdzzTBS+CPsNih9vZ0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dWP33YDSn04UKJN2ogh2Rui0iW/0q2y18OCDRVcfyoo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "lYv0isAtfGh6H9tdp3cp2eHU7q2J+uk7QrgcxtK3w7Y=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VGMoamB/+7zTOYcY/pqJc96xlv2PdW4hwsIAEIslTDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yNeBWMF7BnD9wVwz2PgJsvWr77QiVvvWUvJF0+fqBug=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SfpvObJ+tJBXSvqeN7vlOfmhYign635lciYAJIjUtY8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dsen4NqjzVGjpjufiTMs3+gqeD09EbnuogPgxrJECwg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pxCWVM3sn19NsFEpgHbgLa+PmYlhN3mMiP0Wk8kJhYw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q11KNvJszjYIB9n9HcC+N4uz11a3eRj1L3BH9scKMDQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "A1PmkgcEToWh1JiVWE6mI5jUu7poxWWuCUt/cgRUUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "qJo3Hu4PJeanL7XEaWXO/n3YsodhZyd+MJOOmB9Kpd8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BkBKLO8URFscfRY9Bav/1+L9mLohDgNr/MkZtGiraIs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rZq5WA3Hx3xthOyHAJXK//f8pE2qbz7YKu3TIMp9GFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X07a/Lm80p5xd4RFs1dNmw+90tmPDPdGiAKVZkxd4zY=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "HI88j1zrIsFoijIXKybr9mYubNV5uVeODyLHFH4Ueco=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wXVD/HSbBljko0jJcaxJ1nrzs2+pchLQqYR3vywS8SU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "KhscCh+tt/pp8lxtKZQSPPUU94RvJYPKG/sjtzIa4Ws=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RISnuNrTTVNW5HnwCgQJ301pFw8DOcYrAMQIwVwjOkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ra5zukLh2boua0Bh74qA+mtIoixGXlsNsxiJqHtqdTI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eqr0v+NNWXWszi9ni8qH58Q6gw5x737tJvH3lPaNHO4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d42QupriWIwGrFAquXNFi0ehEuidIbHLFZtg1Sm2nN8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "2azRVxaaTIJKcgY2FU012gcyP8Y05cRDpfUaMnCBaQU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "3nlgkM4K/AAcHesRYYdEu24UGetHodVnVfHzw4yxZBM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hqy91FNmAAac2zUaPO6eWFkx0/37rOWGrwXN+fzL0tU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "akX+fmscSDSF9pB5MPj56iaJPtohr0hfXNk/OPWsGv8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "1ZvUb10Q7cN4cNLktd5yNjqgtawsYnkbeVBZV6WuY/I=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "otCwtuKiY4hCyXvYzXvo10OcnzZppebo38KsAlq49QM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Mty8EscckeT/dhMfrPFyDbLnmMOcYRUQ3mLK4KTu6V8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "tnvgLLkJINO7csREYu4dEVe1ICrBeu7OP+HdfoX3M2E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kOefsHgEVhkJ17UuP7Dxogy6sAQbzf1SFPKCj6XRlrQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F+JQ79xavpaHdJzdhvwyHbzdZJLNHAymc/+67La3gao=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "NCZ9zp5rDRceENuSgAfTLEyKg0YgmXAhK0B8WSj7+Pw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wL1CJ7cYR5slx8mHq++uMdjDfkt9037lQTUztEMF56M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "txefkzTMITZE+XvvRFZ7QcgwDT/7m8jNmxRk4QBaoZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jFunW3v1tSYMyZtQQD28eEy9qqDp4Kqo7gMN29N4bfQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QMO915KUiS3X3R1bU1YoafVM2s0NeHo3EjgTA9PnGwY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "nwdKJEXdilzvb7494vbuDJ+y6SrfJahza1dYIsHIWVI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vpWMX+T/VXXajFo0UbuYjtp0AEzBU0Y+lP+ih2EQ7mg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1lmzG0J1DhKDRhhq5y5Buygu4G8eV2X0t7kUY90EohM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SiKqpXqO0trwhFvBWK274hMklpCgMhNs/JY84yyn/NE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7cPGPYCKPTay+ZR9Gx6oOueduOgaFrSuAXmNDpDHXdI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4THEYvAkjs2Fh7FIe5LC45P4i4N0L7ob67UOVbhp6Nk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "B+UGsChLLZR7iqnt8yq91OgmTgwiUKTJhFxY4NT0O6c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X1uYwBCsCg1H+PnKdwtBqXlt0zKEURi8bOM940GcPfk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xYOgT5l7shlNXCwHlguovmDkcEnF8dXyYlTyYrgZ8GE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vFMTZqV8bh1+gcKzTkXweMddJlgdUnwX0DWzUUaMok4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4HI0y9FrtleZxZ7M6INdNhLelrQ2Rv/+ykWCBl+tMC8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpJ0bBE474OUkn1vUiLWumIBtYmwc7J5+LQU/nyeLQc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jQTPeXZvdxY/DjtPfYfKUArIDsf0E9MVFy2O26sv1ec=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "QLLto0ExR2ZYMGqlyaMZc/hXFFTlwmgtKbiVq/xJIeI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yBJNviU1nchbGbhx6InXCVRXa90sEepz1EwbYuKXu2U=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jpEf0vHxrPu9gTJutNXSi2g/2Mc4WXFEN7yHonZEb7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E09kLFckMYwNuhggMxmPtwndyvIAx+Vl+b2CV6FP75s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N+ue6/cLPb5NssmJCCeo18LlbKPz6r2z20AsnTKRvOo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "yVQNZP8hhsvNGyDph2QP2qTNdXZTiIEVineKg+Qf33o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cSC9uI+9c5S8X+0G7amVyug1p0ZlgBsbEDYYyezBevQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1NpZGjoQzuQtekj80Rifxe9HbE08W07dfwxaFHaVn84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "5Ghuq/8l11Ug9Uf/RTwf9On3OxOwIXUcb9soiy4J7/w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"0LWKaEty6ywxLFhDaAqulqfMnYc+tgPfH4apyEeKg80=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OwSthmCBtt6NIAoAh7aCbj82Yr/+9t8U7WuBQhFT3AQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "iYiyg6/1isqbMdvFPIGucu3cNM4NAZNtJhHpGZ4eM+c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "waBgs8jWuGJPIF5zCRh6OmIyfK5GCBQgTMfmKSR2wyY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1Jdtbe2BKJXPU2G9ywOrlODZ/cNYEQlKzAW3aMe1Hy4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xaLEnNUS/2ySerBpb9dN/D31t+wYcKekwTfkwtni0Mc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bIVBrOhOvr6cL55Tr24+B+CC9MiG7U6K54aAr2IXXuw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6Cdq5wroGu2TEFnekuT7LhOpd/K/+PcipIljcHU9QL4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K5l64vI4S/pLviLW6Pl0U3iQkI3ge0xg4RAHcEsyKJo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "bzhuvZ0Ls22yIOX+Hz51eAHlSuDbWR/e0u4EhfdpHbc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Qv+fr6uD4o0bZRp69QJCFL6zvn3G82c7L+N1IFzj7H0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XAmISMbD3aEyQT+BQEphCKFNa0F0GDKFuhM9cGceKoQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4VLCokntMfm1AogpUnYGvhV7nllWSo3mS3hVESMy+hA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "xiXNLj/CipEH63Vb5cidi8q9X47EF4f3HtJSOH7mfM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "4XlCYfYBjI9XA5zOSgTiEBYcZsdwyXL+f5XtH2xUIOc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "k6DfQy7ZYJIkEly2B5hjOZznL4NcgMkllZjJLb7yq7w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZzM6gwWesa3lxbZVZthpPFs2s3GV0RZREE2zOMhBRBo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US+jeMeeOd7J0wR0efJtq2/18lcO8YFvhT4O3DeaonQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b6iSxiI1FM9SzxuG1bHqGA1i4+3GOi0/SPW00XB4L7o=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kn3LsxAVkzIZKK9I6fi0Cctr0yjXOYgaQWMCoj4hLpM=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + 
"_id": 1, + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoubleNoPrecision": { + "$gt": { + "$binary": { + "base64": "DbMkAAADcGF5bG9hZABXJAAABGcAQyQAAAMwAH0AAAAFZAAgAAAAAHgYoMGjEE6fAlAhICv0+doHcVX8CmMVxyq7+jlyGrvmBXMAIAAAAAC/5MQZgTHuIr/O5Z3mXPvqrom5JTQ8IeSpQGhO9sB+8gVsACAAAAAAuPSXVmJUAUpTQg/A9Bu1hYczZF58KEhVofakygbsvJQAAzEAfQAAAAVkACAAAAAA2kiWNvEc4zunJ1jzvuClFC9hjZMYruKCqAaxq+oY8EAFcwAgAAAAACofIS72Cm6s866UCk+evTH3CvKBj/uZd72sAL608rzTBWwAIAAAAADuCQ/M2xLeALF0UFZtJb22QGOhHmJv6xoO+kZIHcDeiAADMgB9AAAABWQAIAAAAABkfoBGmU3hjYBvQbjNW19kfXneBQsQQPRfUL3UAwI2cAVzACAAAAAAUpK2BUOqX/DGdX5YJniEZMWkofxHqeAbXceEGJxhp8AFbAAgAAAAAKUaLzIldNIZv6RHE+FwbMjzcNHqPESwF/37mm43VPrsAAMzAH0AAAAFZAAgAAAAAFNprhQ3ZwIcYbuzLolAT5n/vc14P9kUUQComDu6eFyKBXMAIAAAAAAcx9z9pk32YbPV/sfPZl9ALIEVsqoLXgqWLVK/tP+heAVsACAAAAAA/qxvuvJbAHwwhfrPVpmCFzNvg2cU/NXaWgqgYUZpgXwAAzQAfQAAAAVkACAAAAAAODI+pB2pCuB+YmNEUAgtMfNdt3DmSkrJ96gRzLphgb8FcwAgAAAAAAT7dewFDxUDECQ3zVq75/cUN4IP+zsqhkP5+czUwlJIBWwAIAAAAACFGeOtd5zBXTJ4JYonkn/HXZfHipUlqGwIRUcH/VTatwADNQB9AAAABWQAIAAAAACNAk+yTZ4Ewk1EnotQK8O3h1gg9I7pr9q2+4po1iJVgAVzACAAAAAAUj/LesmtEsgqYVzMJ67umVA11hJTdDXwbxDoQ71vWyUFbAAgAAAAABlnhpgTQ0WjLb5u0b/vEydrCeFjVynKd7aqb+UnvVLeAAM2AH0AAAAFZAAgAAAAAD/FIrGYFDjyYmVb7oTMVwweWP7A6F9LnyIuNO4MjBnXBXMAIAAAAACIZgJCQRZu7NhuNMyOqCn1tf+DfU1qm10TPCfj5JYV3wVsACAAAAAA5hmY4ptuNxULGf87SUFXQWGAONsL9U29duh8xqsHtxoAAzcAfQAAAAVkACAAAAAAciRW40ORJLVwchOEpz87Svb+5toAFM6LxDWv928ECwQFcwAgAAAAAN0dipyESIkszfjRzdDi8kAGaa2Hf4wrPAtiWwboZLuxBWwAIAAAAAANr4o/+l1OIbbaX5lZ3fQ/WIeOcEXjNI1F0WbSgQrzaQADOAB9AAAABWQAIAAAAACZqAyCzYQupJ95mrBJX54yIz9VY7I0WrxpNYElCI4dTQVzACAAAAAA/eyJb6d1xfE+jJlVXMTD3HS/NEYENPVKAuj56Dr2dSEFbAAgAAAAANkSt154Or/JKb31VvbZFV46RPgUp8ff/hcPORL7PpFBAAM5AH0AAAAFZAAgAAAAAI5bm3YO0Xgf0VT+qjVTTfvckecM3Cwqj7DTKZXf8/NXBXMAIAAAAAD/m+h8fBhWaHm6Ykuz0WX1xL4Eme3ErLObyEVJf8NCywVsACAAAAAAfb1VZZCqs2ivYbRzX4p5CtaCkKW+g20Pr57FWXzEZi8AAzEwAH0AAAAFZAAgAAAAANqo4+p6qdtCzcB4BX1wQ6llU7eFBnuu4MtZwp4B6mDlBXMAIAAAAAAGiz+VaukMZ+6IH4jtn4KWWdKK4/W+O+gRioQDrfzpMgVsACAAAAAAG4YYkTp80EKo59mlHExDodRQFR7njhR5dmISwUJ6ukAAAzExAH0AAAAFZAAgAAAAAPrFXmHP2Y4YAm7b/aqsdn/DPoDkv7B8egWkfe23XsM1BXMAIAAAAAAGhwpKAr7skeqHm3oseSbO7qKNhmYsuUrECBxJ5k+D2AVsACAAAAAAAqPQi9luYAu3GrFCEsVjd9z2zIDcp6SPTR2w6KQEr+IAAzEyAH0AAAAFZAAgAAAAABzjYxwAjXxXc0Uxv18rH8I3my0Aguow0kTwKyxbrm+cBXMAIAAAAADVbqJVr6IdokuhXkEtXF0C2gINLiAjMVN20lE20Vmp2QVsACAAAAAAD7K1Fx4gFaaizkIUrf+EGXQeG7QX1jadhGc6Ji471H8AAzEzAH0AAAAFZAAgAAAAAFMm2feF2fFCm/UC6AfIyepX/xJDSmnnolQIBnHcPmb5BXMAIAAAAABLI11kFrQoaNVZFmq/38aRNImPOjdJh0Lo6irI8M/AaAVsACAAAAAAOWul0oVqJ9CejD2RqphhTC98DJeRQy5EwbNerU2+4l8AAzE0AH0AAAAFZAAgAAAAAJvXB3KyNiNtQko4SSzo/9b2qmM2zU9CQTTDfLSBWMgRBXMAIAAAAAAvjuVP7KsLRDeqVqRziTKpBrjVyqKiIbO9Gw8Wl2wFTAVsACAAAAAADlE+oc1ins+paNcaOZJhBlKlObDJ4VQORWjFYocM4LgAAzE1AH0AAAAFZAAgAAAAAPGdcxDiid8z8XYnfdDivNMYVPgBKdGOUw6UStU+48CdBXMAIAAAAAARj6g1Ap0eEfuCZ4X2TsE
w+Djrhto3fA5nLwPaY0vCTgVsACAAAAAAoHqiwGOUkBu8SX5U1yHho+UIFdSN2MdQN5s6bQ0EsJYAAzE2AH0AAAAFZAAgAAAAAP5rGPrYGt3aKob5f/ldP0qrW7bmWvqnKY4QwdDWz400BXMAIAAAAADTQkW2ymaaf/bhteOOGmSrIR97bAnJx+yN3yMj1bTeewVsACAAAAAADyQnHGH2gF4w4L8axUsSTf6Ubk7L5/eoFOJk12MtZAoAAzE3AH0AAAAFZAAgAAAAAAlz6wJze5UkIxKpJOZFGCOf3v2KByWyI6NB6JM9wNcBBXMAIAAAAABUC7P/neUIHHoZtq0jFVBHY75tSFYr1Y5S16YN5XxC1QVsACAAAAAAgvxRbXDisNnLY3pfsjDdnFLtkvYUC4lhA68eBXc7KAwAAzE4AH0AAAAFZAAgAAAAAFJ8AtHcjia/9Y5pLEc3qVgH5xKiXw12G9Kn2A1EY8McBXMAIAAAAAAxe7Bdw7eUSBk/oAawa7uicTEDgXLymRNhBy1LAxhDvwVsACAAAAAAxKPaIBKVx3jTA+R/el7P7AZ7efrmTGjJs3Hj/YdMddwAAzE5AH0AAAAFZAAgAAAAAO8uwQUaKFb6vqR3Sv3Wn4QAonC2exOC9lGG1juqP5DtBXMAIAAAAABZf1KyJgQg8/Rf5c02DgDK2aQu0rNCOvaL60ohDHyY+gVsACAAAAAAqyEjfKC8lYoIfoXYHUqHZPoaA6EK5BAZy5dxXZmay4kAAzIwAH0AAAAFZAAgAAAAAE8YtqyRsGCeiR6hhiyisR/hccmK4nZqIMzO4lUBmEFzBXMAIAAAAAC1UYOSKqAeG1UJiKjWFVskRhuFKpj9Ezy+lICZvFlN5AVsACAAAAAA6Ct9nNMKyRazn1OKnRKagm746CGu+jyhbL1qJnZxGi0AAzIxAH0AAAAFZAAgAAAAAPhCrMausDx1QUIEqp9rUdRKyM6a9AAx7jQ3ILIu8wNIBXMAIAAAAACmH8lotGCiF2q9VQxhsS+7LAZv79VUAsOUALaGxE/EpAVsACAAAAAAnc1xCKfdvbUEc8F7XZqlNn1C+hZTtC0I9I3LL06iaNkAAzIyAH0AAAAFZAAgAAAAAOBi/GAYFcstMSJPgp3VkMiuuUUCrZytvqYaU8dwm8v2BXMAIAAAAACEZSZVyD3pKzGlbdwlYmWQhHHTV5SnNLknl2Gw8IaUTQVsACAAAAAAfsLZsEDcWSuNsIo/TD1ReyQW75HPMgmuKZuWFOLKRLoAAzIzAH0AAAAFZAAgAAAAAIQuup+YGfH3mflzWopN8J1X8o8a0d9CSGIvrA5HOzraBXMAIAAAAADYvNLURXsC2ITMqK14LABQBI+hZZ5wNf24JMcKLW+84AVsACAAAAAACzfjbTBH7IwDU91OqLAz94RFkoqBOkzKAqQb55gT4/MAAzI0AH0AAAAFZAAgAAAAAKsh0ADyOnVocFrOrf6MpTrNvAj8iaiE923DPryu124gBXMAIAAAAADg24a8NVE1GyScc6tmnTbmu5ulzO+896fE92lN08MeswVsACAAAAAAaPxcOIxnU7But88/yadOuDJDMcCywwrRitaxMODT4msAAzI1AH0AAAAFZAAgAAAAAKkVC2Y6HtRmv72tDnPUSjJBvse7SxLqnr09/Uuj9sVVBXMAIAAAAABYNFUkH7ylPMN+Bc3HWX1e0flGYNbtJNCY9SltJCW/UAVsACAAAAAAZYK/f9H4OeihmpiFMH7Wm7uLvs2s92zNA8wyrNZTsuMAAzI2AH0AAAAFZAAgAAAAADDggcwcb/Yn1Kk39sOHsv7BO/MfP3m/AJzjGH506Wf9BXMAIAAAAAAYZIsdjICS0+BDyRUPnrSAZfPrwtuMaEDEn0/ijLNQmAVsACAAAAAAGPnYVvo2ulO9z4LGd/69NAklfIcZqZvFX2KK0s+FcTUAAzI3AH0AAAAFZAAgAAAAAEWY7dEUOJBgjOoWVht1wLehsWAzB3rSOBtLgTuM2HC8BXMAIAAAAAAAoswiHRROurjwUW8u8D5EUT+67yvrgpB/j6PzBDAfVwVsACAAAAAA6NhRTYFL/Sz4tao7vpPjLNgAJ0FX6P/IyMW65qT6YsMAAzI4AH0AAAAFZAAgAAAAAPZaapeAUUFPA7JTCMOWHJa9lnPFh0/gXfAPjA1ezm4ZBXMAIAAAAACmJvLY2nivw7/b3DOKH/X7bBXjJwoowqb1GtEFO3OYgAVsACAAAAAA/JcUoyKacCB1NfmH8vYqC1f7rd13KShrQqV2r9QBP44AAzI5AH0AAAAFZAAgAAAAAK00u6jadxCZAiA+fTsPVDsnW5p5LCr4+kZZZOTDuZlfBXMAIAAAAAAote4zTEYMDgaaQbAdN8Dzv93ljPLdGjJzvnRn3KXgtQVsACAAAAAAxXd9Mh6R3mnJy8m7UfqMKi6oD5DlZpkaOz6bEjMOdiwAAzMwAH0AAAAFZAAgAAAAAFbgabdyymiEVYYwtJSWa7lfl/oYuj/SukzJeDOR6wPVBXMAIAAAAADAFGFjS1vPbN6mQEhkDYTD6V2V23Ys9gUEUMGNvMPkaAVsACAAAAAAL/D5Sze/ZoEanZLK0IeEkhgVkxEjMWVCfmJaD3a8uNIAAzMxAH0AAAAFZAAgAAAAABNMR6UBv2E627CqLtQ/eDYx7OEwQ7JrR4mSHFa1N8tLBXMAIAAAAAAxH4gucI4UmNVB7625C6hFSVCuIpJO3lusJlPuL8H5EQVsACAAAAAAVLHNg0OUVqZ7WGOP53BkTap9FOw9dr1P4J8HxqFqU04AAzMyAH0AAAAFZAAgAAAAAG8cd6WBneNunlqrQ2EmNf35W7OGObGq9WL4ePX+LUDmBXMAIAAAAAAjJ2+sX87NSis9hBsgb1QprVRnO7Bf+GObCGoUqyPE4wVsACAAAAAAs9c9SM49/pWmyUQKslpt3RTMBNSRppfNO0JBvUqHPg0AAzMzAH0AAAAFZAAgAAAAAFWOUGkUpy8yf6gB3dio/aOfRKh7XuhvsUj48iESFJrGBXMAIAAAAAAY7sCDMcrUXvNuL6dO0m11WyijzXZvPIcOKob6IpC4PQVsACAAAAAAJOP+EHz6awDb1qK2bZQ3kTV7wsj5Daj/IGAWh4g7omAAAzM0AH0AAAAFZAAgAAAAAGUrIdKxOihwNmo6B+aG+Ag1qa0+iqdksHOjQj+Oy9bZBXMAIAAAAABwa5dbI2KmzBDNBTQBEkjZv4sPaeRkRNejcjdVymRFKQVsACAAAAAA4ml/nm0gJNTcJ4vuD+T2Qfq2fQZlibJp/j6MOGDrbHMAAzM1AH0AAAAFZAAgAAAAAOx89xV/hRk64/CkM9N2EMK6aldII0c8smdcsZ46NbP8BXMAIAAAAADBF6tfQ+7q9kTuLyuyrSnDgmrdmrXkdhl980i1KHuGHgVsACAAAAAACUqiFqHZdGbwAA+hN0YUE5zFg+H+dabIB4dj5/75W/YAAzM2AH0AAAAFZAAgAAAAAMkN0L1oQWXhjwn9rA
dudcYeN8/5VdCKU8cmDt7BokjsBXMAIAAAAAAT62pGXoRwExe9uvgYOI0hg5tOxilrWfoEmT0SMglWJwVsACAAAAAAlVz4dhiprSbUero6JFfxzSJGclg63oAkAmgbSwbcYxIAAzM3AH0AAAAFZAAgAAAAANxfa4xCoaaB7k1C1RoH1LBhsCbN2yEq15BT9b+iqEC4BXMAIAAAAACAX9LV8Pemfw7NF0iB1/85NzM1Ef+1mUfyehacUVgobQVsACAAAAAAVq4xpbymLk0trPC/a2MvB39I7hRiX8EJsVSI5E5hSBkAAzM4AH0AAAAFZAAgAAAAAOYIYoWkX7dGuyKfi3XssUlc7u/gWzqrR9KMkikKVdmSBXMAIAAAAABVF2OYjRTGi9Tw8XCAwZWLpX35Yl271TlNWp6N/nROhAVsACAAAAAA0nWwYzXQ1+EkDvnGq+SMlq20z+j32Su+i/A95SggPb4AAzM5AH0AAAAFZAAgAAAAAIy0+bXZi10QC+q7oSOLXK5Fee7VEk/qHSXukfeVIfgzBXMAIAAAAAAQ3IIV/JQCHW95AEbH5zGIHtJqyuPjWPMIZ+VmQHlxEwVsACAAAAAAp0jYsyohKv9Pm+4k+DplEGbl9WLZpAJzitrcDj4CNsMAAzQwAH0AAAAFZAAgAAAAAL5SOJQ3LOhgdXJ5v086NNeAl1qonQnchObdpZJ1kHeEBXMAIAAAAAA+tEqTXODtik+ydJZSnUqXF9f18bPeze9eWtR7ExZJgQVsACAAAAAAbrkZCVgB9Qsp4IAbdf+bD4fT6Boqk5UtuA/zhNrh1y0AAzQxAH0AAAAFZAAgAAAAAKl8zcHJRDjSjJeV/WvMxulW1zrTFtaeBy/aKKhadc6UBXMAIAAAAADBdWQl5SBIvtZZLIHszePwkO14W1mQ0izUk2Ov21cPNAVsACAAAAAAHErCYycpqiIcCZHdmPL1hi+ovLQk4TAvENpfLdTRamQAAzQyAH0AAAAFZAAgAAAAAFvotcNaoKnVt5CBCOPwjexFO0WGWuaIGL6H/6KSau+6BXMAIAAAAAD2y2mBN5xPu5PJoY2zcr0GnQDtHRBogA5+xzIxccE9fwVsACAAAAAAdS34xzJesnUfxLCcc1U7XzUqLy8MAzV/tcjbqaD3lkMAAzQzAH0AAAAFZAAgAAAAAPezU0/vNT4Q4YKbTbaeHqcwNLT+IjW/Y9QFpIooihjPBXMAIAAAAACj2x4O4rHter8ZnTws5LAP9jJ/6kk9C/V3vL50LoFZHAVsACAAAAAAQdBDF3747uCVP5lB/zr8VmzxJfTSZHBKeIgm5FyONXwAAzQ0AH0AAAAFZAAgAAAAAMqpayM2XotEFmm0gwQd9rIzApy0X+7HfOhNk6VU7F5lBXMAIAAAAACJR9+q5T9qFHXFNgGbZnPubG8rkO6cwWhzITQTmd6VgwVsACAAAAAAOZLQ6o7e4mVfDzbpQioa4d3RoTvqwgnbmc5Qh2wsZuoAAzQ1AH0AAAAFZAAgAAAAANCeyW+3oebaQk+aqxNVhAcT/BZ5nhsTVdKS3tMrLSvWBXMAIAAAAADxRFMDhkyuEc++WnndMfoUMLNL7T7rWoeblcrpSI6soQVsACAAAAAAdBuBMJ1lxt0DRq9pOZldQqchLs3B/W02txcMLD490FEAAzQ2AH0AAAAFZAAgAAAAAIbo5YBTxXM7HQhl7UP9NNgpPGFkBx871r1B65G47+K8BXMAIAAAAAC21dJSxnEhnxO5gzN5/34BL4von45e1meW92qowzb8fQVsACAAAAAAm3Hk2cvBN0ANaR5jzeZE5TsdxDvJCTOT1I01X7cNVaYAAzQ3AH0AAAAFZAAgAAAAABm/6pF96j26Jm7z5KkY1y33zcAEXLx2n0DwC03bs/ixBXMAIAAAAAD01OMvTZI/mqMgxIhA5nLs068mW+GKl3OW3ilf2D8+LgVsACAAAAAAaLvJDrqBESTNZSdcXsd+8GXPl8ZkUsGpeYuyYVv/kygAAzQ4AH0AAAAFZAAgAAAAAJ/D3+17gaQdkBqkL2wMwccdmCaVOtxzIkM8VyI4xI5zBXMAIAAAAAAggLVmkc5u+YzBR+oNE+XgLVp64fC6MzUb/Ilu/Jsw0AVsACAAAAAACz3HVKdWkx82/kGbVpcbAeZtsj2R5Zr0dEPfle4IErkAAzQ5AH0AAAAFZAAgAAAAAJMRyUW50oaTzspS6A3TUoXyC3gNYQoShUGPakMmeVZrBXMAIAAAAACona2Pqwt4U2PmFrtmu37jB9kQ/12okyAVtYa8TQkDiQVsACAAAAAAltJJKjCMyBTJ+4PkdDCPJdeX695P8P5h7WOZ+kmExMAAAzUwAH0AAAAFZAAgAAAAAByuYl8dBvfaZ0LO/81JW4hYypeNmvLMaxsIdvqMPrWoBXMAIAAAAABNddwobOUJzm9HOUD8BMZJqkNCUCqstHZkC76FIdNg9AVsACAAAAAAQQOkIQtkyNavqCnhQbNg3HfqrJdsAGaoxSJePJl1qXsAAzUxAH0AAAAFZAAgAAAAAHEzLtfmF/sBcYPPdj8867VmmQyU1xK9I/3Y0478azvABXMAIAAAAAAcmyFajZPnBTbO+oLInNwlApBocUekKkxz2hYFeSlQ+gVsACAAAAAAZ6IkrOVRcC8vSA6Vb4fPWZJrYexXhEabIuYIeXNsCSgAAzUyAH0AAAAFZAAgAAAAAJam7JYsZe2cN20ZYm2W3v1pisNt5PLiniMzymBLWyMtBXMAIAAAAABxCsKVMZMTn3n+R2L7pVz5nW804r8HcK0mCBw3jUXKXAVsACAAAAAA7j3JGnNtR64P4dJLeUoScFRGfa8ekjh3dvhw46sRFk0AAzUzAH0AAAAFZAAgAAAAAMXrXx0saZ+5gORmwM2FLuZG6iuO2YS+1IGPoAtDKoKBBXMAIAAAAADIQsxCr8CfFKaBcx8kIeSywnGh7JHjKRJ9vJd9x79y7wVsACAAAAAAcvBV+SykDYhmRFyVYwFYB9oBKBSHr55Jdz2cXeowsUQAAzU0AH0AAAAFZAAgAAAAACbzcUD3INSnCRspOKF7ubne74OK9L0FTZvi9Ay0JVDYBXMAIAAAAADPebVQH8Btk9rhBIoUOdSAdpPvz7qIY4UC2i6IGisSAQVsACAAAAAAiBunJi0mPnnXdnldiq+If8dcb/n6apHnaIFt+oyYO1kAAzU1AH0AAAAFZAAgAAAAACUc2CtD1MK/UTxtv+8iA9FoHEyTwdl43HKeSwDw2Lp5BXMAIAAAAACCIduIdw65bQMzRYRfjBJj62bc69T4QqH4QoWanwlvowVsACAAAAAAM0TV7S+aPVVzJOQ+cpSNKHTwyQ0mWa8tcHzfk3nR+9IAAzU2AH0AAAAFZAAgAAAAAHSaHWs/dnmI9sc7nB50VB2Bzs0kHapMHCQdyVEYY30TBXMAIAAAAACkV22lhEjWv/9/DubfHBAcwJggKI5mIbSK5L2nyqloqQVsACAAAAAAS19m7DccQxgryOsBJ3GsCs37y
fQqNi1G+S6fCXpEhn4AAzU3AH0AAAAFZAAgAAAAAAL8jhNBG0KXXZhmZ0bPXtfgapJCB/AI+BEHB0eZ3C75BXMAIAAAAADHx/fPa639EBmGV5quLi8IQT600ifiKSOhTDOK19DnzwVsACAAAAAAlyLTDVkHxbayklD6Qymh3odIK1JHaOtps4f4HR+PcDgAAzU4AH0AAAAFZAAgAAAAAAxgeclNl09H7HvzD1oLwb2YpFca5eaX90uStYXHilqKBXMAIAAAAACMU5pSxzIzWlQxHyW170Xs9EhD1hURASQk+qkx7K5Y6AVsACAAAAAAJbMMwJfNftA7Xom8Bw/ghuZmSa3x12vTZxBUbV8m888AAzU5AH0AAAAFZAAgAAAAABmO7QD9vxWMmFjIHz13lyOeV6vHT6mYCsWxF7hb/yOjBXMAIAAAAACT9lmgkiqzuWG24afuzYiCeK9gmJqacmxAruIukd0xEAVsACAAAAAAZa0/FI/GkZR7CtX18Xg9Tn9zfxkD0UoaSt+pIO5t1t4AAzYwAH0AAAAFZAAgAAAAAB89SjLtDJkqEghRGyj6aQ/2qvWLNuMROoXmzbYbCMKMBXMAIAAAAAC8sywgND+CjhVTF6HnRQeay8y9/HnVzDI42dEPah28LQVsACAAAAAAoxv7UKh0RqUAWcOsQvU123zO1qZn73Xfib0qncZCB34AAzYxAH0AAAAFZAAgAAAAABN2alGq9Aats1mwERNGwL/fIwZSvVCe9/8XMHTFlpUpBXMAIAAAAACuDPjJgvvbBYhbLpjMiWUCsVppiYrhvR+yMysNPN8cZAVsACAAAAAAKpADjc4bzIZMi9Q/+oe0EMRJHYQt6dlo1x/lRquagqkAAzYyAH0AAAAFZAAgAAAAAL8YB6VAqGBiWD4CBv16IBscg5J7VQCTZu87n6pj+86KBXMAIAAAAAAmxm8e68geeyAdUjSMWBHzUjneVB0pG9TBXIoE6467hAVsACAAAAAAV76JZAlYpgC/Zl8awx2ArCg1uuyy2XVTSkp0wUMi/7UAAzYzAH0AAAAFZAAgAAAAAL4yLkCTV5Dmxa5toBu4JT8ge/cITAaURIOuFuOtFUkeBXMAIAAAAAAXoFNQOMGkAj7qEJP0wQafmFSXgWGeorDVbwyOxWLIsgVsACAAAAAAc4Un6dtIFe+AQ+RSfNWs3q63RTHhmyc+5GKRRdpWRv8AAzY0AH0AAAAFZAAgAAAAAEU8DoUp46YtYjNFS9kNXwdYxQ9IW27vCTb+VcqqfnKNBXMAIAAAAADe7vBOgYReE8X78k5ARuUnv4GmzPZzg6SbConf4L2G3wVsACAAAAAA78YHWVkp6HbZ0zS4UL2z/2pj9vPDcMDt7zTv6NcRsVsAAzY1AH0AAAAFZAAgAAAAAPa4yKTtkUtySuWo1ZQsp2QXtPb5SYqzA5vYDnS1P6c0BXMAIAAAAADKnF58R1sXlHlsHIvCBR3YWW/qk54z9CTDhZydkD1cOQVsACAAAAAAHW3ERalTFWKMzjuXF3nFh0pSrQxM/ojnPbPhc4v5MaQAAzY2AH0AAAAFZAAgAAAAAN5WJnMBmfgpuQPyonmY5X6OdRvuHw4nhsnGRnFAQ95VBXMAIAAAAACwftzu7KVV1rmGKwXtJjs3cJ1gE3apr8+N0SAg1F2cHwVsACAAAAAATDW0reyaCjbJuVLJzbSLx1OBuBoQu+090kgW4RurVacAAzY3AH0AAAAFZAAgAAAAACHvDsaPhoSb6DeGnKQ1QOpGYAgK82qpnqwcmzSeWaJHBXMAIAAAAABRq3C5+dOfnkAHM5Mg5hPB3O4jhwQlBgQWLA7Ph5bhgwVsACAAAAAAqkC8zYASvkVrp0pqmDyFCkPaDmD/ePAJpMuNOCBhni8AAzY4AH0AAAAFZAAgAAAAAOBePJvccPMJmy515KB1AkXF5Pi8NOG4V8psWy0SPRP+BXMAIAAAAAB3dOJG9xIDtEKCRzeNnPS3bFZepMj8UKBobKpSoCPqpgVsACAAAAAAPG3IxQVOdZrr509ggm5FKizWWoZPuVtOgOIGZ3m+pdEAAzY5AH0AAAAFZAAgAAAAABUvRrDQKEXLMdhnzXRdhiL6AGNs2TojPky+YVLXs+JnBXMAIAAAAAD1kYicbEEcPzD4QtuSYQQWDPq8fuUWGddpWayKn3dT9QVsACAAAAAA9+Sf7PbyFcY45hP9oTfjQiOUS3vEIAT8C0vOHymwYSUAAzcwAH0AAAAFZAAgAAAAAOvSnpujeKNen4pqc2HR63C5s5oJ1Vf4CsbKoYQvkwl5BXMAIAAAAACw2+vAMdibzd2YVVNfk81yXkFZP0WLJ82JBxJmXnYE+QVsACAAAAAArQ/E1ACyhK4ZyLqH9mNkCU7WClqRQTGyW9tciSGG/EMAAzcxAH0AAAAFZAAgAAAAAAo0xfGG7tJ3GWhgPVhW5Zn239nTD3PadShCNRc9TwdNBXMAIAAAAADZh243oOhenu0s/P/5KZLBDh9ADqKHtSWcXpO9D2sIjgVsACAAAAAAlgTPaoQKz+saU8rwCT3UiNOdG6hdpjzFx9GBn08ZkBEAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAABbW4A////////7/8BbXgA////////738A", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoubleNoPrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoubleNoPrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git 
a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json new file mode 100644 index 0000000000..2046630a7b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Aggregate.json @@ -0,0 +1,643 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": 
"DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json new file mode 100644 index 0000000000..939a12c9f8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Correctness.json @@ -0,0 +1,1418 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + 
"encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, 
+ "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": 
"0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 200, + "encryptedDoublePrecision": { + "$numberDouble": "200.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "1.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "1.0" + } + } + } + } + ] 
+ }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lte": { + "$numberDouble": "1.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$lt": { + "$numberDouble": "0.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "200.0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range max" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0.0" + }, + "$lt": { + "$numberDouble": "2.0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { 
+ "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$gte": { + "$numberDouble": "0.0" + }, + "$lte": { + "$numberDouble": "200.0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1.0" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedDoublePrecision": { + "$in": [ + { + "$numberDouble": "0.0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0.0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Int", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json new file mode 100644 index 0000000000..db615d6fe3 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Delete.json @@ -0,0 +1,537 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + 
"contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcju
IU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json new file mode 100644 index 0000000000..a8f87596e8 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate.json @@ -0,0 +1,647 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": 
"default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + 
"subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + 
"$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i
9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json new file mode 100644 index 0000000000..5e4aa5f1e0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-InsertFind.json @@ -0,0 +1,634 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "mVZb+Ra0EYjQ4Zrh9X//E2T8MRj7NMqm5GUJXhRrBEI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "MgwakFvPyBlwqFTbhWUF79URJQWFoJTGotlEVSPPUsQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "DyBERpMSD5lEM5Nhpcn4WGgxgn/mkUVJp+PYSLX5jsE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "I43iazc0xj1WVbYB/V+uTL/tughN1bBlxh1iypBnNsA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wjOBa/ATMuOywFmuPgC0GF/oeLqu0Z7eK5udzkTPbis=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "gRQVwiR+m+0Vg8ZDXqrQQcVnTyobwCXNaA4BCJVXtMc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WUZ6huwx0ZbLb0R00uiC9FOJzsUocUN8qE5+YRenkvQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7s79aKEuPgQcS/YPOOVcYNZvHIo7FFsWtFCrnDKXefA=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": 
{ + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7ll
JVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/NerFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json new file mode 100644 index 0000000000..10cae6be89 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-DoublePrecision-Update.json @@ -0,0 +1,653 @@ +{ + "description": "fle2v2-Rangev2-DoublePrecision-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, 
+ { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range DoublePrecision. Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedDoublePrecision": { + "$numberDouble": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedDoublePrecision": { + "$numberDouble": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedDoublePrecision": { + "$gt": { + "$numberDouble": "0" + } + } + }, + "update": { + "$set": { + "encryptedDoublePrecision": { + "$numberDouble": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "Dri0CXmL78L2DOgk9w0DwxHOMGMzih7m6l59vgy+WWo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "b7d8mRzD1kI1tdc7uNL+YAUonJ6pODLsRLkArfEKSkM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Xg8C1/A0KJaXOw4i+26Rv03/CydaaunOzXh0CIT+gn8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "UoKUDw2wJYToUCcFaIs03YQSTksYR0MIOTJllwODqKc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "c/5cwAT0C5jber2xlJnWD3a5tVDy0nRtr5HG02hoFOY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wSUrRXavAGaajNeqC5mEUH1K67oYl5Wy9RNIzKjwLAM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6vrp4wWDtHEgHWR99I70WVDzevg1Fk/Pw5U8gUDa0OU=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "V6knyt7Zq2CG3++l75UtBx2m32iGAPjHiAe439Bf02w=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "0OKSXELxPP85SBVwDGf3LtMEQCJ8TTkFUl/+6jlkdb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uEw0lpQtBppR3vqV9j9+NQRSBF1BzZukb8c9IhyWvxc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zVhZ7Q59O087ji49oMJvBIgeir2oqvUpnh4p53GcTow=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "dowrzKs+qJhRMZyKDbhjXbuX43FbmUKOaw9I8YlOZDw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ep5B6cska6THLIF7Mn3tn3RvV9EiwLSt0eZM/CLRUDc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "URNp/YmmDh5wIZUfAzzgPyJeMNiVx9PMsz52DZRujGY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wlM4IAQhhKQEzoVqS8b1Ddd50GB95OFb9LnzOwyjCP4=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" 
+ }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedDoublePrecision": { + "$gt": { + "$binary": { + "base64": "DQYKAAADcGF5bG9hZACiCQAABGcAjgkAAAMwAH0AAAAFZAAgAAAAAHdJ2Vnb4MMzqVYVssjSdDy8XU4GVzMTfGifGETgQ2mYBXMAIAAAAAD7cFfKJGIXo6PjyeX2ria02CckW7dWFDoY/3FyBdm1NQVsACAAAAAAhEPSNv4M023A3hzNFuy83+hIKuZ2mKRY954N++aEOBUAAzEAfQAAAAVkACAAAAAAlmvfDrZoydUet4eCVMq7z6a58Ea+1HLJOWxN5lNcrWEFcwAgAAAAAEBo5AWZyC41b9ayjWNQSL4iYEAIwR/JG+ssN8bdoK9RBWwAIAAAAACEndE0SLxFSElOrNnqeX0EPmgDio3udZjVREy4JLS3sQADMgB9AAAABWQAIAAAAABbiLaoxAA6rinMJw1hC8ZUiq6UU1AQaPFn/py/Y06WuQVzACAAAAAAhtDasFkvYE7SCNu1je/hxdE9TJtAvvH3NtdEbKzNbCUFbAAgAAAAAIGepU1RSCF8sWODHEpKglsoqw3VBBH4a/URGxgGzbq2AAMzAH0AAAAFZAAgAAAAALORWwSr+tYNxcil2KIGSbNhTHvcPbdj+rLVQNx21S/KBXMAIAAAAAD6diZBkPEJ1cQy06LAxdbNK8Nlxbb44fH4Wk3Y3260nQVsACAAAAAA1eYAZBFHlDiaDAljWi8blGQ2nvvZa5AO5doeo0SFZsgAAzQAfQAAAAVkACAAAAAAG5XMK96PjClNlUvg82j4pMY1YxsznZfj4uNweD394FoFcwAgAAAAAKHgQLdGJHkrfFg9nB93Ac+3VgBw6aU44MTkKIQ91dZoBWwAIAAAAAAPxXmi+SDJ+40A0KdwfRczexlZQrHjIA+D3oUB0EY9tAADNQB9AAAABWQAIAAAAAA6M++b9I0YFemmWBAWAE3glu2Ah3Ta1FBxAQEIWS0toAVzACAAAAAANXYTqPf1Y6X3Ns6YQIX0C3FKCyWUo+Kk+fNcQvc0WSoFbAAgAAAAAA+uJUw1ICYgyeygSRe206VTWVtUnhdci3iHbyP5YtEVAAM2AH0AAAAFZAAgAAAAAKl8bV1riH/uyJ+X0HHd3+18k2cJl2dQFXCdoagutFcaBXMAIAAAAABm8F2Ew9f0VOABdcF+lP0Bi+zWvEUPniWgrxPq/Sx3uwVsACAAAAAAJfFErjZ6BPhsw5LjJLqNtKDLJ4zV0eIZppQpd9b0wZoAAzcAfQAAAAVkACAAAAAAsYZD8JEP6kYsPncFnNZwJxhu4YtUTKPNcjHtv67H+rYFcwAgAAAAAI4LqZcRkvbs/2F62Flu0pixNcor4WmBD0DHGaf039wLBWwAIAAAAAD4wUR3xd9lKltcqqo8LYvdMQWzCRobkV/ppKB/yn5dUgADOAB9AAAABWQAIAAAAAC0vdAi+dmoIXvZ5LqUqvyKV9/tHqSI2SWiSJO5pTnA2wVzACAAAAAAS2qvf9fvfVUH5WtsVxjxmskpGjYTQV34LwvQQw1y9wIFbAAgAAAAAE0+FKuK7HxbypvCeEJzMTcjOWE0ScYOlTBMUNlIv55hAAM5AH0AAAAFZAAgAAAAAH31lb/srBcrOXkzddCwAnclsR5/3QijEVgECs2JjOWBBXMAIAAAAABg7+prDT73YcCvLE5QbuIrqGcjLc5pQD2Miq0d29yrxgVsACAAAAAAetRiPwDSFWBzpWSWkOKWM6fKStRJ8SyObnpc79ux8p0AAzEwAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzExAH0AAAAFZAAgAAAAAFdthRhe2Q8CvxGIhjTJZv0Lk97GkHciTPxZ/mckLoNaBXMAIAAAAAAqOxsAr23LOVB0DIHbPf9UDJJRFXY2YoKbjhRqw5psbQVsACAAAAAA0G2GD8ZQjDBntjLpW4rqwKRS6HiUjL03g1N6chANozcAAzEyAH0AAAAFZAAgAAAAAMWymwwbvIeMqmnKWWifUqoCxOsdpnonM2qdLPyjqJO/BXMAIAAAAAB6IDmmpUhBD2zpRj8/y/kmOSXcjuIU14sNh6GKSsg2uwVsACAAAAAAWMFPNOk3EMSQDS9JGPSMIQP0oNGVugxXKKUrIPPlhHgAAzEzAH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzE0AH0AAAAFZAAgAAAAAJaRYmo8zqI2BEUzdSwp4tVRpPmVWsfydkYN3UHh6TMuBXMAIAAAAAAeD6mDnQeLlbC9i0sVgE8+RH6y+e94OJQ0tJ0PvblVSgVsACAAAAAAWp4jvretbDEsqEMzP/WLTnwOiJwCtfrCiB6m8k+yEMoAAzE1AH0AAAAFZAAgAAAAAAZZ538coNPwyRjhEwr5P8Xw32oWOJF+R+nfCGgy2qO3BXMAIAAAAACOPLnJlKwGNPDBReRKnHfteq0wFb3ezhrc7BVXs8RUHwVsACAAAAAA+lGesNk3+SyB/60rSvdQ2aN2vfJPR7llJVhufGTNhHkAAzE2AH0AAAAFZAAgAAAAAFH9l9GGA1I52atJV5jNUf1lx8jBjoEoVoME97v5GFJiBXMAIAAAAAC1qH3Kd78Dr9NGbw7y9D/XYBwv5h1LLO8la5OU7g8UkQVsACAAAAAArZ6atJCYrVfHB8dSNPOFf6nnDADBMJcIEj8ljPvxHp8AAzE3AH0AAAAFZAAgAAAAADtbVEI2tdkrowEMdkacD2w0Y3T3Ofi7PH6HmA6sP0c/BXMAIAAAAADuBSROnZHA+NgUPH8d0LnWFiDsM2bY8bzjC1+elSsIygVsACAAAAAAR0G2m+uANoWknkr/N
erFcG+fECVxNIs0cqbY1t/U/0MAAzE4AH0AAAAFZAAgAAAAAAh3WpeMVlikPFYj9hLj+fmIqVt6omCSF75W3TPExyWpBXMAIAAAAAAsQkRmwqeVj2gGE03orb6PtrIzDt6dDU3hgSQi8E2wKgVsACAAAAAA3GHaRE2RAcaBRd8VzmYzWeBD2Gmy91eTK1k8YdWObZcAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHBuAAIAAAAQdGYAAQAAAAFtbgAAAAAAAAAAAAFteAAAAAAAAABpQAA=", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedDoublePrecision": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedDoublePrecision", + "bsonType": "double", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberDouble": "0.0" + }, + "max": { + "$numberDouble": "200.0" + }, + "precision": { + "$numberInt": "2" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json new file mode 100644 index 0000000000..77a8f43e9c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Aggregate.json @@ -0,0 +1,547 @@ +{ + "description": "fle2v2-Rangev2-Int-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + 
"subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": 
"DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json new file mode 100644 index 0000000000..dde5ec371b --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Correctness.json @@ -0,0 +1,1412 @@ +{ + "description": "fle2v2-Rangev2-Int-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, 
+ { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + 
"encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 200, + "encryptedInt": { + "$numberInt": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } 
+ } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lte": { + "$numberInt": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$lt": { + "$numberInt": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "200" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + }, + "$lt": { + "$numberInt": "2" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] 
+ } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$numberInt": "1" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$gte": { + "$numberInt": "0" + }, + "$lte": { + "$numberInt": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + }, + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedInt": { + "$in": [ + { + "$numberInt": "0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json new file mode 100644 index 0000000000..1c54c6e0f6 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Delete.json @@ -0,0 +1,483 @@ +{ + "description": "fle2v2-Rangev2-Int-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": 
"client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + 
"$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json new file mode 100644 index 0000000000..265a0c6f0d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-FindOneAndUpdate.json @@ -0,0 +1,551 @@ +{ + "description": "fle2v2-Rangev2-Int-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], 
+ "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": 
"enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json new file mode 100644 index 0000000000..08b6d2c2a5 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-InsertFind.json @@ -0,0 +1,538 @@ +{ + "description": "fle2v2-Rangev2-Int-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { 
+ "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": 
"encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json new file mode 100644 index 0000000000..9f28f768bb --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Int-Update.json @@ -0,0 +1,557 @@ +{ + "description": "fle2v2-Rangev2-Int-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Int. 
Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": { + "$numberInt": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedInt": { + "$gt": { + "$numberInt": "0" + } + } + }, + "update": { + "$set": { + "encryptedInt": { + "$numberInt": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + 
"ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedInt": { + "$gt": { + "$binary": { + "base64": "DW0FAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedInt": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + "type": 1, + 
"schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json new file mode 100644 index 0000000000..01ff139a55 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Aggregate.json @@ -0,0 +1,547 @@ +{ + "description": "fle2v2-Rangev2-Long-Aggregate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Aggregate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + 
"fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "default", + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + } + } + ], + "cursor": {}, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + 
"keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "aggregate" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json new file mode 100644 index 0000000000..cc5388b1f0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Correctness.json @@ -0,0 +1,1412 @@ +{ + "description": "fle2v2-Rangev2-Long-Correctness", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Find with $gt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + 
} + ] + }, + { + "description": "Find with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Find with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Find with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Find with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + 
"name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Find with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Find with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Insert out of range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "-1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "value must be greater than or equal to the minimum value" + } + } + ] + }, + { + "description": "Insert min and max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": 
"0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 200, + "encryptedLong": { + "$numberLong": "200" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $gt with no results", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [] + } + ] + }, + { + "description": "Aggregate with $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "1" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lte", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lte": { + "$numberLong": "1" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $lt below min", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + 
"object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$lt": { + "$numberLong": "0" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be greater than the range minimum" + } + } + ] + }, + { + "description": "Aggregate with $gt above max", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "200" + } + } + } + } + ] + }, + "object": "coll", + "expectError": { + "errorContains": "must be less than the range maximum" + } + } + ] + }, + { + "description": "Aggregate with $gt and $lt", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + }, + "$lt": { + "$numberLong": "2" + } + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with equality", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "0" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$numberLong": "1" + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with full range", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$gte": { + "$numberLong": "0" + }, + "$lte": { + "$numberLong": "200" + } + } + } + }, + { + "$sort": { + "_id": 1 + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + }, + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $in", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + 
"arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "encryptedLong": { + "$in": [ + { + "$numberLong": "0" + } + ] + } + } + } + ] + }, + "object": "coll", + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + ] + } + ] + }, + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gte": { + "$numberDouble": "0" + } + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json new file mode 100644 index 0000000000..0a8580110c --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Delete.json @@ -0,0 +1,483 @@ +{ + "description": "fle2v2-Rangev2-Long-Delete", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + 
"$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. Delete.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "deleteOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": { + "deletedCount": 1 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": 
"enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "default", + "deletes": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "limit": 1 + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json new file mode 100644 index 0000000000..f014e1a4ac --- /dev/null +++ 
b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-FindOneAndUpdate.json @@ -0,0 +1,551 @@ +{ + "description": "fle2v2-Rangev2-Long-FindOneAndUpdate", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
FindOneAndUpdate.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + }, + "returnDocument": "Before" + }, + "object": "coll", + "expectResult": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + 
"escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "default", + "query": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + 
"default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "findAndModify" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json new file mode 100644 index 0000000000..2896df0032 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-InsertFind.json @@ -0,0 +1,538 @@ +{ + "description": "fle2v2-Rangev2-Long-InsertFind", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Insert and Find.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "25j9sQXZCihCmHKvTHgaBsAVZFcGPn7JjHdrCGlwyyw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FA74j21GUEJb1DJBOpR9nVnjaDZnd8yAQNuaW9Qi26g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kJv//KVkbrobIBf+QeWC5jxn20mx/P0R1N6aCSMgKM8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "zB+Whi9IUUGxfLEe+lGuIzLX4LFbIhaIAm5lRk65QTc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ybO1QU3CgvhO8JgRXH+HxKszWcpl5aGDYYVa75fHa1g=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "X3Y3eSAbbMg//JgiHHiFpYOpV61t8kkDexI+CQyitH4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SlNHXyqVFGDPrX/2ppwog6l4pwj3PKda2TkZbqgfSfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McjV8xwTF3xI7863DYOBdyvIv6UpzThl6v9vBRk05bI=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + 
"$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": 
"04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json new file mode 100644 index 0000000000..4f8cd1d80d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-Long-Update.json @@ -0,0 +1,557 @@ +{ + "description": "fle2v2-Rangev2-Long-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "FLE2 Range Long. 
Update.", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedLong": { + "$numberLong": "0" + } + } + }, + "object": "coll" + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedLong": { + "$numberLong": "1" + } + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedLong": { + "$gt": { + "$numberLong": "0" + } + } + }, + "update": { + "$set": { + "encryptedLong": { + "$numberLong": "2" + } + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "RjBYT2h3ZAoHxhf8DU6/dFbDkEBZp0IxREcsRTu2MXs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "x7GR49EN0t3WXQDihkrbonK7qNIBYC87tpL/XEUyIYc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JfYUqWF+OoGjiYkRI4L5iPlF+T1Eleul7Fki22jp4Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q1RyGfIgsaQHoZFRw+DD28V26rN5hweApPLwExncvT8=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "L2PFeKGvLS6C+DLudR6fGlBq3ERPvjWvRyNRIA2HVb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CWxaNqL3iP1yCixDkcmf9bmW3E5VeN8TJkg1jJe528s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "+vC6araOEo+fpW7PSIP40/EnzBCj1d2N10Jr3rrXJJM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6SV63Mf51Z6A6p2X3rCnJKCu6ku3Oeb45mBYbz+IoAo=", + "subType": "00" + } + } + ] + }, + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "hyDcE6QQjPrYJaIS/n7evEZFYcm31Tj89CpEYGF45cI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "F08nMDWDZc+DbWM7XCEJNNCEYyinRmrvGP7EWhmp4is=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cXH4688amcDc8kZOJq4UP8cE3R58Zl7e+Qo/1jyspps=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uURBxvTp3FBCVkd+LPqyuY7d6rMW6SGIJQEPY/wtkZI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jG3hax1L3RBp9t38vUt53FsBxgr/+Si/vVISpAylYpE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "kwtIW8MhH9Ky5xNjBx8gFA/SHh2YVphie7g5FGBzals=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "FHflwFuEMu4xX0ZApHi+pdlBH+oevAtXckCUb5Wv0xU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ty4cnzJdAlbQKnh7px3GEYjBnvO+jIOaKjoTRDtmh3M=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 0, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + 
"ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedLong": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "ordered": true, + "updates": [ + { + "q": { + "encryptedLong": { + "$gt": { + "$binary": { + "base64": "DXUFAAADcGF5bG9hZAAZBQAABGcABQUAAAMwAH0AAAAFZAAgAAAAALGGQ/CRD+pGLD53BZzWcCcYbuGLVEyjzXIx7b+ux/q2BXMAIAAAAACOC6mXEZL27P9hethZbtKYsTXKK+FpgQ9Axxmn9N/cCwVsACAAAAAA+MFEd8XfZSpbXKqqPC2L3TEFswkaG5Ff6aSgf8p+XVIAAzEAfQAAAAVkACAAAAAAtL3QIvnZqCF72eS6lKr8ilff7R6kiNklokiTuaU5wNsFcwAgAAAAAEtqr3/X731VB+VrbFcY8ZrJKRo2E0Fd+C8L0EMNcvcCBWwAIAAAAABNPhSriux8W8qbwnhCczE3IzlhNEnGDpUwTFDZSL+eYQADMgB9AAAABWQAIAAAAAB99ZW/7KwXKzl5M3XQsAJ3JbEef90IoxFYBArNiYzlgQVzACAAAAAAYO/qaw0+92HAryxOUG7iK6hnIy3OaUA9jIqtHdvcq8YFbAAgAAAAAHrUYj8A0hVgc6VklpDiljOnykrUSfEsjm56XO/bsfKdAAMzAH0AAAAFZAAgAAAAAOK8brUuc2onBNDRtfYMR736dHj4dQqXod8JG7tAMTsDBXMAIAAAAAAW6SrGAL6Bx0s7ZlsYULFfOAiYIGhEWu6md3r+Rk40awVsACAAAAAAIHYXP8RLcCboUmHN3+OlnEw1DxaLSnbTB9PdF228fFAAAzQAfQAAAAVkACAAAAAAV22FGF7ZDwK/EYiGNMlm/QuT3saQdyJM/Fn+ZyQug1oFcwAgAAAAACo7GwCvbcs5UHQMgds9/1QMklEVdjZigpuOFGrDmmxtBWwAIAAAAADQbYYPxlCMMGe2MulbiurApFLoeJSMvTeDU3pyEA2jNwADNQB9AAAABWQAIAAAAADFspsMG7yHjKppyllon1KqAsTrHaZ6JzNqnSz8o6iTvwVzACAAAAAAeiA5pqVIQQ9s6UY/P8v5Jjkl3I7iFNeLDYehikrINrsFbAAgAAAAAFjBTzTpNxDEkA0vSRj0jCED9KDRlboMVyilKyDz5YR4AAM2AH0AAAAFZAAgAAAAAPcLmtq+V1e+MRlZ7NHq1+mrRVBQje5zj685ZvdsfKvSBXMAIAAAAABdHz/3w2k5km97QN9m7oLFYJaVJneNlMboIlz5yUASQAVsACAAAAAAWbp8JVJnx8fEVAJFa7WMfMa7wXeP5M3C8MX20J/i9n0AAzcAfQAAAAVkACAAAAAAYfLwnoxK6XAGQrJFy8+TIJoq38ldBaO75h4zA4ZX5tQFcwAgAAAAAC2wk8UcJH5X5XGnDBYmel6srpBkzBhHtt3Jw1u5TSJ1BWwAIAAAAAA9/YU9eI3D7QbXKIw/3/gzWJ6MZrCYhG0j1wNKgRQp5wADOAB9AAAABWQAIAAAAADGvyrtKkIcaV17ynZA7b2k5Pz6OhvxdWNkDvDWJIja8wVzACAAAAAAOLypVKNxf/wR1G8OZjUUsTQzDYeNNhhITxGMSp7euS4FbAAgAAAAAA9EsxoV1B2DcQ1NJRwuxXnvVR+vkD0wbbDYEI/zFEnDAAM5AH0AAAAFZAAgAAAAAEocREw1L0g+roFUchJI2Yd0M0ME2bnErNUYnpyJP1SqBXMAIAAAAAAcE2/JK/8MoSeOchIuAkKh1X3ImoA7p8ujAZIfvIDo6QVsACAAAAAA+W0+zgLr85/PD7P9a94wk6MgNgrizx/XU9aCxAkp1IwAABJjbQAAAAAAAAAAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgABAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAASbW4AAAAAAAAAAAASbXgAyAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedLong": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "encryptionInformation": { + 
"type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedLong", + "bsonType": "long", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberInt": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberLong": "0" + }, + "max": { + "$numberLong": "200" + } + } + } + ] + } + } + }, + "$db": "default" + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json new file mode 100644 index 0000000000..03681947ce --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Rangev2-WrongType.json @@ -0,0 +1,204 @@ +{ + "description": "fle2v2-Rangev2-WrongType", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.0.0", + "maxServerVersion": "8.99.99", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "range", + "contention": { + "$numberLong": "0" + }, + "trimFactor": { + "$numberLong": "1" + }, + "sparsity": { + "$numberLong": "1" + }, + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + } + } + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Wrong type: Insert Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": 
{ + "_id": 0, + "encryptedInt": { + "$numberDouble": "0" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot encrypt element" + } + } + ] + }, + { + "description": "Wrong type: Find Double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 0, + "encryptedInt": { + "$numberInt": "0" + } + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "encryptedInt": { + "$gte": { + "$numberDouble": "0" + } + } + }, + "sort": { + "_id": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "field type is not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-Update.json b/test/client-side-encryption/spec/unified/fle2v2-Update.json new file mode 100644 index 0000000000..9c39c4d83d --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-Update.json @@ -0,0 +1,633 @@ +{ + "description": "fle2v2-Update", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Update can query an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } 
+ }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + "foo": "bar" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "foo": "bar", + "__safeContent__": [ + { + "$binary": { + "base64": "ThpoKfQ8AkOzkFfNC1+9PF0pY2nIzfXvRdxQgjkNbBw=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "foo": "bar" + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "Update can modify an FLE2 indexed field", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": "value123" + } + }, + "object": "coll" + }, + { + "name": "updateOne", + "arguments": { + "filter": { + "encryptedIndexed": "value123" + }, + "update": { + "$set": { + 
"encryptedIndexed": "value456" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "encryptedIndexed": "value456" + } + ] + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhe7/w8Ob8Unl44rGr/moScx6m5VODQnscDhF4Nkn6g=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encryptedIndexed": { + "$eq": { + "$binary": { + "base64": "DIkAAAAFZAAgAAAAAPtVteJQAlgb2YMa/+7YWH00sbQPyt7L6Rb8OwBdMmL2BXMAIAAAAAAd44hgVKnEnTFlwNVC14oyc9OZOTspeymusqkRQj57nAVsACAAAAAAaZ9s3G+4znfxStxeOZwcZy1OhzjMGc5hjmdMN+b/w6kSY20AAAAAAAAAAAAA", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encryptedIndexed": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": { + "$eq": 1 + } + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json 
b/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..54cc60a3b1 --- /dev/null +++ b/test/client-side-encryption/spec/unified/fle2v2-validatorAndPartialFieldExpression.json @@ -0,0 +1,304 @@ +{ + "description": "fle2v2-validatorAndPartialFieldExpression", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "encryptedFieldsMap": { + "default.encryptedCollection": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + 
"object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "encryptedIndexed": "foo" + } + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "coll", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "name" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encryptedIndexed": "foo" + } + } + ] + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/gcpKMS.json b/test/client-side-encryption/spec/unified/gcpKMS.json new file mode 100644 index 0000000000..6468b5b6ce --- /dev/null +++ b/test/client-side-encryption/spec/unified/gcpKMS.json @@ -0,0 +1,292 @@ +{ + "description": "gcpKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "gcp": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0WyktnB4dfYHo5SLZ41K4ASQrjJUaSzl5vvVH0G12G0SiQEAjlV8XPlbnHDEDFbdTO4QIe8ER2/172U1ouLazG0ysDtFFIlSvWX5ZnZUrRMmp/R2aJkzLXEt/zf8Mn4Lfm+itnjgo5R9K4pmPNvvPKNZX5C16lrPT+aA+rd+zXFSmlMg3i5jnxvTdLHhg3G7Q/Uv1ZIJskKt95bzLoe0tUVzRWMYXLIEcohnQg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1601574333107" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyAltNames": [ + "altname", + "gcp_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using GCP KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_gcp": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_gcp": { + "$binary": { + "base64": "ARgj/gAAAAAAAAAAAAAAAAACwFd+Y5Ojw45GUXNvbcIpN9YkRdoHDHkR4kssdn0tIMKlDQOLFkWFY9X07IRlXsxPD8DcTiKnl6XINK28vhcGlg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/getKey.json 
b/test/client-side-encryption/spec/unified/getKey.json index 6a7269b2ca..2ea3fe7358 100644 --- a/test/client-side-encryption/spec/unified/getKey.json +++ b/test/client-side-encryption/spec/unified/getKey.json @@ -133,7 +133,9 @@ } } }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/getKeyByAltName.json b/test/client-side-encryption/spec/unified/getKeyByAltName.json index f94459bbd8..2505abc16e 100644 --- a/test/client-side-encryption/spec/unified/getKeyByAltName.json +++ b/test/client-side-encryption/spec/unified/getKeyByAltName.json @@ -128,7 +128,9 @@ "arguments": { "keyAltName": "does_not_exist" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/getMore.json b/test/client-side-encryption/spec/unified/getMore.json new file mode 100644 index 0000000000..adaa59b01e --- /dev/null +++ b/test/client-side-encryption/spec/unified/getMore.json @@ -0,0 +1,321 @@ +{ + "description": "getMore", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": 
"datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "getMore with encryption", + "operations": [ + { + "name": "find", + "arguments": { + "batchSize": 2, + "filter": {} + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + }, + { + "_id": 2, + "encrypted_string": "string1" + }, + { + "_id": 3, + "encrypted_string": "string2" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "batchSize": 2 + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "default", + "batchSize": 2 + }, + "commandName": "getMore" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/insert.json b/test/client-side-encryption/spec/unified/insert.json new file mode 100644 index 0000000000..23e4e6c2ae --- /dev/null +++ b/test/client-side-encryption/spec/unified/insert.json @@ -0,0 +1,421 @@ +{ + "description": "insert", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + 
"kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "insertOne with encryption", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": 
"listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "insertMany with encryption", + "operations": [ + { + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + }, + { + "_id": 2, + "encrypted_string": "string1" + } + ] + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/keyAltName.json b/test/client-side-encryption/spec/unified/keyAltName.json new file mode 100644 index 0000000000..826f43df22 --- /dev/null +++ b/test/client-side-encryption/spec/unified/keyAltName.json @@ -0,0 +1,299 @@ +{ + "description": "keyAltName", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + 
"accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with encryption using key alt name", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_w_altname": "string0", + "altname": "altname" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_w_altname": { + "$$type": "binData" + }, + "altname": "altname" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [] + } + }, + { + 
"keyAltNames": { + "$in": [ + "altname" + ] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_w_altname": { + "$$type": "binData" + }, + "altname": "altname" + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Replace with key alt name fails", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "encrypted_w_altname": "string0" + } + }, + "upsert": true + }, + "object": "coll", + "expectError": { + "errorContains": "A non-static (JSONPointer) keyId is not supported" + } + } + ], + "outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/keyCache.json b/test/client-side-encryption/spec/unified/keyCache.json new file mode 100644 index 0000000000..a39701e286 --- /dev/null +++ b/test/client-side-encryption/spec/unified/keyCache.json @@ -0,0 +1,198 @@ +{ + "description": "keyCache-explicit", + "schemaVersion": "1.22", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "OCTP9uKPPmvuqpHlqq83gPk4U6rUPxKVRRyVtrjFmVjdoa4Xzm1SzUbr7aIhNI42czkUBmrCtZKF31eaaJnxEBkqf0RFukA9Mo3NEHQWgAQ2cn9duOcRbaFUQo2z0/rB" + } + }, + "keyExpirationMS": 1 + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + }, + "keyAltNames": [], + "keyMaterial": { + "$binary": { + "base64": "iocBkhO3YBokiJ+FtxDTS71/qKXQ7tSWhWbcnFTXBcMjarsepvALeJ5li+SdUd9ePuatjidxAdMo7vh1V2ZESLMkQWdpPJ9PaJjA67gKQKbbbB4Ik5F2uKjULvrMBnFNVRMup4JNUwWFQJpqbfMveXnUVcD06+pUpAkml/f+DSXrV3e5rxciiNVtz03dAG8wJrsKsFXWj6vTjFhsfknyBA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "decrypt, wait, and decrypt again", + "operations": [ + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AWvmFs3coEwButv3DVJKmYkCJ6lUzRX9R28WNlw5uyndb+8gurA+p8q14s7GZ04K2ZvghieRlAr5UwZbow3PMq27u5EIhDDczwBFcbdP1amllw==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 50 + } + }, + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AWvmFs3coEwButv3DVJKmYkCJ6lUzRX9R28WNlw5uyndb+8gurA+p8q14s7GZ04K2ZvghieRlAr5UwZbow3PMq27u5EIhDDczwBFcbdP1amllw==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "a+YWzdygTAG62/cNUkqZiQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/kmipKMS.json b/test/client-side-encryption/spec/unified/kmipKMS.json new file mode 100644 index 0000000000..e19f85882b --- /dev/null +++ b/test/client-side-encryption/spec/unified/kmipKMS.json @@ -0,0 +1,415 @@ +{ + "description": "kmipKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "kmip": { + "endpoint": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string_aws": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_azure": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AZURE+AAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_gcp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "GCP+AAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_local": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "encrypted_string_kmip_delegated": { + "encrypt": { + "keyId": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"eUYDyB0HuWb+lQgUwO+6qJQyTTDTY2gp9FbemL7ZFo0pvr0x6rm6Ff9OVUTGH6HyMKipaeHdiIJU1dzsLwvqKvi7Beh+U4iaIWX/K0oEg1GOsJc0+Z/in8gNHbGUYLmycHViM3LES3kdt7FdFSUl5rEBHrM71yoNEXImz17QJWMGOuT4x6yoi2pvnaRJwfrI4DjpmnnTrDMac92jgZehbg==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "keyId": "1" + }, + "keyAltNames": [ + "altname", + "kmip_altname" + ] + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + }, + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1634220190041" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": "11" + }, + "keyAltNames": [ + "delegated" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert a document with auto encryption using KMIP KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "dBHpr8aITfeBQ15grpbLpQ==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6UCKCwtKFIsL8eKObDVxvqGupJNUk7kXswHhB7G5j/C1D+6no+Asra0KgSU43bTL3ooIBLVyIzbV5CDJYqzAsa4WQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Insert a document with auto encryption using KMIP delegated KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string_kmip_delegated": "string0" + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + 
"commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba6" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string_kmip_delegated": { + "$binary": { + "base64": "AXQR6a/GiE33gUNeYK6Wy6YCkB+8NVfAAjIbvLqyXIg6g1a8tXrym92DPoqmxpcdQyH0vQM3aFNMz7tZwQBimKs29ztZV/LWjM633HhO5ACl9A==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/localKMS.json b/test/client-side-encryption/spec/unified/localKMS.json new file mode 100644 index 0000000000..03b8486484 --- /dev/null +++ b/test/client-side-encryption/spec/unified/localKMS.json @@ -0,0 +1,261 @@ +{ + "description": "localKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "Ce9HSz/HKKGkIt4uyy+jDuKGA+rLC2cycykMo6vc8jXxqa1UVDYHWq1r+vZKbnnSRBfB981akzRKZCFpC05CTyFqDhXv6OnMjpG97OZEREGIsHEYiJkBW0jJJvfLLgeLsEpBzsro9FztGGXASxyxFRZFhXvHxyiLOKrdWfs7X1O/iK3pEoHMx6uSNSfUOgbebLfIqW7TO++iQS5g1xovXA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + } + ], + "tests": [ + { + "description": "Insert a 
document with auto encryption using local KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll" + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACV/+zJmpqMU47yxS/xIVAviGi7wHDuFwaULAixEAoIh0xHz73UYOM3D8D44gcJn67EROjbz4ITpYzzlCJovDL0Q==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault" + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACV/+zJmpqMU47yxS/xIVAviGi7wHDuFwaULAixEAoIh0xHz73UYOM3D8D44gcJn67EROjbz4ITpYzzlCJovDL0Q==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/localSchema.json b/test/client-side-encryption/spec/unified/localSchema.json new file mode 100644 index 0000000000..685ee39d7c --- /dev/null +++ b/test/client-side-encryption/spec/unified/localSchema.json @@ -0,0 +1,337 @@ +{ + "description": "localSchema", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "test": { + "bsonType": "string" + } + }, + "bsonType": "object", + "required": [ + "test" + ] + } + }, + 
"kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + }, + { + "database": { + "id": "encryptedDB2", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl2", + "database": "encryptedDB2", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "A local schema should override", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "commandName": "find", + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "ATskEQHiu0JVjK9BNsVmqWIClDjVEWlpmVRN76InSQuFW2piVbYFkh0QhZCKyx9DdvFBUG+FWluh0kXyhdq3b2Vt/nqNWjXn2y0+JPhrc4W+wQ==", + "subType": "06" + } + } + } + ], + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "command": { + "find": "default", + "filter": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "default", + "databaseName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "ATskEQHiu0JVjK9BNsVmqWIClDjVEWlpmVRN76InSQuFW2piVbYFkh0QhZCKyx9DdvFBUG+FWluh0kXyhdq3b2Vt/nqNWjXn2y0+JPhrc4W+wQ==", + "subType": "06" + } + } + } + ] + } + ] + }, + { + "description": "A local schema with no encryption is an error", + "operations": [ + { + "object": "encryptedColl2", + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "expectError": { + "isError": true, + "errorContains": "JSON schema keyword 'required' is only allowed with a remote schema" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/malformedCiphertext.json b/test/client-side-encryption/spec/unified/malformedCiphertext.json new file mode 100644 index 0000000000..550928f1e0 --- /dev/null +++ b/test/client-side-encryption/spec/unified/malformedCiphertext.json @@ -0,0 +1,241 @@ +{ + "description": "malformedCiphertext", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "00" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQ==", + "subType": "06" + } + } + }, + { + "_id": 3, + "encrypted_string": { + "$binary": { + "base64": "AQAAa2V2aW4gYWxiZXJ0c29uCg==", + "subType": "06" + } + } + } + ] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Wrong subtype", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "Empty data", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 2 + } + }, + "object": "coll", + "expectError": { + "errorContains": "malformed ciphertext" + } + } + ] + }, + { + "description": "Malformed data", + "operations": [ + { + "name": "find", + "arguments": { + "filter": { + "_id": 3 + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/maxWireVersion.json b/test/client-side-encryption/spec/unified/maxWireVersion.json new file mode 100644 index 0000000000..f7a5f0b7db --- /dev/null +++ b/test/client-side-encryption/spec/unified/maxWireVersion.json @@ -0,0 +1,108 @@ +{ + "description": "maxWireVersion", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99", + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + }, + "extraOptions": { + "mongocryptdBypassSpawn": true + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "operation fails with maxWireVersion < 8", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + 
"document": { + "encrypted_string": "string0" + } + }, + "expectError": { + "errorContains": "Auto-encryption requires a minimum MongoDB version of 4.2" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/missingKey.json b/test/client-side-encryption/spec/unified/missingKey.json new file mode 100644 index 0000000000..af0fd5812a --- /dev/null +++ b/test/client-side-encryption/spec/unified/missingKey.json @@ -0,0 +1,233 @@ +{ + "description": "missingKey", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.different", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "Insert with encryption on a missing key", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "errorContains": "not all keys requested were satisfied" + } + } + ], + 
"outcome": [ + { + "documents": [], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "different", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json b/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json new file mode 100644 index 0000000000..4d75e4cf51 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-createDataKey.json @@ -0,0 +1,396 @@ +{ + "description": "namedKMS-createDataKey", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name1": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure:name1": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp:name1": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip:name1": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local:name1": { + "key": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [] + } + ], + "tests": [ + { + "description": "create data key with named AWS KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "aws:name1", + "opts": { + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named Azure KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "azure:name1", + "opts": { 
+ "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named GCP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "gcp:name1", + "opts": { + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named KMIP KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "kmip:name1" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "create datakey with named local KMS provider", + "operations": [ + { + "name": "createDataKey", + "object": "clientEncryption0", + "arguments": { + "kmsProvider": "local:name1" + }, + "expectResult": { + "$$type": "binData" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "insert": "datakeys", + "documents": [ + { + "_id": { + "$$type": "binData" + }, + "keyMaterial": { + "$$type": "binData" + }, + "creationDate": { + "$$type": "date" + }, + "updateDate": { + "$$type": "date" + }, + "status": { + "$$exists": true + }, + "masterKey": { + "provider": "local:name1" + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-explicit.json 
b/test/client-side-encryption/spec/unified/namedKMS-explicit.json new file mode 100644 index 0000000000..e28d7e8b30 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-explicit.json @@ -0,0 +1,130 @@ +{ + "description": "namedKMS-explicit", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyAltNames": [ + "local:name2" + ], + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ] + } + ], + "tests": [ + { + "description": "can explicitly encrypt with a named KMS provider", + "operations": [ + { + "name": "encrypt", + "object": "clientEncryption0", + "arguments": { + "value": "foobar", + "opts": { + "keyAltName": "local:name2", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "expectResult": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC4yX2LTAuN253GAkEO2ZXp4GpCyM7yoVNJMQQl+6uzxMs03IprLC7DL2vr18x9LwOimjTS9YbMJhrnFkEPuNhbg==", + "subType": "06" + } + } + } + ] + }, + { + "description": "can explicitly decrypt with a named KMS provider", + "operations": [ + { + "name": "decrypt", + "object": "clientEncryption0", + "arguments": { + "value": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC4yX2LTAuN253GAkEO2ZXp4GpCyM7yoVNJMQQl+6uzxMs03IprLC7DL2vr18x9LwOimjTS9YbMJhrnFkEPuNhbg==", + "subType": "06" + } + } + }, + "expectResult": "foobar" + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json new file mode 100644 index 0000000000..b3b9bd2477 --- /dev/null +++ b/test/client-side-encryption/spec/unified/namedKMS-rewrapManyDataKey.json @@ -0,0 +1,1385 @@ +{ + "description": "namedKMS-rewrapManyDataKey", + "schemaVersion": "1.18", + "runOnRequirements": [ + { + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name1": { + "accessKeyId": { + "$$placeholder": 1 + }, + 
"secretAccessKey": { + "$$placeholder": 1 + } + }, + "azure:name1": { + "tenantId": { + "$$placeholder": 1 + }, + "clientId": { + "$$placeholder": 1 + }, + "clientSecret": { + "$$placeholder": 1 + } + }, + "gcp:name1": { + "email": { + "$$placeholder": 1 + }, + "privateKey": { + "$$placeholder": 1 + } + }, + "kmip:name1": { + "endpoint": { + "$$placeholder": 1 + } + }, + "local:name1": { + "key": { + "$$placeholder": 1 + } + }, + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + }, + "aws:name2": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "keyvault" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "datakeys" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "YXdzYXdzYXdzYXdzYXdzYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "aws:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gFXJqbF0Fy872MD7xl56D/2AAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDO7HPisPUlGzaio9vgIBEIB7/Qow46PMh/8JbEUbdXgTGhLfXPE+KIVW7T8s6YEMlGiRvMu7TV0QCIUJlSHPKZxzlJ2iwuz5yXeOag+EdY+eIQ0RKrsJ3b8UTisZYzGjfzZnxUKLzLoeXremtRCm3x47wCuHKd1dhh6FBbYt5TL2tDaj+vL2GBrKat2L", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + } + }, + { + "_id": { + "$binary": { + "base64": "YXp1cmVhenVyZWF6dXJlYQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "azure:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "Z2NwZ2NwZ2NwZ2NwZ2NwZw==", + "subType": "04" + } + }, + "keyAltNames": [ + "gcp:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CiQAIgLj0USbQtof/pYRLQO96yg/JEtZbD1UxKueaC37yzT5tTkSiQEAhClWB5ZCSgzHgxv8raWjNB4r7e8ePGdsmSuYTYmLC5oHHS/BdQisConzNKFaobEQZHamTCjyhy5NotKF8MWoo+dyfQApwI29+vAGyrUIQCXzKwRnNdNQ+lb3vJtS5bqvLTvSxKHpVca2kqyC9nhonV+u4qru5Q2bAqUgVFc8fL4pBuvlowZFTQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + 
"keyName": "key-name-csfle" + } + }, + { + "_id": { + "$binary": { + "base64": "a21pcGttaXBrbWlwa21pcA==", + "subType": "04" + } + }, + "keyAltNames": [ + "kmip:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "kmip:name1", + "keyId": "1" + } + }, + { + "_id": { + "$binary": { + "base64": "bG9jYWxrZXlsb2NhbGtleQ==", + "subType": "04" + } + }, + "keyAltNames": [ + "local:name1_key" + ], + "keyMaterial": { + "$binary": { + "base64": "ABKBldDEoDW323yejOnIRk6YQmlD9d3eQthd16scKL75nz2LjNL9fgPDZWrFFOlqlhMCFaSrNJfGrFUjYk5JFDO7soG5Syb50k1niJoKg4ilsj0L4mpimFUtTpOr2nzZOeQtvAksEXc7gsFgq8gV7t/U3lsaXPY7I0t42DfSE8EGlPdxRjFdHnxh+OR8h7U9b8Qs5K5UuhgyeyxaBZ1Hgw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local:name1" + } + } + ] + } + ], + "tests": [ + { + "description": "rewrap to aws:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "aws:name1_key" + } + }, + "opts": { + "provider": "aws:name1", + "masterKey": { + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "aws:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + 
"$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name1", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to azure:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "azure:name1_key" + } + }, + "opts": { + "provider": "azure:name1", + "masterKey": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "azure:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure:name1", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to gcp:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": 
"clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "gcp:name1_key" + } + }, + "opts": { + "provider": "gcp:name1", + "masterKey": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "gcp:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp:name1", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to kmip:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip:name1_key" + } + }, + "opts": { + "provider": "kmip:name1" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip:name1_key" + } + }, + "readConcern": { + "level": 
"majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip:name1", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap to local:name1", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "local:name1_key" + } + }, + "opts": { + "provider": "local:name1" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "local:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + 
"$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap from local:name1 to local:name2", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$eq": "local:name1_key" + } + }, + "opts": { + "provider": "local:name2" + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$eq": "local:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local:name2" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap from aws:name1 to aws:name2", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$eq": "aws:name1_key" + } + }, + "opts": { + "provider": "aws:name2", + "masterKey": { + "key": "arn:aws:kms:us-east-1:857654397073:key/0f8468f0-f135-4226-aa0b-bd05c4c30df5", + "region": "us-east-1" + } + } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$eq": "aws:name1_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws:name2", + "key": "arn:aws:kms:us-east-1:857654397073:key/0f8468f0-f135-4226-aa0b-bd05c4c30df5", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/namedKMS.json b/test/client-side-encryption/spec/unified/namedKMS.json new file mode 100644 index 0000000000..5e203865fd --- /dev/null +++ 
b/test/client-side-encryption/spec/unified/namedKMS.json @@ -0,0 +1,241 @@ +{ + "description": "namedKMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local:name2": { + "key": "local+name2+YUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "DX3iUuOlBsx6wBX9UZ3v/qXk1HNeBace2J+h/JwsDdF/vmSXLZ1l1VmZYIcpVFy6ODhdbzLjd4pNgg9wcm4etYig62KNkmtZ0/s1tAL5VsuW/s7/3PYnYGznZTFhLjIVcOH/RNoRj2eQb/sRTyivL85wePEpAU/JzuBj6qO9Y5txQgs1k0J3aNy10R9aQ8kC1NuSSpLAIXwE6DlNDDJXhw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local:name2" + } + } + ] + } + ], + "tests": [ + { + "description": "Automatically encrypt and decrypt with a named KMS provider", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "object": "coll", + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "local+name2+AAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": 
"AZaHGpfp2pntvgAAAAAAAAAC07sFvTQ0I4O2U49hpr4HezaK44Ivluzv5ntQBTYHDlAJMLyRMyB6Dl+UGHBgqhHe/Xw+pcT9XdiUoOJYAx9g+w==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/noSchema.json b/test/client-side-encryption/spec/unified/noSchema.json new file mode 100644 index 0000000000..c18afa4ed4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/noSchema.json @@ -0,0 +1,115 @@ +{ + "description": "noSchema", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "unencrypted" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "unencrypted", + "documents": [] + } + ], + "tests": [ + { + "description": "Insert on an unencrypted collection", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1 + } + }, + "object": "coll" + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1 + } + ], + "collectionName": "unencrypted", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "unencrypted" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "unencrypted", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/removeKeyAltName.json b/test/client-side-encryption/spec/unified/removeKeyAltName.json index bef13c87de..1b7077077a 100644 --- a/test/client-side-encryption/spec/unified/removeKeyAltName.json +++ b/test/client-side-encryption/spec/unified/removeKeyAltName.json @@ -102,7 +102,9 @@ }, "keyAltName": "does_not_exist" }, - "expectResult": null + "expectResult": { + "$$unsetOrMatches": null + } } ], "expectEvents": [ diff --git a/test/client-side-encryption/spec/unified/replaceOne.json b/test/client-side-encryption/spec/unified/replaceOne.json new file mode 100644 index 0000000000..a093e238ba --- /dev/null +++ b/test/client-side-encryption/spec/unified/replaceOne.json @@ -0,0 +1,316 @@ +{ + "description": "replaceOne", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + 
"collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "replaceOne with encryption", + "operations": [ + { + "name": "replaceOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "replacement": { + "encrypted_string": "string1", + "random": "abc" + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": 
{ + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json index 7e3abb1274..8803491dbe 100644 --- a/test/client-side-encryption/spec/unified/rewrapManyDataKey.json +++ b/test/client-side-encryption/spec/unified/rewrapManyDataKey.json @@ -1,5 +1,5 @@ { - "description": "rewrapManyDataKey-kms_providers", + "description": "rewrapManyDataKey", "schemaVersion": "1.8", "runOnRequirements": [ { @@ -128,7 +128,7 @@ ], "keyMaterial": { "$binary": { - "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEGkNTybTc7Eyif0f+qqE0lAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDB2j78AeuIQxcRh8cQIBEIB7vj9buHEaT7XHFIsKBJiyzZRmNnjvqMK5LSdzonKdx97jlqauvPvTDXSsdQDcspUs5oLrGmAXpbFResscxmbwZoKgUtWiuIOpeAcYuszCiMKt15s1WIMLDXUhYtfCmhRhekvgHnRAaK4HJMlGE+lKJXYI84E0b86Cd/g+", + "base64": "pr01l7qDygUkFE/0peFwpnNlv3iIy8zrQK38Q9i12UCN2jwZHDmfyx8wokiIKMb9kAleeY+vnt3Cf1MKu9kcDmI+KxbNDd+V3ytAAGzOVLDJr77CiWjF9f8ntkXRHrAY9WwnVDANYkDwXlyU0Y2GQFTiW65jiQhUtYLYH63Tk48SsJuQvnWw1Q+PzY8ga+QeVec8wbcThwtm+r2IHsCFnc72Gv73qq7weISw+O4mN08z3wOp5FOS2ZM3MK7tBGmPdBcktW7F8ODGsOQ1FU53OrWUnyX2aTi2ftFFFMWVHqQo7EYuBZHru8RRODNKMyQk0BFfKovAeTAVRv9WH9QU7g==", "subType": "00" } }, @@ -196,7 +196,7 @@ ], "keyMaterial": { "$binary": { - "base64": "VoI9J8HusQ3u2gT9i8Awgg/6W4/igvLwRzn3SRDGx0Dl/1ayDMubphOw0ONPVKfuvS6HL3e4gAoCJ/uEz2KLFTVsEqYCpMhfAhgXxm8Ena8vDcOkCzFX+euvN/N2ES3wpzAD18b3qIH0MbBwKJP82d5GQ4pVfGnPW8Ujp9aO1qC/s0EqNqYyzJ1SyzhV9lAjHHGIENYJx+bBrekg2EeZBA==", + "base64": "CklVctHzke4mcytd0TxGqvepkdkQN8NUF4+jV7aZQITAKdz6WjdDpq3lMt9nSzWGG2vAEfvRb3mFEVjV57qqGqxjq2751gmiMRHXz0btStbIK3mQ5xbY9kdye4tsixlCryEwQONr96gwlwKKI9Nubl9/8+uRF6tgYjje7Q7OjauEf1SrJwKcoQ3WwnjZmEqAug0kImCpJ/irhdqPzivRiA==", "subType": "00" } }, @@ -246,6 +246,36 @@ "masterKey": { "provider": "local" } + }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba5" + }, + "keyAltNames": [ + "kmip_delegated_key" + ], + "keyMaterial": { + "$binary": { + "base64": "5TLMFWlguBWe5GUESTvOVtkdBsCrynhnV72XRyZ66/nk+EP9/1oEp1t1sg0+vwCTqULHjBiUE6DRx2mYD/Eup1+u2Jgz9/+1sV1drXeOPALNPkSgiZiDbIb67zRi+wTABEcKcegJH+FhmSGxwUoQAiHCsCbcvia5P8tN1lt98YQ=", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": 
"kmip", + "keyId": "11", + "delegated": true + } } ] } @@ -317,11 +347,14 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -437,6 +470,34 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/061334ae-07a8-4ceb-a813-8135540e837d", + "region": "us-east-1" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -499,11 +560,14 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -619,6 +683,34 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "azure", + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -683,11 +775,14 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -809,6 +904,36 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "gcp", + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -869,11 +994,14 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -1020,6 +1148,257 @@ "upsert": { "$$unsetOrMatches": false } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + ] + }, + { + "description": "rewrap with new KMIP delegated KMS provider", + "operations": [ + { + "name": "rewrapManyDataKey", + "object": "clientEncryption0", + "arguments": { + "filter": { + "keyAltNames": { + "$ne": "kmip_delegated_key" + } + }, + "opts": { + "provider": "kmip", + "masterKey": { + "delegated": true + } 
+ } + }, + "expectResult": { + "bulkWriteResult": { + "insertedCount": 0, + "matchedCount": 5, + "modifiedCount": 5, + "deletedCount": 0, + "upsertedCount": 0, + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "find": "datakeys", + "filter": { + "keyAltNames": { + "$ne": "kmip_delegated_key" + } + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "command": { + "update": "datakeys", + "ordered": true, + "updates": [ + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "kmip", + "delegated": true, + "keyId": { + "$$type": "string" + } + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } } ], "writeConcern": { @@ -1051,11 +1430,14 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 4, - "modifiedCount": 4, + "matchedCount": 5, + "modifiedCount": 5, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + } } } } @@ -1165,6 +1547,32 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "provider": "local" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { @@ -1214,11 +1622,14 @@ "expectResult": { "bulkWriteResult": { "insertedCount": 0, - "matchedCount": 5, - "modifiedCount": 5, + "matchedCount": 6, + "modifiedCount": 6, "deletedCount": 0, "upsertedCount": 0, - "upsertedIds": {} + "upsertedIds": {}, + "insertedIds": { + "$$unsetOrMatches": {} + 
} } } }, @@ -1276,6 +1687,16 @@ "keyName": "key-name-csfle" } }, + { + "_id": { + "$uuid": "7411e9af-c688-4df7-8143-5e60ae96cba5" + }, + "masterKey": { + "provider": "kmip", + "keyId": "11", + "delegated": true + } + }, { "_id": { "$binary": { @@ -1429,6 +1850,32 @@ "$$unsetOrMatches": false } }, + { + "q": { + "_id": { + "$$type": "binData" + } + }, + "u": { + "$set": { + "masterKey": { + "$$type": "object" + }, + "keyMaterial": { + "$$type": "binData" + } + }, + "$currentDate": { + "updateDate": true + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, { "q": { "_id": { diff --git a/test/client-side-encryption/spec/unified/timeoutMS.json b/test/client-side-encryption/spec/unified/timeoutMS.json new file mode 100644 index 0000000000..98dc50e98a --- /dev/null +++ b/test/client-side-encryption/spec/unified/timeoutMS.json @@ -0,0 +1,270 @@ +{ + "description": "timeoutMS", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "timeoutMS": 500 + } + } + }, + { + "client": { + "id": "client1", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "cse-timeouts-db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "cse-timeouts-coll" + } + } + ], + "initialData": [ + { + "databaseName": "cse-timeouts-db", + "collectionName": "cse-timeouts-coll", + "documents": [], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "timeoutMS applied to listCollections to get collection schema", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "cse-timeouts-coll" + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + }, + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "remaining timeoutMS applied to find to get keyvault data", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections", + "find" + ], + "blockConnection": true, + "blockTimeMS": 300 + } + } + } + }, + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0", + "random": "abc" + } + }, + "object": "coll", + "expectError": { + "isTimeoutError": true + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/types.json b/test/client-side-encryption/spec/unified/types.json new file mode 100644 index 0000000000..3bb49f2a64 --- /dev/null +++ b/test/client-side-encryption/spec/unified/types.json @@ -0,0 +1,2262 @@ +{ + "description": "types", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_objectId": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "objectId", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db0", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll0", + "database": "db0", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_symbol": { + 
"encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "symbol", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db1", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll1", + "database": "db1", + "collectionName": "default" + } + }, + { + "client": { + "id": "client2", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_int": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db2", + "client": "client2", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll2", + "database": "db2", + "collectionName": "default" + } + }, + { + "client": { + "id": "client3", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_double": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "double", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db3", + "client": "client3", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll3", + "database": "db3", + "collectionName": "default" + } + }, + { + "client": { + "id": "client4", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_decimal": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "decimal", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db4", + "client": "client4", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll4", + "database": "db4", + "collectionName": "default" + } + }, + { + "client": { + "id": "client5", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_binData": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "binData", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": 
{ + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db5", + "client": "client5", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll5", + "database": "db5", + "collectionName": "default" + } + }, + { + "client": { + "id": "client6", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_javascript": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "javascript", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db6", + "client": "client6", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll6", + "database": "db6", + "collectionName": "default" + } + }, + { + "client": { + "id": "client7", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_javascriptWithScope": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "javascriptWithScope", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db7", + "client": "client7", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll7", + "database": "db7", + "collectionName": "default" + } + }, + { + "client": { + "id": "client8", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_object": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "object", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db8", + "client": "client8", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll8", + "database": "db8", + "collectionName": "default" + } + }, + { + "client": { + "id": "client9", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_timestamp": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "timestamp", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db9", + 
"client": "client9", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll9", + "database": "db9", + "collectionName": "default" + } + }, + { + "client": { + "id": "client10", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_regex": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "regex", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db10", + "client": "client10", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll10", + "database": "db10", + "collectionName": "default" + } + }, + { + "client": { + "id": "client11", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_date": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "date", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db11", + "client": "client11", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll11", + "database": "db11", + "collectionName": "default" + } + }, + { + "client": { + "id": "client12", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_minKey": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "minKey", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db12", + "client": "client12", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll12", + "database": "db12", + "collectionName": "default" + } + }, + { + "client": { + "id": "client13", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_maxKey": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "maxKey", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db13", + "client": "client13", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll13", + "database": "db13", + "collectionName": "default" + } + }, + { + "client": { + "id": "client14", + "autoEncryptOpts": { + "keyVaultNamespace": 
"keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_undefined": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "undefined", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db14", + "client": "client14", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll14", + "database": "db14", + "collectionName": "default" + } + }, + { + "client": { + "id": "client15", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_array": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "array", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db15", + "client": "client15", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll15", + "database": "db15", + "collectionName": "default" + } + }, + { + "client": { + "id": "client16", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_bool": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "bool", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db16", + "client": "client16", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll16", + "database": "db16", + "collectionName": "default" + } + }, + { + "client": { + "id": "client17", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "schemaMap": { + "default.default": { + "properties": { + "encrypted_null": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "null", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db17", + "client": "client17", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll17", + "database": "db17", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + 
"provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "type=objectId", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_objectId": { + "$oid": "AAAAAAAAAAAAAAAAAAAAAAAA" + } + } + }, + "object": "coll0" + }, + { + "name": "find", + "object": "coll0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_objectId": { + "$oid": "AAAAAAAAAAAAAAAAAAAAAAAA" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_objectId": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAHmkTPqvzfHMWpvS1mEsrjOxVQ2dyihEgIFWD5E0eNEsiMBQsC0GuvjdqYRL5DHLFI1vKuGek7EYYp0Qyii/tHqA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_objectId": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAHmkTPqvzfHMWpvS1mEsrjOxVQ2dyihEgIFWD5E0eNEsiMBQsC0GuvjdqYRL5DHLFI1vKuGek7EYYp0Qyii/tHqA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=symbol", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_symbol": { + "$symbol": "test" + } + } + }, + "object": "coll1" + }, + { + "name": "find", + "object": "coll1", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_symbol": { + "$symbol": "test" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_symbol": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAOOmvDmWjcuKsSCO7U/7t9HJ8eI73B6wduyMbdkvn7n7V4uTJes/j+BTtneSdyG2JHKHGkevWAJSIU2XoO66BSXw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + 
"$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_symbol": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAOOmvDmWjcuKsSCO7U/7t9HJ8eI73B6wduyMbdkvn7n7V4uTJes/j+BTtneSdyG2JHKHGkevWAJSIU2XoO66BSXw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=int", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_int": { + "$numberInt": "123" + } + } + }, + "object": "coll2" + }, + { + "name": "find", + "object": "coll2", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_int": { + "$numberInt": "123" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_int": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAQPNXJVXMEjGZnftMuf2INKufXCtQIRHdw5wTgn6QYt3ejcoAXyiwI4XIUizkpsob494qpt2in4tWeiO7b9zkA8Q==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client2", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_int": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAQPNXJVXMEjGZnftMuf2INKufXCtQIRHdw5wTgn6QYt3ejcoAXyiwI4XIUizkpsob494qpt2in4tWeiO7b9zkA8Q==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=double", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_double": { + "$numberDouble": "1.23" + } + } + }, + "object": "coll3", + "expectError": { + "errorContains": "element of type: double" + } + } + ] + }, + { + "description": "type=decimal", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_decimal": { + "$numberDecimal": "1.23" + } + } + }, + "object": "coll4", + "expectError": { + "errorContains": "element of type: decimal" + } + } + ] + }, + { + "description": "type=binData", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AAAA", + "subType": "00" + } + } + } + }, + "object": "coll5" + }, + { + "name": "find", + "object": "coll5", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AAAA", + "subType": "00" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": 
"AQAAAAAAAAAAAAAAAAAAAAAFB/KHZQHaHHo8fctcl7v6kR+sLkJoTRx2cPSSck9ya+nbGROSeFhdhDRHaCzhV78fDEqnMDSVPNi+ZkbaIh46GQ==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client5", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_binData": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAFB/KHZQHaHHo8fctcl7v6kR+sLkJoTRx2cPSSck9ya+nbGROSeFhdhDRHaCzhV78fDEqnMDSVPNi+ZkbaIh46GQ==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=javascript", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_javascript": { + "$code": "var x = 1;" + } + } + }, + "object": "coll6" + }, + { + "name": "find", + "object": "coll6", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_javascript": { + "$code": "var x = 1;" + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_javascript": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAANrvMgJkTKWGMc9wt3E2RBR2Hu5gL9p+vIIdHe9FcOm99t1W480/oX1Gnd87ON3B399DuFaxi/aaIiQSo7gTX6Lw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client6", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_javascript": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAANrvMgJkTKWGMc9wt3E2RBR2Hu5gL9p+vIIdHe9FcOm99t1W480/oX1Gnd87ON3B399DuFaxi/aaIiQSo7gTX6Lw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=javascriptWithScope", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_javascriptWithScope": { + "$code": "var x = 1;", + "$scope": {} + } + } + }, + "object": "coll7", + "expectError": { + "errorContains": "element of type: javascriptWithScope" + } + } + ] + }, + { + "description": "type=object", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_object": {} + } + }, + "object": "coll8", + "expectError": { + "errorContains": "element of type: object" + } + } + ] + }, + { + "description": "type=timestamp", + "operations": [ + { + "name": "insertOne", 
+ "arguments": { + "document": { + "_id": 1, + "encrypted_timestamp": { + "$timestamp": { + "t": 123, + "i": 456 + } + } + } + }, + "object": "coll9" + }, + { + "name": "find", + "object": "coll9", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_timestamp": { + "$timestamp": { + "t": 123, + "i": 456 + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_timestamp": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAARJHaM4Gq3MpDTdBasBsEolQaOmxJQU1wsZVaSFAOLpEh1QihDglXI95xemePFMKhg+KNpFg7lw1ChCs2Wn/c26Q==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client9", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_timestamp": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAARJHaM4Gq3MpDTdBasBsEolQaOmxJQU1wsZVaSFAOLpEh1QihDglXI95xemePFMKhg+KNpFg7lw1ChCs2Wn/c26Q==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=regex", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_regex": { + "$regularExpression": { + "pattern": "test", + "options": "" + } + } + } + }, + "object": "coll10" + }, + { + "name": "find", + "object": "coll10", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_regex": { + "$regularExpression": { + "pattern": "test", + "options": "" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_regex": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAALVnxM4UqGhqf5eXw6nsS08am3YJrTf1EvjKitT8tyyMAbHsICIU3GUjuC7EBofCHbusvgo7pDyaClGostFz44nA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client10", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_regex": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAALVnxM4UqGhqf5eXw6nsS08am3YJrTf1EvjKitT8tyyMAbHsICIU3GUjuC7EBofCHbusvgo7pDyaClGostFz44nA==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=date", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + 
"encrypted_date": { + "$date": { + "$numberLong": "123" + } + } + } + }, + "object": "coll11" + }, + { + "name": "find", + "object": "coll11", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_date": { + "$date": { + "$numberLong": "123" + } + } + } + ] + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_date": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAJ5sN7u6l97+DswfKTqZAijSTSOo5htinGKQKUD7pHNJYlLXGOkB4glrCu7ibu0g3344RHQ5yUp4YxMEa8GD+Snw==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client11", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_date": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAAJ5sN7u6l97+DswfKTqZAijSTSOo5htinGKQKUD7pHNJYlLXGOkB4glrCu7ibu0g3344RHQ5yUp4YxMEa8GD+Snw==", + "subType": "06" + } + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "type=minKey", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_minKey": { + "$minKey": 1 + } + } + }, + "object": "coll12", + "expectError": { + "errorContains": "Cannot encrypt element of type: minKey" + } + } + ] + }, + { + "description": "type=maxKey", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_maxKey": { + "$maxKey": 1 + } + } + }, + "object": "coll13", + "expectError": { + "errorContains": "Cannot encrypt element of type: maxKey" + } + } + ] + }, + { + "description": "type=undefined", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_undefined": { + "$undefined": true + } + } + }, + "object": "coll14", + "expectError": { + "errorContains": "Cannot encrypt element of type: undefined" + } + } + ] + }, + { + "description": "type=array", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_array": [] + } + }, + "object": "coll15", + "expectError": { + "errorContains": "element of type: array" + } + } + ] + }, + { + "description": "type=bool", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_bool": true + } + }, + "object": "coll16", + "expectError": { + "errorContains": "element of type: bool" + } + } + ] + }, + { + "description": "type=null", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_null": true + } + }, + "object": "coll17", + "expectError": { + "errorContains": "Cannot encrypt element of type: null" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/unsupportedCommand.json b/test/client-side-encryption/spec/unified/unsupportedCommand.json new file mode 100644 index 0000000000..a91390324a --- /dev/null +++ 
b/test/client-side-encryption/spec/unified/unsupportedCommand.json @@ -0,0 +1,200 @@ +{ + "description": "unsupportedCommand", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "x": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "x": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "mapReduce deterministic encryption (unsupported)", + "operations": [ + { + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": 
"function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "object": "coll", + "expectError": { + "errorContains": "command not supported for auto encryption: mapreduce" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/updateMany.json b/test/client-side-encryption/spec/unified/updateMany.json new file mode 100644 index 0000000000..cae4c0eaf4 --- /dev/null +++ b/test/client-side-encryption/spec/unified/updateMany.json @@ -0,0 +1,376 @@ +{ + "description": "updateMany", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "updateMany with deterministic encryption", + "operations": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "encrypted_string": { + "$in": [ + "string0", + "string1" + ] + } + }, + "update": { + "$set": { + "encrypted_string": "string2", + "random": "abc" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + }, + { + "_id": 2, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$in": [ + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + }, + { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + } + ] + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACQ76HWOut3DZtQuV90hp1aaCpZn95vZIaWmn+wrBehcEtcFwyJlBdlyzDzZTWPZCPgiFq72Wvh6Y7VbpU9NAp3A==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + }, + "multi": true, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateMany fails when filtering on a random field", + "operations": [ + { + "name": "updateMany", + "arguments": { + "filter": { + "random": "abc" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/updateOne.json 
b/test/client-side-encryption/spec/unified/updateOne.json new file mode 100644 index 0000000000..6c8fdcbb6e --- /dev/null +++ b/test/client-side-encryption/spec/unified/updateOne.json @@ -0,0 +1,538 @@ +{ + "description": "updateOne", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + }, + { + "client": { + "id": "client_unencrypted" + } + }, + { + "database": { + "id": "db_unencrypted", + "client": "client_unencrypted", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll_unencrypted", + "database": "db_unencrypted", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "createOptions": { + "validator": { + "$jsonSchema": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } + }, + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "updateOne with deterministic encryption", + 
"operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "encrypted_string": "string0" + }, + "update": { + "$set": { + "encrypted_string": "string1", + "random": "abc" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "name": "find", + "object": "coll_unencrypted", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": { + "encrypted_string": { + "$eq": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + }, + "u": { + "$set": { + "encrypted_string": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACDdw4KFz3ZLquhsbt7RmDjD0N67n0uSXx7IGnQNCLeIKvot6s/ouI21Eo84IOtb6lhwUNPlSEBNY0/hbszWAKJg==", + "subType": "06" + } + }, + "random": { + "$$type": "binData" + } + } + }, + "multi": false, + "upsert": false + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "updateOne fails when filtering on a random field", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": { + "random": "abc" + }, + "update": { + "$set": { + "encrypted_string": "string1" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "Cannot query on fields encrypted with the randomized encryption" + } + } + ] + }, + { + "description": "$unset works with an encrypted field", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$unset": { + "encrypted_string": "" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1 + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": {}, + "u": { + "$unset": { + "encrypted_string": "" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "$rename works if target value has same encryption options", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$rename": { + 
"encrypted_string": "encrypted_string_equivalent" + } + } + }, + "object": "coll", + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "documents": [ + { + "_id": 1, + "encrypted_string_equivalent": { + "$binary": { + "base64": "AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==", + "subType": "06" + } + } + } + ], + "collectionName": "default", + "databaseName": "default" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "default", + "updates": [ + { + "q": {}, + "u": { + "$rename": { + "encrypted_string": "encrypted_string_equivalent" + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "$rename fails if target value has different encryption options", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$rename": { + "encrypted_string": "random" + } + } + }, + "object": "coll", + "expectError": { + "errorContains": "$rename between two encrypted fields must have the same metadata or both be unencrypted" + } + } + ] + }, + { + "description": "an invalid update (no $ operators) is validated and errors", + "operations": [ + { + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "encrypted_string": "random" + } + }, + "object": "coll", + "expectError": { + "errorContains": "" + } + } + ] + } + ] +} diff --git a/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json b/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json new file mode 100644 index 0000000000..c46a193273 --- /dev/null +++ b/test/client-side-encryption/spec/unified/validatorAndPartialFieldExpression.json @@ -0,0 +1,323 @@ +{ + "description": "validatorAndPartialFieldExpression", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "6.0.0", + "csfle": { + "minLibmongocryptVersion": "1.15.1" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "schemaMap": { + "default.encryptedCollection": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": 
"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "create with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection" + } + } + ] + }, + { + "description": "create with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "collMod with a validator on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "unencrypted_string": "foo" + } + } + } + } + ] + }, + { + "description": "collMod with a validator on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "collMod", + "command": { + "collMod": "encryptedCollection", + "validator": { + "encrypted_string": "foo" + } + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + }, + { + "description": "createIndexes with a partialFilterExpression on an unencrypted field is OK", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "unencrypted_string": "foo" + } + } + ] + } + } + }, + { + "name": "assertIndexExists", + "object": "testRunner", + "arguments": { + "databaseName": "default", + "collectionName": "encryptedCollection", + "indexName": "name" + } + } + ] + }, + { + "description": "createIndexes with 
a partialFilterExpression on an encrypted field is an error", + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "encryptedCollection" + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "createIndexes", + "command": { + "createIndexes": "encryptedCollection", + "indexes": [ + { + "name": "name", + "key": { + "name": 1 + }, + "partialFilterExpression": { + "encrypted_string": "foo" + } + } + ] + } + }, + "expectError": { + "errorContains": "Comparison to encrypted fields not supported" + } + } + ] + } + ] +} diff --git a/test/collection_management/clustered-indexes.json b/test/collection_management/clustered-indexes.json index 739d0fd8b6..9db5ff06d7 100644 --- a/test/collection_management/clustered-indexes.json +++ b/test/collection_management/clustered-indexes.json @@ -10,14 +10,17 @@ "createEntities": [ { "client": { - "id": "client0" + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] } }, { "database": { "id": "database0", "client": "client0", - "databaseName": "ts-tests" + "databaseName": "ci-tests" } }, { @@ -31,7 +34,7 @@ "initialData": [ { "collectionName": "test", - "databaseName": "ts-tests", + "databaseName": "ci-tests", "documents": [] } ], @@ -64,10 +67,40 @@ "name": "assertCollectionExists", "object": "testRunner", "arguments": { - "databaseName": "ts-tests", + "databaseName": "ci-tests", "collectionName": "test" } } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + } + ] + } ] }, { @@ -125,6 +158,49 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": { + "$eq": "test" + } + } + }, + "databaseName": "ci-tests" + } + } + ] + } ] }, { @@ -171,6 +247,44 @@ } ] } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "clusteredIndex": { + "key": { + "_id": 1 + }, + "unique": true, + "name": "test index" + } + }, + "databaseName": "ci-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "test" + }, + "databaseName": "ci-tests" + } + } + ] + } ] } ] diff --git a/test/collection_management/modifyCollection-pre_and_post_images.json b/test/collection_management/modifyCollection-pre_and_post_images.json deleted file mode 100644 index 8026faeb17..0000000000 --- a/test/collection_management/modifyCollection-pre_and_post_images.json +++ /dev/null @@ -1,111 +0,0 @@ -{ - "description": "modifyCollection-pre_and_post_images", - "schemaVersion": "1.4", - "runOnRequirements": [ - { - "minServerVersion": "6.0", - "serverless": "forbid" - } 
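validatorAndPartialFieldExpression.json above checks that create, collMod, and createIndexes are themselves run through query analysis: comparing against an encrypted field inside a validator or partialFilterExpression must fail. A sketch, with db assumed to be a Database handle on a client carrying that file's schemaMap and local KMS provider:

    from pymongo.errors import EncryptionError

    db.drop_collection("encryptedCollection")
    try:
        db.create_collection("encryptedCollection",
                             validator={"encrypted_string": "foo"})
    except EncryptionError as exc:
        assert "Comparison to encrypted fields not supported" in str(exc)

    # A validator over an unencrypted field is fine, as are the collMod
    # and createIndexes equivalents exercised by the other tests:
    db.create_collection("encryptedCollection",
                         validator={"unencrypted_string": "foo"})
    db.command({"collMod": "encryptedCollection",
                "validator": {"unencrypted_string": "foo"}})
    db.command({"createIndexes": "encryptedCollection",
                "indexes": [{"name": "name", "key": {"name": 1},
                             "partialFilterExpression":
                                 {"unencrypted_string": "foo"}}]})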
- ], - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "papi-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "test" - } - } - ], - "tests": [ - { - "description": "modifyCollection to changeStreamPreAndPostImages enabled", - "operations": [ - { - "name": "dropCollection", - "object": "database0", - "arguments": { - "collection": "test" - } - }, - { - "name": "createCollection", - "object": "database0", - "arguments": { - "collection": "test", - "changeStreamPreAndPostImages": { - "enabled": false - } - } - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "databaseName": "papi-tests", - "collectionName": "test" - } - }, - { - "name": "modifyCollection", - "object": "database0", - "arguments": { - "collection": "test", - "changeStreamPreAndPostImages": { - "enabled": true - } - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "drop": "test" - }, - "databaseName": "papi-tests" - } - }, - { - "commandStartedEvent": { - "command": { - "create": "test", - "changeStreamPreAndPostImages": { - "enabled": false - } - } - } - }, - { - "commandStartedEvent": { - "command": { - "collMod": "test", - "changeStreamPreAndPostImages": { - "enabled": true - } - } - } - } - ] - } - ] - } - ] -} diff --git a/test/collection_management/timeseries-collection.json b/test/collection_management/timeseries-collection.json index b5638fd36e..2ee52eac41 100644 --- a/test/collection_management/timeseries-collection.json +++ b/test/collection_management/timeseries-collection.json @@ -250,6 +250,71 @@ ] } ] + }, + { + "description": "createCollection with bucketing options", + "runOnRequirements": [ + { + "minServerVersion": "6.3" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "database0", + "arguments": { + "collection": "test" + } + }, + { + "name": "createCollection", + "object": "database0", + "arguments": { + "collection": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + } + }, + { + "name": "assertCollectionExists", + "object": "testRunner", + "arguments": { + "databaseName": "ts-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test" + }, + "databaseName": "ts-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "timeseries": { + "timeField": "time", + "bucketMaxSpanSeconds": 3600, + "bucketRoundingSeconds": 3600 + } + }, + "databaseName": "ts-tests" + } + } + ] + } + ] } ] } diff --git a/test/command_logging/command.json b/test/command_logging/command.json new file mode 100644 index 0000000000..d2970df692 --- /dev/null +++ b/test/command_logging/command.json @@ -0,0 +1,215 @@ +{ + "description": "command-logging", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + 
"databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ping": 1, + "$db": "logging-tests" + } + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "databaseName": "logging-tests", + "commandName": "ping", + "reply": { + "$$type": "string" + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A failed command", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "find", + "command": { + "$$type": "string" + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "databaseName": "logging-tests", + "commandName": "find", + "failure": { + "$$exists": true + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/driver-connection-id.json b/test/command_logging/driver-connection-id.json new file mode 100644 index 0000000000..40db98d6fa --- /dev/null +++ b/test/command_logging/driver-connection-id.json @@ -0,0 +1,146 @@ +{ + "description": "driver-connection-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": 
"Command started", + "databaseName": "logging-tests", + "commandName": "ping", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "A failed command", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "find", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/no-handshake-messages.json b/test/command_logging/no-handshake-messages.json new file mode 100644 index 0000000000..a61e208798 --- /dev/null +++ b/test/command_logging/no-handshake-messages.json @@ -0,0 +1,94 @@ +{ + "description": "no-handshake-command-logs", + "schemaVersion": "1.13", + "tests": [ + { + "description": "Handshake commands should not generate log messages", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + }, + "observeEvents": [ + "connectionCreatedEvent", + "connectionReadyEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionCreatedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping" + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/no-heartbeat-messages.json b/test/command_logging/no-heartbeat-messages.json new file mode 100644 index 0000000000..525be9171d --- /dev/null +++ b/test/command_logging/no-heartbeat-messages.json @@ -0,0 +1,91 @@ +{ + "description": "no-heartbeat-command-logs", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "tests": [ + { + "description": "Heartbeat commands should not generate log messages", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent" + ] + } + }, + { + "database": { + 
"id": "database", + "client": "client", + "databaseName": "logging-tests" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": {} + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "ping" + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/operation-id.json b/test/command_logging/operation-id.json new file mode 100644 index 0000000000..b1a3cec3d9 --- /dev/null +++ b/test/command_logging/operation-id.json @@ -0,0 +1,198 @@ +{ + "description": "operation-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "Successful bulk write command log messages include operationIds", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + }, + { + "deleteOne": { + "filter": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "insert", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "delete", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "delete", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failed bulk write command log message includes operationId", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "x": 1 + }, + "update": [ + { + "$invalidOperator": true + } + ] + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "update", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { 
+ "message": "Command failed", + "commandName": "update", + "operationId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/redacted-commands.json b/test/command_logging/redacted-commands.json new file mode 100644 index 0000000000..43b9ff74f2 --- /dev/null +++ b/test/command_logging/redacted-commands.json @@ -0,0 +1,1438 @@ +{ + "description": "redacted-commands", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-redaction-tests" + } + } + ], + "tests": [ + { + "description": "authenticate command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "db": "$external" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "authenticate", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "authenticate", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to authenticate is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "authenticate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "authenticate", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "authenticate", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "saslStart command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "definitely-invalid-payload", + "db": "admin" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", 
+ "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslStart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "saslStart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to saslStart is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslStart" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "ZmFrZXNhc2xwYXlsb2Fk", + "mechanism": "MONGODB-X509" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslStart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "saslStart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "saslContinue command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "definitely-invalid-payload" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslContinue", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "saslContinue", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to saslContinue is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "ZmFrZXNhc2xwYXlsb2Fk" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "saslContinue", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "saslContinue", + "failure": { + "$$exists": 
true + } + } + } + ] + } + ] + }, + { + "description": "getnonce command and server reply are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "getnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "getnonce", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "network error in response to getnonce is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getnonce" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "getnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "getnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "createUser command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "createUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "createUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to createUser is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createUser" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": "pwd", + "roles": [] + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + 
"data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "createUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "createUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "updateUser command and resulting server-generated error are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "updateUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "updateUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to updateUser is not redacted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "updateUser" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": "pwd", + "roles": [] + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "updateUser", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "updateUser", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "copydbgetnonce command and resulting server-generated error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbgetnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydbgetnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydbgetnonce is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydbgetnonce" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbgetnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "copydbgetnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "copydbsaslstart command and resulting server-generated error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbsaslstart", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydbsaslstart", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydbsaslstart is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydbsaslstart" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydbgetnonce", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "copydbgetnonce", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "copydb command and resulting server-generated error are redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command 
started", + "databaseName": "logging-redaction-tests", + "commandName": "copydb", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": true, + "data": { + "message": "Command failed", + "commandName": "copydb", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "network error in response to copydb is not redacted", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "copydb" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "copydb", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "failureIsRedacted": false, + "data": { + "message": "Command failed", + "commandName": "copydb", + "failure": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "hello with speculative authenticate command and server reply are redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "hello", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "hello", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculative authenticate command and server reply are redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "ismaster", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ismaster", + "reply": { + "$$matchAsDocument": {} + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "isMaster", + "command": { + "$$matchAsDocument": {} + } + } + }, + { + 
"level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "isMaster", + "reply": { + "$$matchAsDocument": {} + } + } + } + ] + } + ] + }, + { + "description": "hello without speculative authenticate command and server reply are not redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "hello", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "hello": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "hello", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "isWritablePrimary": true + } + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello without speculative authenticate command and server reply are not redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "ismaster", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ismaster": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "ismaster", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "ismaster": true + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-redaction-tests", + "commandName": "isMaster", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "isMaster": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "isMaster", + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1, + "ismaster": true + } + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/server-connection-id.json b/test/command_logging/server-connection-id.json new file mode 100644 index 0000000000..abbbbc7442 --- /dev/null +++ b/test/command_logging/server-connection-id.json @@ -0,0 +1,131 @@ +{ + "description": "server-connection-id", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "databaseName": "logging-server-connection-id-tests", + "collectionName": "logging-tests-collection", + "documents": [] + } + ], + "tests": [ + { + "description": 
"command log messages include server connection id", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serverConnectionId": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/service-id.json b/test/command_logging/service-id.json new file mode 100644 index 0000000000..ea39d61231 --- /dev/null +++ b/test/command_logging/service-id.json @@ -0,0 +1,207 @@ +{ + "description": "service-id", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection" + } + } + ], + "initialData": [ + { + "databaseName": "logging-server-connection-id-tests", + "collectionName": "logging-tests-collection", + "documents": [] + } + ], + "tests": [ + { + "description": "command log messages include serviceId when in LB mode", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serviceId": { + "$$type": "string" + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serviceId": { + "$$type": "string" + } + } + } + ] + } + ] + }, + { + "description": "command log messages omit serviceId when not in LB mode", + "runOnRequirements": [ + { + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + 
"object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "insert", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "commandName": "find", + "serviceId": { + "$$exists": false + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command failed", + "commandName": "find", + "serviceId": { + "$$exists": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_logging/unacknowledged-write.json b/test/command_logging/unacknowledged-write.json new file mode 100644 index 0000000000..0d33c020d5 --- /dev/null +++ b/test/command_logging/unacknowledged-write.json @@ -0,0 +1,151 @@ +{ + "description": "unacknowledged-write", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeLogMessages": { + "command": "debug" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "logging-tests-collection", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "logging-tests-collection", + "databaseName": "logging-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ], + "tests": [ + { + "description": "An unacknowledged write generates a succeeded log message with ok: 1 reply", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "messages": [ + { + "level": "debug", + "component": "command", + "data": { + "message": "Command started", + "databaseName": "logging-tests", + "commandName": "insert", + "command": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "insert": "logging-tests-collection", + "$db": "logging-tests" + } + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "command", + "data": { + "message": "Command succeeded", + "commandName": "insert", + "reply": { + "$$matchAsDocument": { + "ok": 1 + } + }, + "requestId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/bulkWrite.json b/test/command_monitoring/bulkWrite.json new file mode 100644 index 0000000000..49c728442e --- /dev/null +++ b/test/command_monitoring/bulkWrite.json @@ -0,0 +1,154 @@ +{ + "description": "bulkWrite", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", 
+ "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful mixed bulk write", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 3 + }, + "u": { + "$set": { + "x": 333 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/command.json b/test/command_monitoring/command.json new file mode 100644 index 0000000000..c28af95fed --- /dev/null +++ b/test/command_monitoring/command.json @@ -0,0 +1,83 @@ +{ + "description": "command", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful command", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1 + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/deleteMany.json b/test/command_monitoring/deleteMany.json new file mode 100644 index 0000000000..78ebad1f98 --- /dev/null +++ b/test/command_monitoring/deleteMany.json @@ -0,0 +1,162 @@ +{ + "description": "deleteMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + 
"commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful deleteMany", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "A successful deleteMany with write errors", + "operations": [ + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 0 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/deleteOne.json b/test/command_monitoring/deleteOne.json new file mode 100644 index 0000000000..2420794fe5 --- /dev/null +++ b/test/command_monitoring/deleteOne.json @@ -0,0 +1,162 @@ +{ + "description": "deleteOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful deleteOne", + "operations": [ + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "A successful deleteOne with write errors", + "operations": [ + { + "name": "deleteOne", + 
"object": "collection", + "arguments": { + "filter": { + "_id": { + "$unsupported": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$unsupported": 1 + } + }, + "limit": 1 + } + ], + "ordered": true + }, + "commandName": "delete", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "delete" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/find.json b/test/command_monitoring/find.json new file mode 100644 index 0000000000..bc9668499b --- /dev/null +++ b/test/command_monitoring/find.json @@ -0,0 +1,558 @@ +{ + "description": "find", + "schemaVersion": "1.15", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "A successful find with no options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": 1 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A successful find with options", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "x": -1 + }, + "projection": { + "_id": 0, + "x": 1 + }, + "skip": 2, + "comment": "test", + "hint": { + "_id": 1 + }, + "max": { + "_id": 6 + }, + "maxTimeMS": 6000, + "min": { + "_id": 0 + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "x": 33 + }, + { + "x": 22 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A successful find 
with showRecordId and returnKey", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "showRecordId": true, + "returnKey": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "showRecordId": true, + "returnKey": true + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A successful find with a getMore", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3 + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", + "runOnRequirements": [ + { + "minServerVersion": "3.1", + "maxServerVersion": "4.4.99", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "_id": { + "$gte": 1 + } + }, + "sort": { + "_id": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": { + "$$type": [ + "int", + "long" + ] + }, + "ns": "command-monitoring-tests.test", + "firstBatch": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 1 + }, + 
"commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "cursor": { + "id": 0, + "ns": "command-monitoring-tests.test", + "nextBatch": [ + { + "_id": 4, + "x": 44 + } + ] + } + }, + "commandName": "getMore", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + }, + { + "description": "A failed find event", + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "filter": { + "$or": true + } + }, + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "databaseName": "command-monitoring-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/insertMany.json b/test/command_monitoring/insertMany.json new file mode 100644 index 0000000000..a80a218c67 --- /dev/null +++ b/test/command_monitoring/insertMany.json @@ -0,0 +1,148 @@ +{ + "description": "insertMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful insertMany", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "A successful insertMany with write errors", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/insertOne.json b/test/command_monitoring/insertOne.json new file mode 100644 index 0000000000..6ff732e41b --- /dev/null +++ b/test/command_monitoring/insertOne.json @@ -0,0 +1,144 @@ +{ + "description": "insertOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + 
"commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful insertOne", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "A successful insertOne with write errors", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1, + "x": 11 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/legacy/bulkWrite.json b/test/command_monitoring/legacy/bulkWrite.json deleted file mode 100644 index ca5a9a105c..0000000000 --- a/test/command_monitoring/legacy/bulkWrite.json +++ /dev/null @@ -1,110 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful mixed bulk write", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4, - "x": 44 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3 - }, - "update": { - "$set": { - "x": 333 - } - } - } - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4, - "x": 44 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 3 - }, - "u": { - "$set": { - "x": 333 - } - } - } - ], - "ordered": true - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/command.json b/test/command_monitoring/legacy/command.json deleted file mode 100644 index 7e1e347be0..0000000000 --- a/test/command_monitoring/legacy/command.json +++ /dev/null @@ -1,113 +0,0 @@ -{ - "data": [ - { - "_id": 
1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful command", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "count" - } - } - ] - }, - { - "description": "A failed command event", - "operation": { - "name": "count", - "arguments": { - "filter": { - "$or": true - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "$or": true - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_failed_event": { - "command_name": "count" - } - } - ] - }, - { - "description": "A successful command with a non-primary read preference", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": 1 - } - }, - "read_preference": { - "mode": "primaryPreferred" - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - } - }, - "command_name": "count", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "count" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/deleteMany.json b/test/command_monitoring/legacy/deleteMany.json deleted file mode 100644 index 7cd396806c..0000000000 --- a/test/command_monitoring/legacy/deleteMany.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful delete many", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "limit": 0 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 2 - }, - "command_name": "delete" - } - } - ] - }, - { - "description": "A successful delete many command with write errors", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$nothing": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$nothing": 1 - } - }, - "limit": 0 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "delete" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/deleteOne.json b/test/command_monitoring/legacy/deleteOne.json deleted file mode 100644 index 0971dfcf2c..0000000000 --- a/test/command_monitoring/legacy/deleteOne.json +++ /dev/null @@ -1,115 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 
33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful delete one", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "limit": 1 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "delete" - } - } - ] - }, - { - "description": "A successful delete one command with write errors", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$nothing": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$nothing": 1 - } - }, - "limit": 1 - } - ], - "ordered": true - }, - "command_name": "delete", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "delete" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/find.json b/test/command_monitoring/legacy/find.json deleted file mode 100644 index e2bb95306f..0000000000 --- a/test/command_monitoring/legacy/find.json +++ /dev/null @@ -1,559 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "namespace": "command-monitoring-tests.test", - "tests": [ - { - "description": "A successful find event with no options", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": 1 - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - } - ] - } - }, - "command_name": "find" - } - } - ] - }, - { - "description": "A successful find event with options", - "operation": { - "name": "find", - "read_preference": { - "mode": "primaryPreferred" - }, - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "sort": { - "_id": 1 - }, - "skip": { - "$numberLong": "2" - }, - "comment": "test", - "hint": { - "_id": 1 - }, - "max": { - "_id": 6 - }, - "maxTimeMS": 6000, - "min": { - "_id": 0 - }, - "returnKey": false, - "showRecordId": false - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gt": 1 - } - }, - "sort": { - "_id": 1 - }, - "skip": { - "$numberLong": "2" - }, - "comment": "test", - "hint": { - "_id": 1 - }, - "max": { - "_id": 6 - }, - "maxTimeMS": 6000, - "min": { - "_id": 0 - }, - "returnKey": false, - "showRecordId": false - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - 
"_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ] - } - }, - "command_name": "find" - } - } - ] - }, - { - "description": "A successful find event with a getmore", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "3" - } - }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ] - } - }, - "command_name": "getMore" - } - } - ] - }, - { - "description": "A successful find event with a getmore and killcursors", - "ignore_if_server_version_greater_than": "3.0", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "1" - } - }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - } - ] - } - }, - "command_name": "getMore" - } - }, - { - "command_started_event": { - "command": { - "killCursors": "test", - "cursors": [ - { - "$numberLong": "42" - } - ] - }, - "command_name": "killCursors", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursorsUnknown": [ - { - "$numberLong": "42" - } - ] - }, - "command_name": "killCursors" - } - } - ] - }, - { - "description": "A successful find event with a getmore and the server kills the cursor (<= 4.4)", - "ignore_if_server_version_less_than": "3.1", - "ignore_if_server_version_greater_than": "4.4", - 
"ignore_if_topology_type": [ - "sharded" - ], - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "_id": { - "$gte": 1 - } - }, - "sort": { - "_id": 1 - }, - "batchSize": { - "$numberLong": "3" - }, - "limit": { - "$numberLong": "4" - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "42" - }, - "ns": "command-monitoring-tests.test", - "firstBatch": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - }, - "command_name": "find" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": { - "$numberLong": "1" - } - }, - "command_name": "getMore", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "cursor": { - "id": { - "$numberLong": "0" - }, - "ns": "command-monitoring-tests.test", - "nextBatch": [ - { - "_id": 4, - "x": 44 - } - ] - } - }, - "command_name": "getMore" - } - } - ] - }, - { - "description": "A failed find event", - "operation": { - "name": "find", - "arguments": { - "filter": { - "$or": true - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "filter": { - "$or": true - } - }, - "command_name": "find", - "database_name": "command-monitoring-tests" - } - }, - { - "command_failed_event": { - "command_name": "find" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/insertMany.json b/test/command_monitoring/legacy/insertMany.json deleted file mode 100644 index 0becf928e4..0000000000 --- a/test/command_monitoring/legacy/insertMany.json +++ /dev/null @@ -1,145 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful insert many", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful insert many command with write errors", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 1, - "x": 11 - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful unordered insert many", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "options": { - 
"ordered": false - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": false - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/insertOne.json b/test/command_monitoring/legacy/insertOne.json deleted file mode 100644 index 877bca1a61..0000000000 --- a/test/command_monitoring/legacy/insertOne.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful insert one", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2, - "x": 22 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "insert" - } - } - ] - }, - { - "description": "A successful insert one command with write errors", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "x": 11 - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ], - "ordered": true - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/unacknowledgedBulkWrite.json b/test/command_monitoring/legacy/unacknowledgedBulkWrite.json deleted file mode 100644 index ae116289eb..0000000000 --- a/test/command_monitoring/legacy/unacknowledgedBulkWrite.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "collection_name": "test-unacknowledged-bulk-write", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful unordered bulk write with an unacknowledged write concern", - "comment": "On a 2.4 server, no GLE is sent and requires a client-side manufactured reply", - "operation": { - "name": "bulkWrite", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 - } - } - } - ], - "options": { - "ordered": false - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test-unacknowledged-bulk-write", - "documents": [ - { - "_id": "unorderedBulkWriteInsertW0", - "x": 44 - } - ], - "ordered": false, - "writeConcern": { - "w": 0 - } - }, - "command_name": "insert", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1 - }, - "command_name": "insert" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/updateMany.json b/test/command_monitoring/legacy/updateMany.json deleted file mode 100644 index d82792fc4e..0000000000 --- a/test/command_monitoring/legacy/updateMany.json +++ /dev/null 
@@ -1,135 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful update many", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 2 - }, - "command_name": "update" - } - } - ] - }, - { - "description": "A successful update many command with write errors", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$nothing": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$nothing": { - "x": 1 - } - }, - "multi": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/legacy/updateOne.json b/test/command_monitoring/legacy/updateOne.json deleted file mode 100644 index ba41dbb0c0..0000000000 --- a/test/command_monitoring/legacy/updateOne.json +++ /dev/null @@ -1,190 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "collection_name": "test", - "database_name": "command-monitoring-tests", - "tests": [ - { - "description": "A successful update one", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$inc": { - "x": 1 - } - } - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1 - }, - "command_name": "update" - } - } - ] - }, - { - "description": "A successful update one with upsert when the upserted id is not an object id", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": 4 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 1, - "upserted": [ - { - "index": 0, - "_id": 4 - } - ] - }, - "command_name": "update" - } - } - ] - }, - { - "description": "A successful update one command with write errors", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - 
"_id": { - "$gt": 1 - } - }, - "update": { - "$nothing": { - "x": 1 - } - } - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "ordered": true, - "updates": [ - { - "q": { - "_id": { - "$gt": 1 - } - }, - "u": { - "$nothing": { - "x": 1 - } - } - } - ] - }, - "command_name": "update", - "database_name": "command-monitoring-tests" - } - }, - { - "command_succeeded_event": { - "reply": { - "ok": 1, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 42, - "errmsg": "" - } - ] - }, - "command_name": "update" - } - } - ] - } - ] -} diff --git a/test/command_monitoring/redacted-commands.json b/test/command_monitoring/redacted-commands.json new file mode 100644 index 0000000000..4302ba8900 --- /dev/null +++ b/test/command_monitoring/redacted-commands.json @@ -0,0 +1,679 @@ +{ + "description": "redacted-commands", + "schemaVersion": "1.5", + "runOnRequirements": [ + { + "minServerVersion": "5.0", + "auth": false + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ], + "observeSensitiveCommands": true + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + } + ], + "tests": [ + { + "description": "authenticate", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "authenticate", + "command": { + "authenticate": 1, + "mechanism": "MONGODB-X509", + "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", + "db": "$external" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "authenticate", + "command": { + "authenticate": { + "$$exists": false + }, + "mechanism": { + "$$exists": false + }, + "user": { + "$$exists": false + }, + "db": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "saslStart", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslStart", + "command": { + "saslStart": 1, + "payload": "definitely-invalid-payload", + "db": "admin" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "saslStart", + "command": { + "saslStart": { + "$$exists": false + }, + "payload": { + "$$exists": false + }, + "db": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "saslContinue", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "saslContinue", + "command": { + "saslContinue": 1, + "conversationId": 0, + "payload": "definitely-invalid-payload" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "saslContinue", + "command": { + "saslContinue": { + "$$exists": false + }, + "conversationId": { + "$$exists": false + }, + "payload": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "getnonce", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "getnonce", + "command": { + "getnonce": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "getnonce", + "command": { + "getnonce": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "getnonce", + "reply": { + "ok": { + "$$exists": false + }, + "nonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "createUser", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "createUser", + "command": { + "createUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createUser", + "command": { + "createUser": { + "$$exists": false + }, + "pwd": { + "$$exists": false + }, + "roles": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateUser", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "updateUser", + "command": { + "updateUser": "private", + "pwd": {}, + "roles": [] + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "updateUser", + "command": { + "updateUser": { + "$$exists": false + }, + "pwd": { + "$$exists": false + }, + "roles": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydbgetnonce", + "runOnRequirements": [ + { + "maxServerVersion": "3.6.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydbgetnonce", + "command": { + "copydbgetnonce": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydbsaslstart", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydbsaslstart", + "command": { + "copydbsaslstart": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "copydb", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "copydb", + "command": { + "copydb": "private" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "copydb", + "command": { + "copydb": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "hello with speculative authenticate", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": { + "$$exists": false + }, + 
"speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello with speculative authenticate", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1, + "speculativeAuthenticate": { + "saslStart": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": false + }, + "speculativeAuthenticate": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "hello without speculative authenticate is not redacted", + "runOnRequirements": [ + { + "minServerVersion": "4.9" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "hello", + "command": { + "hello": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "hello", + "reply": { + "isWritablePrimary": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "legacy hello without speculative authenticate is not redacted", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ismaster", + "command": { + "ismaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "ismaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "isMaster", + "command": { + "isMaster": 1 + } + } + }, + { + "commandSucceededEvent": { + "commandName": "isMaster", + "reply": { + "ismaster": { + "$$exists": true + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/server-connection-id.json b/test/command_monitoring/server-connection-id.json new file mode 100644 index 0000000000..a8f27637fc --- /dev/null +++ b/test/command_monitoring/server-connection-id.json @@ -0,0 +1,101 @@ +{ + "description": "server-connection-id", + "schemaVersion": "1.6", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "createEntities": [ + { + "client": { + "id": 
"client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "server-connection-id-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "server-connection-id-tests", + "collectionName": "coll", + "documents": [] + } + ], + "tests": [ + { + "description": "command events include server connection id", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$or": true + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "hasServerConnectionId": true + } + }, + { + "commandSucceededEvent": { + "commandName": "insert", + "hasServerConnectionId": true + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "hasServerConnectionId": true + } + }, + { + "commandFailedEvent": { + "commandName": "find", + "hasServerConnectionId": true + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/unacknowledged-client-bulkWrite.json b/test/command_monitoring/unacknowledged-client-bulkWrite.json new file mode 100644 index 0000000000..14740cea34 --- /dev/null +++ b/test/command_monitoring/unacknowledged-client-bulkWrite.json @@ -0,0 +1,225 @@ +{ + "description": "unacknowledged-client-bulkWrite", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ], + "uriOptions": { + "w": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "tests": [ + { + "description": "A successful mixed client bulkWrite", + "operations": [ + { + "object": "client", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "command-monitoring-tests.test", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "command-monitoring-tests.test", + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ], + "ordered": false + }, + "expectResult": { + "$$unsetOrMatches": { + "acknowledged": { + "$$unsetOrMatches": false + }, + "insertedCount": { + "$$unsetOrMatches": 0 + }, + "upsertedCount": { + "$$unsetOrMatches": 0 + }, + "matchedCount": { + "$$unsetOrMatches": 0 + }, + "modifiedCount": { + "$$unsetOrMatches": 0 + }, + "deletedCount": { + "$$unsetOrMatches": 0 + }, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + }, + { + "object": "collection", + "name": "find", + 
"arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 3 + }, + "updateMods": { + "$set": { + "x": 333 + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "command-monitoring-tests.test" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite", + "reply": { + "ok": 1, + "nInserted": { + "$$exists": false + }, + "nMatched": { + "$$exists": false + }, + "nModified": { + "$$exists": false + }, + "nUpserted": { + "$$exists": false + }, + "nDeleted": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/unacknowledgedBulkWrite.json b/test/command_monitoring/unacknowledgedBulkWrite.json new file mode 100644 index 0000000000..78ddde767f --- /dev/null +++ b/test/command_monitoring/unacknowledgedBulkWrite.json @@ -0,0 +1,117 @@ +{ + "description": "unacknowledgedBulkWrite", + "schemaVersion": "1.7", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A successful unordered bulk write with an unacknowledged write concern", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + } + } + ], + "ordered": false + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": "unorderedBulkWriteInsertW0", + "x": 44 + } + ], + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": { + "$$exists": false + } + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/unified/redacted-commands.json b/test/command_monitoring/unified/redacted-commands.json deleted file mode 100644 index 0f85dc3e94..0000000000 --- a/test/command_monitoring/unified/redacted-commands.json +++ /dev/null @@ -1,659 +0,0 @@ -{ - "description": "redacted-commands", - "schemaVersion": "1.5", - "runOnRequirements": [ - { - "minServerVersion": "5.0", - "auth": false - } - ], - "createEntities": [ - { - "client": { - "id": "client", - "observeEvents": [ - "commandStartedEvent", - "commandSucceededEvent" - ], - 
"observeSensitiveCommands": true - } - }, - { - "database": { - "id": "database", - "client": "client", - "databaseName": "command-monitoring-tests" - } - } - ], - "tests": [ - { - "description": "authenticate", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "authenticate", - "command": { - "authenticate": 1, - "mechanism": "MONGODB-X509", - "user": "CN=myName,OU=myOrgUnit,O=myOrg,L=myLocality,ST=myState,C=myCountry", - "db": "$external" - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "authenticate", - "command": { - "authenticate": { - "$$exists": false - }, - "mechanism": { - "$$exists": false - }, - "user": { - "$$exists": false - }, - "db": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "saslStart", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "saslStart", - "command": { - "saslStart": 1, - "payload": "definitely-invalid-payload", - "db": "admin" - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "saslStart", - "command": { - "saslStart": { - "$$exists": false - }, - "payload": { - "$$exists": false - }, - "db": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "saslContinue", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "saslContinue", - "command": { - "saslContinue": 1, - "conversationId": 0, - "payload": "definitely-invalid-payload" - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "saslContinue", - "command": { - "saslContinue": { - "$$exists": false - }, - "conversationId": { - "$$exists": false - }, - "payload": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "getnonce", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "getnonce", - "command": { - "getnonce": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "getnonce", - "command": { - "getnonce": { - "$$exists": false - } - } - } - }, - { - "commandSucceededEvent": { - "commandName": "getnonce", - "reply": { - "ok": { - "$$exists": false - }, - "nonce": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "createUser", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "createUser", - "command": { - "createUser": "private", - "pwd": {}, - "roles": [] - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "createUser", - "command": { - "createUser": { - "$$exists": false - }, - "pwd": { - "$$exists": false - }, - "roles": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "updateUser", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "updateUser", - "command": { - "updateUser": "private", - "pwd": {}, - "roles": [] - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - 
"commandStartedEvent": { - "commandName": "updateUser", - "command": { - "updateUser": { - "$$exists": false - }, - "pwd": { - "$$exists": false - }, - "roles": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "copydbgetnonce", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "copydbgetnonce", - "command": { - "copydbgetnonce": "private" - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "copydbgetnonce", - "command": { - "copydbgetnonce": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "copydbsaslstart", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "copydbsaslstart", - "command": { - "copydbsaslstart": "private" - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "copydbsaslstart", - "command": { - "copydbsaslstart": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "copydb", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "copydb", - "command": { - "copydb": "private" - } - }, - "expectError": { - "isError": true - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "copydb", - "command": { - "copydb": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "hello with speculative authenticate", - "runOnRequirements": [ - { - "minServerVersion": "4.9" - } - ], - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "hello", - "command": { - "hello": 1, - "speculativeAuthenticate": { - "saslStart": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "hello", - "command": { - "hello": { - "$$exists": false - }, - "speculativeAuthenticate": { - "$$exists": false - } - } - } - }, - { - "commandSucceededEvent": { - "commandName": "hello", - "reply": { - "isWritablePrimary": { - "$$exists": false - }, - "speculativeAuthenticate": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "legacy hello with speculative authenticate", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "ismaster", - "command": { - "ismaster": 1, - "speculativeAuthenticate": { - "saslStart": 1 - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "isMaster", - "command": { - "isMaster": 1, - "speculativeAuthenticate": { - "saslStart": 1 - } - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "ismaster", - "command": { - "ismaster": { - "$$exists": false - }, - "speculativeAuthenticate": { - "$$exists": false - } - } - } - }, - { - "commandSucceededEvent": { - "commandName": "ismaster", - "reply": { - "ismaster": { - "$$exists": false - }, - "speculativeAuthenticate": { - "$$exists": false - } - } - } - }, - { - "commandStartedEvent": { - "commandName": "isMaster", - "command": { - "isMaster": { - "$$exists": false - }, - "speculativeAuthenticate": { - "$$exists": false - } - } - } - }, - { - "commandSucceededEvent": { - "commandName": 
"isMaster", - "reply": { - "ismaster": { - "$$exists": false - }, - "speculativeAuthenticate": { - "$$exists": false - } - } - } - } - ] - } - ] - }, - { - "description": "hello without speculative authenticate is not redacted", - "runOnRequirements": [ - { - "minServerVersion": "4.9" - } - ], - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "hello", - "command": { - "hello": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "hello", - "command": { - "hello": 1 - } - } - }, - { - "commandSucceededEvent": { - "commandName": "hello", - "reply": { - "isWritablePrimary": { - "$$exists": true - } - } - } - } - ] - } - ] - }, - { - "description": "legacy hello without speculative authenticate is not redacted", - "operations": [ - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "ismaster", - "command": { - "ismaster": 1 - } - } - }, - { - "name": "runCommand", - "object": "database", - "arguments": { - "commandName": "isMaster", - "command": { - "isMaster": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client", - "events": [ - { - "commandStartedEvent": { - "commandName": "ismaster", - "command": { - "ismaster": 1 - } - } - }, - { - "commandSucceededEvent": { - "commandName": "ismaster", - "reply": { - "ismaster": { - "$$exists": true - } - } - } - }, - { - "commandStartedEvent": { - "commandName": "isMaster", - "command": { - "isMaster": 1 - } - } - }, - { - "commandSucceededEvent": { - "commandName": "isMaster", - "reply": { - "ismaster": { - "$$exists": true - } - } - } - } - ] - } - ] - } - ] -} diff --git a/test/command_monitoring/updateMany.json b/test/command_monitoring/updateMany.json new file mode 100644 index 0000000000..b15434226c --- /dev/null +++ b/test/command_monitoring/updateMany.json @@ -0,0 +1,188 @@ +{ + "description": "updateMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful updateMany", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 2 + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateMany with write errors", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 
+ } + }, + "update": { + "$unsupported": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": true + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/updateOne.json b/test/command_monitoring/updateOne.json new file mode 100644 index 0000000000..a0ae99e88d --- /dev/null +++ b/test/command_monitoring/updateOne.json @@ -0,0 +1,260 @@ +{ + "description": "updateOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "A successful updateOne", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateOne with upsert where the upserted id is not an ObjectId", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1, + "upserted": [ + { + "index": 0, + "_id": 4 + } + ] + }, + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "A successful updateOne with write errors", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$unsupported": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + 
], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$unsupported": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true + }, + "commandName": "update", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 0, + "writeErrors": { + "$$type": "array" + } + }, + "commandName": "update" + } + } + ] + } + ] + } + ] +} diff --git a/test/command_monitoring/writeConcernError.json b/test/command_monitoring/writeConcernError.json new file mode 100644 index 0000000000..455e5422b7 --- /dev/null +++ b/test/command_monitoring/writeConcernError.json @@ -0,0 +1,155 @@ +{ + "description": "writeConcernError", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "A retryable write with write concern errors publishes success event", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91 + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1, + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91 + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2, + "x": 22 + } + ], + "ordered": true + }, + "commandName": "insert", + "databaseName": "command-monitoring-tests" + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "insert" + } + } + ] + } + ] + } + ] +} diff --git a/test/conftest.py b/test/conftest.py new file mode 100644 index 0000000000..91fad28d0a --- /dev/null +++ b/test/conftest.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +import asyncio +import sys +from test import pytest_conf, setup, teardown + +import pytest + +_IS_SYNC = True + + +@pytest.fixture(scope="session") +def event_loop_policy(): + # The default asyncio loop implementation on Windows + # has issues with sharing sockets across loops (https://github.com/python/cpython/issues/122240) + # 
We explicitly use a different loop implementation here to prevent that issue. + if sys.platform == "win32": + return asyncio.WindowsSelectorEventLoopPolicy() # type: ignore[attr-defined] + + return asyncio.get_event_loop_policy() + + +@pytest.fixture(scope="package", autouse=True) +def test_setup_and_teardown(): + setup() + yield + teardown() + + +pytest_collection_modifyitems = pytest_conf.pytest_collection_modifyitems diff --git a/test/connection_logging/connection-logging.json b/test/connection_logging/connection-logging.json new file mode 100644 index 0000000000..5799e834d7 --- /dev/null +++ b/test/connection_logging/connection-logging.json @@ -0,0 +1,523 @@ +{ + "description": "connection-logging", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient" + } + } + ], + "tests": [ + { + "description": "Create a client, run a command, and close the client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "connection": "debug" + } + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection ready", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked out", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked in", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { +
"level": "debug", + "component": "connection", + "data": { + "message": "Connection checked out", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checked in", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection closed", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "Connection pool was closed" + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool closed", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Connection checkout fails due to error establishing connection", + "runOnRequirements": [ + { + "auth": true, + "minServerVersion": "4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryReads": false, + "appname": "clientAppName", + "heartbeatFrequencyMS": 10000 + }, + "observeLogMessages": { + "connection": "debug" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "clientAppName" + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection checkout started", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection closed", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while using the connection", + "error": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": 
"connection", + "data": { + "message": "Connection checkout failed", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "reason": "An error occurred while trying to establish a new connection", + "error": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool cleared", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/connection_logging/connection-pool-options.json b/test/connection_logging/connection-pool-options.json new file mode 100644 index 0000000000..7055a54869 --- /dev/null +++ b/test/connection_logging/connection-pool-options.json @@ -0,0 +1,458 @@ +{ + "description": "connection-pool-options", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "tests": [ + { + "description": "Options should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "connectionReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "minPoolSize": 1, + "maxPoolSize": 5, + "maxIdleTimeMS": 10000 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "minPoolSize": 1, + "maxPoolSize": 5, + "maxIdleTimeMS": 10000 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection created", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection ready", + "driverConnectionId": { + "$$type": [ + "int", + "long" + ] + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "maxConnecting should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "maxConnecting": 5 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + 
"data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "maxConnecting": 5 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueTimeoutMS": 10000 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueTimeoutMS": 10000 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueSize should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueSize": 100 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueSize": 100 + } + }, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "waitQueueMultiple should be included in connection pool created message when specified", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolReadyEvent" + ], + "observeLogMessages": { + "connection": "debug" + }, + "uriOptions": { + "waitQueueSize": 5 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool created", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "waitQueueMultiple": 5 + } + 
}, + { + "level": "debug", + "component": "connection", + "data": { + "message": "Connection pool ready", + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/cmap/connection-must-have-id.json b/test/connection_monitoring/connection-must-have-id.json similarity index 100% rename from test/cmap/connection-must-have-id.json rename to test/connection_monitoring/connection-must-have-id.json diff --git a/test/cmap/connection-must-order-ids.json b/test/connection_monitoring/connection-must-order-ids.json similarity index 100% rename from test/cmap/connection-must-order-ids.json rename to test/connection_monitoring/connection-must-order-ids.json diff --git a/test/cmap/pool-checkin-destroy-closed.json b/test/connection_monitoring/pool-checkin-destroy-closed.json similarity index 100% rename from test/cmap/pool-checkin-destroy-closed.json rename to test/connection_monitoring/pool-checkin-destroy-closed.json diff --git a/test/cmap/pool-checkin-destroy-stale.json b/test/connection_monitoring/pool-checkin-destroy-stale.json similarity index 100% rename from test/cmap/pool-checkin-destroy-stale.json rename to test/connection_monitoring/pool-checkin-destroy-stale.json diff --git a/test/cmap/pool-checkin-make-available.json b/test/connection_monitoring/pool-checkin-make-available.json similarity index 89% rename from test/cmap/pool-checkin-make-available.json rename to test/connection_monitoring/pool-checkin-make-available.json index 41c522ae67..3f37f188c0 100644 --- a/test/cmap/pool-checkin-make-available.json +++ b/test/connection_monitoring/pool-checkin-make-available.json @@ -22,7 +22,8 @@ { "type": "ConnectionCheckedOut", "connectionId": 1, - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckedIn", @@ -32,7 +33,8 @@ { "type": "ConnectionCheckedOut", "connectionId": 1, - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkin.json b/test/connection_monitoring/pool-checkin.json similarity index 100% rename from test/cmap/pool-checkin.json rename to test/connection_monitoring/pool-checkin.json diff --git a/test/cmap/pool-checkout-connection.json b/test/connection_monitoring/pool-checkout-connection.json similarity index 87% rename from test/cmap/pool-checkout-connection.json rename to test/connection_monitoring/pool-checkout-connection.json index d89b342605..c7e8914d45 100644 --- a/test/cmap/pool-checkout-connection.json +++ b/test/connection_monitoring/pool-checkout-connection.json @@ -23,12 +23,14 @@ { "type": "ConnectionReady", "connectionId": 1, - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckedOut", "connectionId": 1, - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json b/test/connection_monitoring/pool-checkout-custom-maxConnecting-is-enforced.json similarity index 100% rename from test/cmap/pool-checkout-custom-maxConnecting-is-enforced.json rename to test/connection_monitoring/pool-checkout-custom-maxConnecting-is-enforced.json diff --git a/test/cmap/pool-checkout-error-closed.json b/test/connection_monitoring/pool-checkout-error-closed.json similarity index 94% rename from test/cmap/pool-checkout-error-closed.json rename to test/connection_monitoring/pool-checkout-error-closed.json index ee2926e1c0..614403ef50 100644 --- a/test/cmap/pool-checkout-error-closed.json +++ 
b/test/connection_monitoring/pool-checkout-error-closed.json @@ -38,7 +38,8 @@ { "type": "ConnectionCheckedOut", "address": 42, - "connectionId": 42 + "connectionId": 42, + "duration": 42 }, { "type": "ConnectionCheckedIn", @@ -56,6 +57,7 @@ { "type": "ConnectionCheckOutFailed", "address": 42, + "duration": 42, "reason": "poolClosed" } ], diff --git a/test/cmap/pool-checkout-maxConnecting-is-enforced.json b/test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json similarity index 97% rename from test/cmap/pool-checkout-maxConnecting-is-enforced.json rename to test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json index 732478bf7e..3a63818bfe 100644 --- a/test/cmap/pool-checkout-maxConnecting-is-enforced.json +++ b/test/connection_monitoring/pool-checkout-maxConnecting-is-enforced.json @@ -19,7 +19,7 @@ ], "closeConnection": false, "blockConnection": true, - "blockTimeMS": 750 + "blockTimeMS": 800 } }, "poolOptions": { @@ -53,7 +53,7 @@ }, { "name": "wait", - "ms": 100 + "ms": 400 }, { "name": "checkOut", diff --git a/test/cmap/pool-checkout-maxConnecting-timeout.json b/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json similarity index 97% rename from test/cmap/pool-checkout-maxConnecting-timeout.json rename to test/connection_monitoring/pool-checkout-maxConnecting-timeout.json index 84ddf8fdba..4d9fda1a68 100644 --- a/test/cmap/pool-checkout-maxConnecting-timeout.json +++ b/test/connection_monitoring/pool-checkout-maxConnecting-timeout.json @@ -89,7 +89,8 @@ { "type": "ConnectionCheckOutFailed", "reason": "timeout", - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json b/test/connection_monitoring/pool-checkout-minPoolSize-connection-maxConnecting.json similarity index 100% rename from test/cmap/pool-checkout-minPoolSize-connection-maxConnecting.json rename to test/connection_monitoring/pool-checkout-minPoolSize-connection-maxConnecting.json diff --git a/test/cmap/pool-checkout-multiple.json b/test/connection_monitoring/pool-checkout-multiple.json similarity index 100% rename from test/cmap/pool-checkout-multiple.json rename to test/connection_monitoring/pool-checkout-multiple.json diff --git a/test/cmap/pool-checkout-no-idle.json b/test/connection_monitoring/pool-checkout-no-idle.json similarity index 100% rename from test/cmap/pool-checkout-no-idle.json rename to test/connection_monitoring/pool-checkout-no-idle.json diff --git a/test/cmap/pool-checkout-no-stale.json b/test/connection_monitoring/pool-checkout-no-stale.json similarity index 100% rename from test/cmap/pool-checkout-no-stale.json rename to test/connection_monitoring/pool-checkout-no-stale.json diff --git a/test/cmap/pool-checkout-returned-connection-maxConnecting.json b/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json similarity index 89% rename from test/cmap/pool-checkout-returned-connection-maxConnecting.json rename to test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json index 965d56f6d8..10b526e0c3 100644 --- a/test/cmap/pool-checkout-returned-connection-maxConnecting.json +++ b/test/connection_monitoring/pool-checkout-returned-connection-maxConnecting.json @@ -23,6 +23,7 @@ } }, "poolOptions": { + "maxConnecting": 2, "maxPoolSize": 10, "waitQueueTimeoutMS": 5000 }, @@ -72,9 +73,8 @@ "connection": "conn0" }, { - "name": "waitForEvent", - "event": "ConnectionCheckedOut", - "count": 4 + "name": "wait", + "ms": 100 } ], 
"events": [ @@ -104,14 +104,6 @@ "type": "ConnectionCheckedOut", "connectionId": 1, "address": 42 - }, - { - "type": "ConnectionCheckedOut", - "address": 42 - }, - { - "type": "ConnectionCheckedOut", - "address": 42 } ], "ignore": [ diff --git a/test/cmap/pool-clear-clears-waitqueue.json b/test/connection_monitoring/pool-clear-clears-waitqueue.json similarity index 91% rename from test/cmap/pool-clear-clears-waitqueue.json rename to test/connection_monitoring/pool-clear-clears-waitqueue.json index d4aef928c7..e6077f12a5 100644 --- a/test/cmap/pool-clear-clears-waitqueue.json +++ b/test/connection_monitoring/pool-clear-clears-waitqueue.json @@ -59,7 +59,8 @@ }, { "type": "ConnectionCheckedOut", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckOutStarted", @@ -76,17 +77,20 @@ { "type": "ConnectionCheckOutFailed", "reason": "connectionError", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckOutFailed", "reason": "connectionError", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckOutFailed", "reason": "connectionError", - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/connection_monitoring/pool-clear-interrupting-pending-connections.json b/test/connection_monitoring/pool-clear-interrupting-pending-connections.json new file mode 100644 index 0000000000..c1fd746329 --- /dev/null +++ b/test/connection_monitoring/pool-clear-interrupting-pending-connections.json @@ -0,0 +1,77 @@ +{ + "version": 1, + "style": "integration", + "description": "clear with interruptInUseConnections = true closes pending connections", + "runOn": [ + { + "minServerVersion": "4.9.0" + } + ], + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "isMaster", + "hello" + ], + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 10000 + } + }, + "poolOptions": { + "minPoolSize": 0 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "start", + "target": "thread1" + }, + { + "name": "checkOut", + "thread": "thread1" + }, + { + "name": "waitForEvent", + "event": "ConnectionCreated", + "count": 1 + }, + { + "name": "clear", + "interruptInUseConnections": true + }, + { + "name": "waitForEvent", + "event": "ConnectionCheckOutFailed", + "count": 1 + } + ], + "events": [ + { + "type": "ConnectionCheckOutStarted" + }, + { + "type": "ConnectionCreated" + }, + { + "type": "ConnectionPoolCleared", + "interruptInUseConnections": true + }, + { + "type": "ConnectionClosed" + }, + { + "type": "ConnectionCheckOutFailed" + } + ], + "ignore": [ + "ConnectionCheckedIn", + "ConnectionCheckedOut", + "ConnectionPoolCreated", + "ConnectionPoolReady" + ] +} diff --git a/test/cmap/pool-clear-min-size.json b/test/connection_monitoring/pool-clear-min-size.json similarity index 100% rename from test/cmap/pool-clear-min-size.json rename to test/connection_monitoring/pool-clear-min-size.json diff --git a/test/cmap/pool-clear-paused.json b/test/connection_monitoring/pool-clear-paused.json similarity index 100% rename from test/cmap/pool-clear-paused.json rename to test/connection_monitoring/pool-clear-paused.json diff --git a/test/cmap/pool-clear-ready.json b/test/connection_monitoring/pool-clear-ready.json similarity index 91% rename from test/cmap/pool-clear-ready.json rename to test/connection_monitoring/pool-clear-ready.json index 800c3545ad..88c2988ac5 100644 --- a/test/cmap/pool-clear-ready.json +++ 
b/test/connection_monitoring/pool-clear-ready.json @@ -40,7 +40,8 @@ { "type": "ConnectionCheckedOut", "address": 42, - "connectionId": 42 + "connectionId": 42, + "duration": 42 }, { "type": "ConnectionPoolCleared", @@ -49,6 +50,7 @@ { "type": "ConnectionCheckOutFailed", "address": 42, + "duration": 42, "reason": "connectionError" }, { @@ -57,7 +59,8 @@ }, { "type": "ConnectionCheckedOut", - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json b/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json new file mode 100644 index 0000000000..3d7536951d --- /dev/null +++ b/test/connection_monitoring/pool-clear-schedule-run-interruptInUseConnections-false.json @@ -0,0 +1,81 @@ +{ + "version": 1, + "style": "unit", + "description": "Pool clear SHOULD schedule the next background thread run immediately (interruptInUseConnections = false)", + "poolOptions": { + "backgroundThreadIntervalMS": 10000 + }, + "operations": [ + { + "name": "ready" + }, + { + "name": "checkOut" + }, + { + "name": "checkOut", + "label": "conn" + }, + { + "name": "checkIn", + "connection": "conn" + }, + { + "name": "clear", + "interruptInUseConnections": false + }, + { + "name": "waitForEvent", + "event": "ConnectionPoolCleared", + "count": 1, + "timeout": 1000 + }, + { + "name": "waitForEvent", + "event": "ConnectionClosed", + "count": 1, + "timeout": 1000 + }, + { + "name": "close" + } + ], + "events": [ + { + "type": "ConnectionCheckedOut", + "connectionId": 1, + "address": 42 + }, + { + "type": "ConnectionCheckedOut", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionCheckedIn", + "connectionId": 2, + "address": 42 + }, + { + "type": "ConnectionPoolCleared", + "interruptInUseConnections": false + }, + { + "type": "ConnectionClosed", + "connectionId": 2, + "reason": "stale", + "address": 42 + }, + { + "type": "ConnectionPoolClosed", + "address": 42 + } + ], + "ignore": [ + "ConnectionCreated", + "ConnectionPoolReady", + "ConnectionReady", + "ConnectionCheckOutStarted", + "ConnectionPoolCreated" + ] +} diff --git a/test/cmap/pool-close-destroy-conns.json b/test/connection_monitoring/pool-close-destroy-conns.json similarity index 100% rename from test/cmap/pool-close-destroy-conns.json rename to test/connection_monitoring/pool-close-destroy-conns.json diff --git a/test/cmap/pool-close.json b/test/connection_monitoring/pool-close.json similarity index 100% rename from test/cmap/pool-close.json rename to test/connection_monitoring/pool-close.json diff --git a/test/cmap/pool-create-max-size.json b/test/connection_monitoring/pool-create-max-size.json similarity index 100% rename from test/cmap/pool-create-max-size.json rename to test/connection_monitoring/pool-create-max-size.json diff --git a/test/cmap/pool-create-min-size-error.json b/test/connection_monitoring/pool-create-min-size-error.json similarity index 100% rename from test/cmap/pool-create-min-size-error.json rename to test/connection_monitoring/pool-create-min-size-error.json diff --git a/test/cmap/pool-create-min-size.json b/test/connection_monitoring/pool-create-min-size.json similarity index 100% rename from test/cmap/pool-create-min-size.json rename to test/connection_monitoring/pool-create-min-size.json diff --git a/test/cmap/pool-create-with-options.json b/test/connection_monitoring/pool-create-with-options.json similarity index 100% rename from test/cmap/pool-create-with-options.json rename to 
test/connection_monitoring/pool-create-with-options.json diff --git a/test/cmap/pool-create.json b/test/connection_monitoring/pool-create.json similarity index 100% rename from test/cmap/pool-create.json rename to test/connection_monitoring/pool-create.json diff --git a/test/cmap/pool-ready-ready.json b/test/connection_monitoring/pool-ready-ready.json similarity index 100% rename from test/cmap/pool-ready-ready.json rename to test/connection_monitoring/pool-ready-ready.json diff --git a/test/cmap/pool-ready.json b/test/connection_monitoring/pool-ready.json similarity index 91% rename from test/cmap/pool-ready.json rename to test/connection_monitoring/pool-ready.json index 29ce7326cf..a90aed04d0 100644 --- a/test/cmap/pool-ready.json +++ b/test/connection_monitoring/pool-ready.json @@ -31,7 +31,8 @@ { "type": "ConnectionCheckOutFailed", "reason": "connectionError", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionPoolReady", @@ -47,7 +48,8 @@ }, { "type": "ConnectionCheckedOut", - "address": 42 + "address": 42, + "duration": 42 } ], "ignore": [ diff --git a/test/cmap/wait-queue-timeout.json b/test/connection_monitoring/wait-queue-timeout.json similarity index 94% rename from test/cmap/wait-queue-timeout.json rename to test/connection_monitoring/wait-queue-timeout.json index fbcbdfb04d..8bd7c49499 100644 --- a/test/cmap/wait-queue-timeout.json +++ b/test/connection_monitoring/wait-queue-timeout.json @@ -48,7 +48,8 @@ { "type": "ConnectionCheckedOut", "connectionId": 42, - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckOutStarted", @@ -57,7 +58,8 @@ { "type": "ConnectionCheckOutFailed", "reason": "timeout", - "address": 42 + "address": 42, + "duration": 42 }, { "type": "ConnectionCheckedIn", diff --git a/test/connection_string/test/invalid-uris.json b/test/connection_string/test/invalid-uris.json index e04da2b236..a7accbd27d 100644 --- a/test/connection_string/test/invalid-uris.json +++ b/test/connection_string/test/invalid-uris.json @@ -162,15 +162,6 @@ "auth": null, "options": null }, - { - "description": "Missing delimiting slash between hosts and options", - "uri": "mongodb://example.com?w=1", - "valid": false, - "warning": null, - "hosts": null, - "auth": null, - "options": null - }, { "description": "Incomplete key value pair for option", "uri": "mongodb://example.com/?w", diff --git a/test/connection_string/test/valid-auth.json b/test/connection_string/test/valid-auth.json index 4f684ff185..60f63f4e3f 100644 --- a/test/connection_string/test/valid-auth.json +++ b/test/connection_string/test/valid-auth.json @@ -220,29 +220,8 @@ "options": null }, { - "description": "Escaped user info and database (MONGODB-CR)", - "uri": "mongodb://%24am:f%3Azzb%40z%2Fz%3D@127.0.0.1/admin%3F?authMechanism=MONGODB-CR", - "valid": true, - "warning": false, - "hosts": [ - { - "type": "ipv4", - "host": "127.0.0.1", - "port": null - } - ], - "auth": { - "username": "$am", - "password": "f:zzb@z/z=", - "db": "admin?" 
- }, - "options": { - "authmechanism": "MONGODB-CR" - } - }, - { - "description": "Subdelimiters in user/pass don't need escaping (MONGODB-CR)", - "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=MONGODB-CR", + "description": "Subdelimiters in user/pass don't need escaping (PLAIN)", + "uri": "mongodb://!$&'()*+,;=:!$&'()*+,;=@127.0.0.1/admin?authMechanism=PLAIN", "valid": true, "warning": false, "hosts": [ @@ -258,7 +237,7 @@ "db": "admin" }, "options": { - "authmechanism": "MONGODB-CR" + "authmechanism": "PLAIN" } }, { @@ -284,7 +263,7 @@ }, { "description": "Escaped username (GSSAPI)", - "uri": "mongodb://user%40EXAMPLE.COM:secret@localhost/?authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true&authMechanism=GSSAPI", + "uri": "mongodb://user%40EXAMPLE.COM:secret@localhost/?authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com&authMechanism=GSSAPI", "valid": true, "warning": false, "hosts": [ @@ -303,7 +282,8 @@ "authmechanism": "GSSAPI", "authmechanismproperties": { "SERVICE_NAME": "other", - "CANONICALIZE_HOST_NAME": true + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" } } }, diff --git a/test/connection_string/test/valid-options.json b/test/connection_string/test/valid-options.json index 4c2bded9e7..fce53873a6 100644 --- a/test/connection_string/test/valid-options.json +++ b/test/connection_string/test/valid-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Option names are normalized to lowercase", - "uri": "mongodb://alice:secret@example.com/admin?AUTHMechanism=MONGODB-CR", + "uri": "mongodb://alice:secret@example.com/admin?AUTHMechanism=PLAIN", "valid": true, "warning": false, "hosts": [ @@ -18,7 +18,44 @@ "db": "admin" }, "options": { - "authmechanism": "MONGODB-CR" + "authmechanism": "PLAIN" + } + }, + { + "description": "Missing delimiting slash between hosts and options", + "uri": "mongodb://example.com?tls=true", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "tls": true + } + }, + { + "description": "Colon in a key value pair", + "uri": "mongodb://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://test-cluster,ENVIRONMENT:azure", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "hostname", + "host": "example.com", + "port": null + } + ], + "auth": null, + "options": { + "authmechanismProperties": { + "TOKEN_RESOURCE": "mongodb://test-cluster", + "ENVIRONMENT": "azure" + } } } ] diff --git a/test/connection_string/test/valid-unix_socket-absolute.json b/test/connection_string/test/valid-unix_socket-absolute.json index 5bb02476eb..66491db13b 100644 --- a/test/connection_string/test/valid-unix_socket-absolute.json +++ b/test/connection_string/test/valid-unix_socket-absolute.json @@ -30,6 +30,21 @@ "auth": null, "options": null }, + { + "description": "Unix domain socket (mixed case)", + "uri": "mongodb://%2Ftmp%2FMongoDB-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "/tmp/MongoDB-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, { "description": "Unix domain socket (absolute path with spaces in path)", "uri": "mongodb://%2Ftmp%2F %2Fmongodb-27017.sock", diff --git a/test/connection_string/test/valid-unix_socket-relative.json b/test/connection_string/test/valid-unix_socket-relative.json index 2ce649ffc2..788720920b 100644 --- 
a/test/connection_string/test/valid-unix_socket-relative.json +++ b/test/connection_string/test/valid-unix_socket-relative.json @@ -30,6 +30,21 @@ "auth": null, "options": null }, + { + "description": "Unix domain socket (mixed case)", + "uri": "mongodb://rel%2FMongoDB-27017.sock", + "valid": true, + "warning": false, + "hosts": [ + { + "type": "unix", + "host": "rel/MongoDB-27017.sock", + "port": null + } + ], + "auth": null, + "options": null + }, { "description": "Unix domain socket (relative path with spaces)", "uri": "mongodb://rel%2F %2Fmongodb-27017.sock", diff --git a/test/connection_string/test/valid-warnings.json b/test/connection_string/test/valid-warnings.json index 1eacbf8fcb..e11757eb0e 100644 --- a/test/connection_string/test/valid-warnings.json +++ b/test/connection_string/test/valid-warnings.json @@ -93,6 +93,23 @@ ], "auth": null, "options": null + }, + { + "description": "Comma in a key value pair causes a warning", + "uri": "mongodb://localhost?authMechanism=MONGODB-OIDC&authMechanismProperties=TOKEN_RESOURCE:mongodb://host1%2Chost2,ENVIRONMENT:azure", + "valid": true, + "warning": true, + "hosts": [ + { + "type": "hostname", + "host": "localhost", + "port": null + } + ], + "auth": null, + "options": { + "authMechanism": "MONGODB-OIDC" + } } ] } diff --git a/test/crud/unified/aggregate-collation.json b/test/crud/unified/aggregate-collation.json new file mode 100644 index 0000000000..e7f0c3a7f1 --- /dev/null +++ b/test/crud/unified/aggregate-collation.json @@ -0,0 +1,73 @@ +{ + "description": "aggregate-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with collation", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "x": "PING" + } + } + ], + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-merge-errorResponse.json b/test/crud/unified/aggregate-merge-errorResponse.json new file mode 100644 index 0000000000..6c7305fd91 --- /dev/null +++ b/test/crud/unified/aggregate-merge-errorResponse.json @@ -0,0 +1,90 @@ +{ + "description": "aggregate-merge-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 1 + } + ] + } + ], + "tests": [ + { + "description": "aggregate $merge DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.1", + "topologies": [ + "single", + "replicaset" + ] + } + ], + "operations": [ + { + "name": "aggregate", + "object": "database0", + "arguments": { + "pipeline": 
[ + { + "$documents": [ + { + "_id": 2, + "x": 1 + } + ] + }, + { + "$merge": { + "into": "test", + "whenMatched": "fail" + } + } + ] + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "_id": 1 + }, + "keyValue": { + "_id": 2 + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-out.json b/test/crud/unified/aggregate-out.json new file mode 100644 index 0000000000..db0d7918cf --- /dev/null +++ b/test/crud/unified/aggregate-out.json @@ -0,0 +1,143 @@ +{ + "description": "aggregate-out", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "2.6", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $out", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "batchSize": 2 + } + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "Aggregate with $out and batch size of 0", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "batchSize": 0 + } + } + ], + "outcome": [ + { + "collectionName": "other_test_collection", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/aggregate-write-readPreference.json b/test/crud/unified/aggregate-write-readPreference.json index bc887e83cb..c1fa3b4574 100644 --- a/test/crud/unified/aggregate-write-readPreference.json +++ b/test/crud/unified/aggregate-write-readPreference.json @@ -78,11 +78,6 @@ "x": 33 } ] - }, - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [] } ], "tests": [ @@ -159,22 +154,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -250,22 +229,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -344,22 +307,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] }, { @@ -438,22 +385,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll1", - "databaseName": "db0", - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } ] } ] diff --git a/test/crud/unified/aggregate.json b/test/crud/unified/aggregate.json index 0cbfb4e6e9..55634f05f6 100644 --- a/test/crud/unified/aggregate.json +++ 
b/test/crud/unified/aggregate.json @@ -562,6 +562,54 @@ ] } ] + }, + { + "description": "Aggregate with multiple stages", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + } + ], + "batchSize": 2 + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ] } ] } diff --git a/test/crud/unified/bulkWrite-arrayFilters.json b/test/crud/unified/bulkWrite-arrayFilters.json index 70ee014f7a..bc4e7b9fcb 100644 --- a/test/crud/unified/bulkWrite-arrayFilters.json +++ b/test/crud/unified/bulkWrite-arrayFilters.json @@ -274,6 +274,91 @@ ] } ] + }, + { + "description": "BulkWrite with arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + } + }, + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 3, + "modifiedCount": 3, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 2 + } + ] + } + ] + } + ] } ] } diff --git a/test/crud/unified/bulkWrite-collation.json b/test/crud/unified/bulkWrite-collation.json new file mode 100644 index 0000000000..fe54b1a1e3 --- /dev/null +++ b/test/crud/unified/bulkWrite-collation.json @@ -0,0 +1,254 @@ +{ + "description": "bulkWrite-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + }, + { + "_id": 4, + "x": "pong" + }, + { + "_id": 5, + "x": "pONg" + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite with delete operations and collation", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + }, + { + "deleteOne": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + }, + { + "deleteMany": { + "filter": { + "x": "PONG" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 4, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": 
"crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "BulkWrite with update operations and collation", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "x": "ping" + }, + "update": { + "$set": { + "x": "PONG" + } + }, + "collation": { + "locale": "en_US", + "strength": 3 + } + } + }, + { + "updateOne": { + "filter": { + "x": "ping" + }, + "update": { + "$set": { + "x": "PONG" + } + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + }, + { + "replaceOne": { + "filter": { + "x": "ping" + }, + "replacement": { + "_id": 6, + "x": "ping" + }, + "upsert": true, + "collation": { + "locale": "en_US", + "strength": 3 + } + } + }, + { + "updateMany": { + "filter": { + "x": "pong" + }, + "update": { + "$set": { + "x": "PONG" + } + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 6, + "modifiedCount": 4, + "upsertedCount": 1, + "upsertedIds": { + "2": 6 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "PONG" + }, + { + "_id": 3, + "x": "PONG" + }, + { + "_id": 4, + "x": "PONG" + }, + { + "_id": 5, + "x": "PONG" + }, + { + "_id": 6, + "x": "ping" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-errorResponse.json b/test/crud/unified/bulkWrite-errorResponse.json new file mode 100644 index 0000000000..157637c713 --- /dev/null +++ b/test/crud/unified/bulkWrite-errorResponse.json @@ -0,0 +1,88 @@ +{ + "description": "bulkWrite-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "bulkWrite operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-replaceOne-sort.json b/test/crud/unified/bulkWrite-replaceOne-sort.json new file mode 100644 index 0000000000..c0bd383514 --- /dev/null +++ b/test/crud/unified/bulkWrite-replaceOne-sort.json @@ -0,0 +1,239 @@ +{ + "description": "BulkWrite replaceOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + 
"collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite replaceOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "BulkWrite replaceOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateMany-pipeline.json b/test/crud/unified/bulkWrite-updateMany-pipeline.json new file mode 100644 index 0000000000..e938ea7535 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateMany-pipeline.json @@ -0,0 +1,148 @@ +{ + "description": "bulkWrite-updateMany-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + 
"arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-pipeline.json b/test/crud/unified/bulkWrite-updateOne-pipeline.json new file mode 100644 index 0000000000..769bd106f8 --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-pipeline.json @@ -0,0 +1,156 @@ +{ + "description": "bulkWrite-updateOne-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne in bulk write using pipelines", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite-updateOne-sort.json b/test/crud/unified/bulkWrite-updateOne-sort.json new file mode 100644 index 0000000000..f78bd3bf3e --- /dev/null +++ b/test/crud/unified/bulkWrite-updateOne-sort.json @@ -0,0 +1,255 @@ +{ + "description": "BulkWrite updateOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, 
+ { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite updateOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": [ + { + "$set": { + "x": 1 + } + } + ] + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": [ + { + "$set": { + "x": 1 + } + } + ], + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "BulkWrite updateOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": [ + { + "$set": { + "x": 1 + } + } + ] + } + } + ] + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": [ + { + "$set": { + "x": 1 + } + } + ], + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bulkWrite.json b/test/crud/unified/bulkWrite.json new file mode 100644 index 0000000000..59b33cbac5 --- /dev/null +++ b/test/crud/unified/bulkWrite.json @@ -0,0 +1,829 @@ +{ + "description": "bulkWrite", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite with deleteOne operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { 
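Annotation: the sort tests above assert two things: the per-statement "sort" field is forwarded verbatim to the server (8.0+), and on older servers the resulting failure is a server-side error rather than driver-side filtering. A hedged sketch of the corresponding call, assuming a PyMongo release whose UpdateOne model accepts the server 8.0 sort option:

# Sketch: updateOne with a sort option inside bulk_write (MongoDB >= 8.0).
from pymongo import MongoClient, UpdateOne

coll = MongoClient()["crud-tests"]["coll0"]
coll.bulk_write([
    # Several documents match _id > 1; sorting by _id descending makes the
    # single update land on the highest _id (3 in the fixture data above).
    UpdateOne({"_id": {"$gt": 1}}, [{"$set": {"x": 1}}], sort={"_id": -1}),
])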
+ "filter": { + "_id": 2 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "BulkWrite with deleteMany operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "x": { + "$lt": 11 + } + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$lte": 22 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [] + } + ] + }, + { + "description": "BulkWrite with insertOne operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite with replaceOne operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 33 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 12 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "x": 33 + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 1, + "upsertedIds": { + "2": 3 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite with updateOne operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 0 + }, + "update": { + "$set": { + "x": 0 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 11 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 33 + } + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 2, + 
"modifiedCount": 1, + "upsertedCount": 1, + "upsertedIds": { + "3": 3 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite with updateMany operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "x": { + "$lt": 11 + } + }, + "update": { + "$set": { + "x": 0 + } + } + } + }, + { + "updateMany": { + "filter": { + "x": { + "$lte": 22 + } + }, + "update": { + "$unset": { + "y": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "x": { + "$lte": 22 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 33 + } + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 4, + "modifiedCount": 2, + "upsertedCount": 1, + "upsertedIds": { + "3": 3 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite with mixed ordered operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "deleteMany": { + "filter": { + "x": { + "$nin": [ + 24, + 34 + ] + } + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "upsert": true + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 2, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "3": 4 + } + }, + "matchedCount": 3, + "modifiedCount": 3, + "upsertedCount": 1, + "upsertedIds": { + "5": 4 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 34 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite with mixed unordered operations", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "_id": 3, + "x": 33 + }, + "upsert": true + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": false + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 1, + "upsertedIds": { + "0": 3 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite continue-on-error behavior with 
unordered (preexisting duplicate key)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "ordered": false + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "BulkWrite continue-on-error behavior with unordered (duplicate key in requests)", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "ordered": false + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/bypassDocumentValidation.json b/test/crud/unified/bypassDocumentValidation.json new file mode 100644 index 0000000000..aff2d37f81 --- /dev/null +++ b/test/crud/unified/bypassDocumentValidation.json @@ -0,0 +1,493 @@ +{ + "description": "bypassDocumentValidation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.2", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $out passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "bypassDocumentValidation": false + }, + "commandName": "aggregate", + "databaseName": "crud" + } + } + ] + } + ] + }, + { + "description": "BulkWrite passes bypassDocumentValidation: false", + "operations": [ + { + "object": 
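Annotation: the two continue-on-error tests above check that an unordered bulk write keeps applying the remaining statements after a duplicate-key failure and still reports the partial counts through the error. In PyMongo's classic API that surfaces as pymongo.errors.BulkWriteError, whose details document mirrors the expectResult fields; a minimal sketch against the fixture data these tests use:

# Sketch: reading partial counts after an unordered bulk write fails.
from pymongo import InsertOne, MongoClient
from pymongo.errors import BulkWriteError

coll = MongoClient()["crud-v1"]["coll"]
try:
    coll.bulk_write(
        [InsertOne({"_id": 2, "x": 22}),    # duplicate key: fails
         InsertOne({"_id": 3, "x": 33}),    # still applied (unordered)
         InsertOne({"_id": 4, "x": 44})],   # still applied (unordered)
        ordered=False,
    )
except BulkWriteError as exc:
    print(exc.details["nInserted"])               # 2, as insertedCount above
    print(exc.details["writeErrors"][0]["code"])  # 11000 (duplicate key)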
"collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndReplace passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "InsertMany passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "InsertOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 4, + "x": 44 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "ReplaceOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 32 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "UpdateMany passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + 
"bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "UpdateOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-delete-options.json b/test/crud/unified/client-bulkWrite-delete-options.json new file mode 100644 index 0000000000..d9987897dc --- /dev/null +++ b/test/crud/unified/client-bulkWrite-delete-options.json @@ -0,0 +1,268 @@ +{ + "description": "client bulkWrite delete options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulk write delete with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + 
"outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + }, + { + "description": "client bulk write delete with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "hint": "_id_", + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-errorResponse.json b/test/crud/unified/client-bulkWrite-errorResponse.json new file mode 100644 index 0000000000..b828aad3b9 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-errorResponse.json @@ -0,0 +1,69 @@ +{ + "description": "client bulkWrite errorResponse", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite operations support errorResponse assertions", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-errors.json b/test/crud/unified/client-bulkWrite-errors.json new file mode 100644 index 0000000000..015bd95c99 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-errors.json @@ -0,0 +1,513 @@ +{ + "description": "client bulkWrite errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + 
"databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "writeConcernErrorCode": 91, + "writeConcernErrorMessage": "Replication is being shut down", + "undefinedVarCode": 17276 + }, + "tests": [ + { + "description": "an individual operation fails during an ordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "an individual operation fails during an unordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "2": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "detailed results are omitted from error when verboseResults is false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a top-level failure occurs during a bulkWrite", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": 
"client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "x": 1 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "errorCode": 8 + } + } + ] + }, + { + "description": "a bulk write with only errors does not report a partial result", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": {} + }, + "writeErrors": { + "0": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a write concern error occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 10 + } + }, + "updateResults": {}, + "deleteResults": {} + }, + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ] + }, + { + "description": "an empty list of write models is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [], + "verboseResults": true + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Requesting unacknowledged write with verboseResults is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true, + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and verbose results" + } + } + ] + }, + { + "description": "Requesting unacknowledged write with ordered is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and ordered writes" + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-mixed-namespaces.json b/test/crud/unified/client-bulkWrite-mixed-namespaces.json new file mode 100644 index 0000000000..55f0618923 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-mixed-namespaces.json @@ -0,0 +1,315 @@ +{ + "description": 
"client bulkWrite with mixed namespaces", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "db1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "_yamlAnchors": { + "db0Coll0Namespace": "db0.coll0", + "db0Coll1Namespace": "db0.coll1", + "db1Coll2Namespace": "db1.coll2" + }, + "tests": [ + { + "description": "client bulkWrite with mixed namespaces", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 1 + } + } + }, + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 2 + } + } + }, + { + "updateOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 45 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 2, + "insertResults": { + "0": { + "insertedId": 1 + }, + "1": { + "insertedId": 2 + } + }, + "updateResults": { + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "5": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + }, + "4": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1 + } + }, + { + "insert": 0, + "document": { + "_id": 2 + } + }, + { + "update": 1, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 2, + "filter": { + "_id": 3 + }, + "multi": false + }, + { + "delete": 1, + "filter": { + "_id": 2 + }, + "multi": false + }, + { + "update": 2, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 45 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "db0.coll0" + }, + { + "ns": "db0.coll1" + }, + { + "ns": "db1.coll2" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + 
"_id": 2 + } + ] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 4, + "x": 45 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-options.json b/test/crud/unified/client-bulkWrite-options.json new file mode 100644 index 0000000000..708fe4e85b --- /dev/null +++ b/test/crud/unified/client-bulkWrite-options.json @@ -0,0 +1,716 @@ +{ + "description": "client bulkWrite top-level options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "writeConcernClient", + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "comment": { + "bulk": "write" + }, + "let": { + "id1": 1, + "id2": 2 + }, + "writeConcern": { + "w": "majority" + } + }, + "tests": [ + { + "description": "client bulkWrite comment", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "comment": { + "bulk": "write" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "comment": { + "bulk": "write" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": true, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + 
"bypassDocumentValidation": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite let", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "let": { + "id1": 1, + "id2": 2 + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "1": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "let": { + "id1": 1, + "id2": 2 + }, + "ops": [ + { + "update": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation: false is sent", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": false, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + 
], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite inherits writeConcern from client", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": 1 + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern option overrides client writeConcern", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-ordered.json b/test/crud/unified/client-bulkWrite-ordered.json new file mode 100644 index 0000000000..6fb10d992f --- /dev/null +++ b/test/crud/unified/client-bulkWrite-ordered.json @@ -0,0 +1,291 @@ +{ + "description": "client bulkWrite with ordered option", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", 
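Annotation: the options file above checks that top-level options pass through unmodified: comment and let are copied into the command, bypassDocumentValidation is sent even when false, and the effective write concern is the explicit option when given, otherwise the client's own. A sketch under the same PyMongo 4.9 API assumptions as the earlier error-handling example; the keyword names here are assumptions based on that API:

# Sketch: top-level options on a client-level bulk write (names assumed).
from pymongo import InsertOne, MongoClient, UpdateOne
from pymongo.write_concern import WriteConcern

client = MongoClient(w=1)  # client write concern, inherited unless overridden
result = client.bulk_write(
    [
        InsertOne({"_id": 3, "x": 33}, namespace="crud-tests.coll0"),
        UpdateOne({"$expr": {"$eq": ["$_id", "$$id1"]}},
                  {"$inc": {"x": 1}}, namespace="crud-tests.coll0"),
    ],
    comment={"bulk": "write"},                 # copied verbatim into the command
    let={"id1": 1},                            # referenced by $$id1 above
    write_concern=WriteConcern(w="majority"),  # overrides the client's w=1
    verbose_results=True,
)
print(result.inserted_count, result.modified_count)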
+ "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with ordered: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite with ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git 
a/test/crud/unified/client-bulkWrite-partialResults.json b/test/crud/unified/client-bulkWrite-partialResults.json new file mode 100644 index 0000000000..1b75e37834 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-partialResults.json @@ -0,0 +1,540 @@ +{ + "description": "client bulkWrite partial results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "newDocument": { + "_id": 2, + "x": 22 + } + }, + "tests": [ + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + 
"updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is unset when all operations fail during an unordered bulk write", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "1": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ 
+ { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-replaceOne-sort.json b/test/crud/unified/client-bulkWrite-replaceOne-sort.json new file mode 100644 index 0000000000..fc66ec015d --- /dev/null +++ b/test/crud/unified/client-bulkWrite-replaceOne-sort.json @@ -0,0 +1,163 @@ +{ + "description": "client bulkWrite replaceOne-sort", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne with sort option", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "updateMods": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "nErrors": 0, + "nMatched": 1, + "nModified": 1 + }, + "commandName": "bulkWrite" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 
1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-results.json b/test/crud/unified/client-bulkWrite-results.json new file mode 100644 index 0000000000..accf5a9cbf --- /dev/null +++ b/test/crud/unified/client-bulkWrite-results.json @@ -0,0 +1,833 @@ +{ + "description": "client bulkWrite results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with verboseResults: true returns detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { 
+ "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with verboseResults: false omits detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to verboseResults: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + 
"namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-options.json b/test/crud/unified/client-bulkWrite-update-options.json new file mode 100644 index 0000000000..ce6241c681 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-options.json @@ -0,0 +1,949 @@ +{ + "description": "client bulkWrite update options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ], + 
"_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulkWrite update with arrayFilters", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 4, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + 
} + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_", + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_", + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_", + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + 
"outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with upsert", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 6 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 5 + }, + "1": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 6 + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 5 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 6 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 5, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 6, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-pipeline.json b/test/crud/unified/client-bulkWrite-update-pipeline.json new file mode 100644 index 0000000000..9dba5ee6c5 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-pipeline.json @@ -0,0 +1,258 @@ +{ + "description": "client bulkWrite update pipeline", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, 
+ "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": {}, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": {}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-update-validation.json b/test/crud/unified/client-bulkWrite-update-validation.json new file mode 100644 index 0000000000..617e711338 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-update-validation.json @@ -0,0 +1,216 @@ +{ + "description": "client-bulkWrite-update-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": 
"crud-tests.coll0", + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/client-bulkWrite-updateOne-sort.json b/test/crud/unified/client-bulkWrite-updateOne-sort.json new file mode 100644 index 0000000000..ef75dcb374 --- /dev/null +++ b/test/crud/unified/client-bulkWrite-updateOne-sort.json @@ -0,0 +1,167 @@ +{ + "description": "client bulkWrite updateOne-sort", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with sort option", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "ops": [ + { + "update": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + 
"multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "nErrors": 0, + "nMatched": 1, + "nModified": 1 + }, + "commandName": "bulkWrite" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/count-collation.json b/test/crud/unified/count-collation.json new file mode 100644 index 0000000000..eef65e0880 --- /dev/null +++ b/test/crud/unified/count-collation.json @@ -0,0 +1,83 @@ +{ + "description": "count-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "Count documents with collation", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "x": "ping" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "Deprecated count with collation", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": { + "x": "ping" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": 1 + } + ] + } + ] +} diff --git a/test/crud/unified/count-empty.json b/test/crud/unified/count-empty.json new file mode 100644 index 0000000000..29d8d76f67 --- /dev/null +++ b/test/crud/unified/count-empty.json @@ -0,0 +1,71 @@ +{ + "description": "count-empty", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [] + } + ], + "tests": [ + { + "description": "Estimated document count with empty collection", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "arguments": {}, + "expectResult": 0 + } + ] + }, + { + "description": "Count documents with empty collection", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 0 + } + ] + }, + { + "description": "Deprecated count with empty collection", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 0 + } + ] + } + ] +} diff --git a/test/crud/unified/count.json b/test/crud/unified/count.json new file mode 100644 index 0000000000..80fff5a30c --- /dev/null +++ b/test/crud/unified/count.json @@ -0,0 +1,148 @@ +{ + "description": "count", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + 
"client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Estimated document count", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "arguments": {}, + "expectResult": 3 + } + ] + }, + { + "description": "Count documents without a filter", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 3 + } + ] + }, + { + "description": "Count documents with a filter", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": 2 + } + ] + }, + { + "description": "Count documents with skip and limit", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {}, + "skip": 1, + "limit": 3 + }, + "expectResult": 2 + } + ] + }, + { + "description": "Deprecated count without a filter", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 3 + } + ] + }, + { + "description": "Deprecated count with a filter", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": 2 + } + ] + }, + { + "description": "Deprecated count with skip and limit", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {}, + "skip": 1, + "limit": 3 + }, + "expectResult": 2 + } + ] + } + ] +} diff --git a/test/crud/unified/create-null-ids.json b/test/crud/unified/create-null-ids.json new file mode 100644 index 0000000000..8e0c3ac5d1 --- /dev/null +++ b/test/crud/unified/create-null-ids.json @@ -0,0 +1,253 @@ +{ + "description": "create-null-ids", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "crud_id" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "type_tests" + } + } + ], + "initialData": [ + { + "collectionName": "type_tests", + "databaseName": "crud_id", + "documents": [] + } + ], + "tests": [ + { + "description": "inserting _id with type null via insertOne", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": null + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via insertMany", + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": null + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via updateOne", + "operations": [ + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "_id": null + }, 
+ "update": { + "$unset": { + "a": "" + } + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via updateMany", + "operations": [ + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": { + "_id": null + }, + "update": { + "$unset": { + "a": "" + } + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via replaceOne", + "operations": [ + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "_id": null + }, + "upsert": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via bulkWrite", + "operations": [ + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": null + } + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + }, + { + "description": "inserting _id with type null via clientBulkWrite", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud_id.type_tests", + "document": { + "_id": null + } + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": { + "_id": { + "$type": "null" + } + } + }, + "expectResult": 1 + } + ] + } + ] +} diff --git a/test/crud/unified/db-aggregate-write-readPreference.json b/test/crud/unified/db-aggregate-write-readPreference.json index 2a81282de8..b6460f001f 100644 --- a/test/crud/unified/db-aggregate-write-readPreference.json +++ b/test/crud/unified/db-aggregate-write-readPreference.json @@ -52,13 +52,6 @@ } } ], - "initialData": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [] - } - ], "tests": [ { "description": "Database-level aggregate with $out includes read preference for 5.0+ server", @@ -141,17 +134,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -235,17 +217,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -332,17 +303,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] }, { @@ -429,17 +389,6 @@ } ] } - ], - "outcome": [ - { - "collectionName": "coll0", - "databaseName": "db0", - "documents": [ - { - "_id": 1 - } - ] - } ] } ] diff --git a/test/crud/unified/deleteMany-collation.json b/test/crud/unified/deleteMany-collation.json new file mode 100644 index 0000000000..23d2f037cb --- /dev/null +++ b/test/crud/unified/deleteMany-collation.json @@ -0,0 +1,86 @@ +{ + "description": "deleteMany-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + 
"createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany when many documents match with collation", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteMany.json b/test/crud/unified/deleteMany.json new file mode 100644 index 0000000000..36cdff8dc0 --- /dev/null +++ b/test/crud/unified/deleteMany.json @@ -0,0 +1,115 @@ +{ + "description": "deleteMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany when many documents match", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "DeleteMany when no document matches", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": { + "_id": 4 + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-collation.json b/test/crud/unified/deleteOne-collation.json new file mode 100644 index 0000000000..44bab6e120 --- /dev/null +++ b/test/crud/unified/deleteOne-collation.json @@ -0,0 +1,90 @@ +{ + "description": "deleteOne-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne when many documents matches with collation", + "operations": [ + { 
+ "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne-errorResponse.json b/test/crud/unified/deleteOne-errorResponse.json new file mode 100644 index 0000000000..1f3a266f1e --- /dev/null +++ b/test/crud/unified/deleteOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "deleteOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "delete operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/deleteOne.json b/test/crud/unified/deleteOne.json new file mode 100644 index 0000000000..8177b2fb6b --- /dev/null +++ b/test/crud/unified/deleteOne.json @@ -0,0 +1,136 @@ +{ + "description": "deleteOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne when many documents match", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ] + }, + { + "description": "DeleteOne when one document matches", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 2 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "DeleteOne when no documents match", + "operations": [ + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 4 + } + }, + "expectResult": { + "deletedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + 
"x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/distinct-collation.json b/test/crud/unified/distinct-collation.json new file mode 100644 index 0000000000..e40cb0b2cf --- /dev/null +++ b/test/crud/unified/distinct-collation.json @@ -0,0 +1,69 @@ +{ + "description": "distinct-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "string": "PING" + }, + { + "_id": 2, + "string": "ping" + } + ] + } + ], + "tests": [ + { + "description": "Distinct with a collation", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "string", + "filter": {}, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": [ + "PING" + ] + } + ] + } + ] +} diff --git a/test/crud/unified/distinct-comment.json b/test/crud/unified/distinct-comment.json index 0669d4f30a..11bce9ac9d 100644 --- a/test/crud/unified/distinct-comment.json +++ b/test/crud/unified/distinct-comment.json @@ -64,7 +64,11 @@ "key": "value" } }, - "expectResult": [ 11, 22, 33 ] + "expectResult": [ + 11, + 22, + 33 + ] } ], "expectEvents": [ @@ -105,7 +109,11 @@ "filter": {}, "comment": "comment" }, - "expectResult": [ 11, 22, 33 ] + "expectResult": [ + 11, + 22, + 33 + ] } ], "expectEvents": [ diff --git a/test/crud/unified/distinct-hint.json b/test/crud/unified/distinct-hint.json new file mode 100644 index 0000000000..2a6869cbe0 --- /dev/null +++ b/test/crud/unified/distinct-hint.json @@ -0,0 +1,139 @@ +{ + "description": "distinct-hint", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "7.1.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "distinct-hint-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "distinct-hint-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "distinct with hint string", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": { + "_id": 1 + }, + "hint": "_id_" + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": { + "_id": 1 + }, + "hint": "_id_" + }, + "commandName": "distinct", + "databaseName": "distinct-hint-tests" + } + } + ] + } + ] + }, + { + "description": "distinct with hint document", + "operations": [ + { + "name": "distinct", + "object": "collection0", + "arguments": { + "fieldName": "x", + "filter": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "expectResult": [ + 11 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { 
+ "commandStartedEvent": { + "command": { + "distinct": "coll0", + "key": "x", + "query": { + "_id": 1 + }, + "hint": { + "_id": 1 + } + }, + "commandName": "distinct", + "databaseName": "distinct-hint-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/distinct.json b/test/crud/unified/distinct.json new file mode 100644 index 0000000000..9accffabc9 --- /dev/null +++ b/test/crud/unified/distinct.json @@ -0,0 +1,86 @@ +{ + "description": "distinct", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Distinct without a filter", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectResult": [ + 11, + 22, + 33 + ] + } + ] + }, + { + "description": "Distinct with a filter", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ] + } + ] +} diff --git a/test/crud/unified/estimatedDocumentCount.json b/test/crud/unified/estimatedDocumentCount.json index 1b650c1cb6..3577d9006b 100644 --- a/test/crud/unified/estimatedDocumentCount.json +++ b/test/crud/unified/estimatedDocumentCount.json @@ -249,7 +249,7 @@ "name": "estimatedDocumentCount", "object": "collection0", "expectError": { - "isError": true + "isClientError": true } } ], diff --git a/test/crud/unified/find-collation.json b/test/crud/unified/find-collation.json new file mode 100644 index 0000000000..13b105ad5a --- /dev/null +++ b/test/crud/unified/find-collation.json @@ -0,0 +1,69 @@ +{ + "description": "find-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "Find with a collation", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "x": "PING" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": "ping" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/find.json b/test/crud/unified/find.json index 275d5d351a..325cd96c21 100644 --- a/test/crud/unified/find.json +++ b/test/crud/unified/find.json @@ -151,6 +151,154 @@ ] } ] + }, + { + "description": "Find with filter", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "Find with filter, sort, skip, and limit", + "operations": [ + { + "object": "collection0", + "name": "find", + 
"arguments": { + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2, + "limit": 2 + }, + "expectResult": [ + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ] + }, + { + "description": "Find with limit, sort, and batchsize", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4, + "batchSize": 2 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "Find with batchSize equal to limit", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": 1 + }, + "limit": 4, + "batchSize": 4 + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "limit": 4, + "batchSize": 5 + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] } ] } diff --git a/test/crud/unified/findOne.json b/test/crud/unified/findOne.json new file mode 100644 index 0000000000..826c0f5dfd --- /dev/null +++ b/test/crud/unified/findOne.json @@ -0,0 +1,158 @@ +{ + "description": "findOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "find-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "find-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + } + ] + } + ], + "tests": [ + { + "description": "FindOne with filter", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": 1 + }, + "batchSize": { + "$$exists": false + }, + "limit": 1, + "singleBatch": true + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne with filter, sort, and skip", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2 + }, + "expectResult": { + "_id": 5, + "x": 55 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2, + "batchSize": { + "$$exists": false + }, + "limit": 1, + "singleBatch": true + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete-collation.json b/test/crud/unified/findOneAndDelete-collation.json new file mode 100644 index 
0000000000..a0452876a3 --- /dev/null +++ b/test/crud/unified/findOneAndDelete-collation.json @@ -0,0 +1,98 @@ +{ + "description": "findOneAndDelete-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete when one document matches with collation", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 2, + "x": "PING" + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "x": "ping" + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndDelete.json b/test/crud/unified/findOneAndDelete.json new file mode 100644 index 0000000000..e434b3b740 --- /dev/null +++ b/test/crud/unified/findOneAndDelete.json @@ -0,0 +1,171 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete when many documents match", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete when one document matches", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 2 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete when no documents match", + "operations": [ + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "_id": 4 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + 
"_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-collation.json b/test/crud/unified/findOneAndReplace-collation.json new file mode 100644 index 0000000000..0d60d54164 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-collation.json @@ -0,0 +1,97 @@ +{ + "description": "findOneAndReplace-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace when one document matches with collation returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "x": "PING" + }, + "replacement": { + "x": "pong" + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "x": "pong" + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace-upsert.json b/test/crud/unified/findOneAndReplace-upsert.json new file mode 100644 index 0000000000..f1f18996c8 --- /dev/null +++ b/test/crud/unified/findOneAndReplace-upsert.json @@ -0,0 +1,254 @@ +{ + "description": "findOneAndReplace-upsert", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace when no documents match without id specified with upsert returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "upsert": true + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match without id specified with upsert returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": 
{ + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "x": 44 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match with id specified with upsert returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "upsert": true + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match with id specified with upsert returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "x": 44 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndReplace.json b/test/crud/unified/findOneAndReplace.json new file mode 100644 index 0000000000..a4731602c4 --- /dev/null +++ b/test/crud/unified/findOneAndReplace.json @@ -0,0 +1,332 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace when many documents match returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 32 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when many documents match returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 32 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + 
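The returnDocument/upsert matrix in findOneAndReplace-upsert.json above corresponds to PyMongo's return_document and upsert parameters. A sketch under the same assumptions (local deployment, spec initial data); note that with "Before" semantics an upsert that matches nothing returns None even though it inserts a document:

    from pymongo import MongoClient, ReturnDocument

    coll = MongoClient()["crud-v1"]["coll"]

    # No document has _id 4: the upsert inserts {"_id": 4, "x": 44}, but the
    # pre-image is empty, so the "before" form returns None.
    before = coll.find_one_and_replace(
        {"_id": 4}, {"x": 44}, projection={"x": 1, "_id": 0}, upsert=True
    )
    assert before is None

    # Asking for the post-image instead returns the (now existing) document.
    after = coll.find_one_and_replace(
        {"_id": 4}, {"x": 44},
        projection={"x": 1, "_id": 0},
        upsert=True,
        return_document=ReturnDocument.AFTER,
    )
    assert after == {"x": 44}
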
"databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 32 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when one document matches returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 32 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 32 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when one document matches returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 2 + }, + "replacement": { + "x": 32 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 32 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 32 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace when no documents match returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-arrayFilters.json b/test/crud/unified/findOneAndUpdate-arrayFilters.json new file mode 100644 index 0000000000..6c99e4ff66 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-arrayFilters.json @@ -0,0 +1,251 @@ +{ + "description": "findOneAndUpdate-arrayFilters", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate when no 
document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 4 + } + ] + }, + "expectResult": { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when one document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + }, + "expectResult": { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when multiple documents match arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + }, + "expectResult": { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-collation.json b/test/crud/unified/findOneAndUpdate-collation.json new file mode 100644 index 0000000000..7a49347a3a --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-collation.json @@ -0,0 +1,106 @@ +{ + "description": "findOneAndUpdate-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate when many documents match with collation returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "x": "PING" + }, + "update": { + "$set": { + "x": "pong" + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "_id": 1 + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "x": "ping" + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-errorResponse.json 
b/test/crud/unified/findOneAndUpdate-errorResponse.json new file mode 100644 index 0000000000..5023a450f3 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-errorResponse.json @@ -0,0 +1,132 @@ +{ + "description": "findOneAndUpdate-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": "foo" + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate DuplicateKey error is accessible", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "createIndex", + "object": "collection0", + "arguments": { + "keys": { + "x": 1 + }, + "unique": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": "foo" + } + }, + "upsert": true + }, + "expectError": { + "errorCode": 11000, + "errorResponse": { + "keyPattern": { + "x": 1 + }, + "keyValue": { + "x": "foo" + } + } + } + } + ] + }, + { + "description": "findOneAndUpdate document validation errInfo is accessible", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "modifyCollection", + "object": "database0", + "arguments": { + "collection": "test", + "validator": { + "x": { + "$type": "string" + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 121, + "errorResponse": { + "errInfo": { + "failingDocumentId": 1, + "details": { + "$$type": "object" + } + } + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate-pipeline.json b/test/crud/unified/findOneAndUpdate-pipeline.json new file mode 100644 index 0000000000..81dba9ae93 --- /dev/null +++ b/test/crud/unified/findOneAndUpdate-pipeline.json @@ -0,0 +1,130 @@ +{ + "description": "findOneAndUpdate-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate using pipelines", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "commandName": "findAndModify", + "databaseName": "crud-tests" + } 
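The errorResponse assertions in findOneAndUpdate-errorResponse.json verify that the raw server reply is surfaced on write errors. PyMongo exposes that reply as the exception's details attribute; a sketch of the DuplicateKey case, assuming a local 4.2+ server (keyPattern/keyValue appear in 4.2+, matching the test's runOnRequirements):

    from pymongo import MongoClient
    from pymongo.errors import DuplicateKeyError

    coll = MongoClient()["crud-tests"]["test"]
    coll.insert_one({"_id": 1, "x": "foo"})
    coll.create_index("x", unique=True)

    try:
        coll.find_one_and_update({"_id": 2}, {"$set": {"x": "foo"}}, upsert=True)
    except DuplicateKeyError as exc:
        # exc.details is the server's error document, the same object the
        # spec test's errorResponse matcher inspects.
        assert exc.details["keyPattern"] == {"x": 1}
        assert exc.details["keyValue"] == {"x": "foo"}
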
+ } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/findOneAndUpdate.json b/test/crud/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..d79cf8ac5b --- /dev/null +++ b/test/crud/unified/findOneAndUpdate.json @@ -0,0 +1,448 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate when many documents match returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when many documents match returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 23 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when one document matches returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 22 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when one document matches returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": { + "x": 23 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when no documents match 
returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when no documents match with upsert returning the document before modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "upsert": true + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when no documents match returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + } + }, + "expectResult": null + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate when no documents match with upsert returning the document after modification", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "projection": { + "x": 1, + "_id": 0 + }, + "returnDocument": "After", + "sort": { + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "x": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertMany.json b/test/crud/unified/insertMany.json new file mode 100644 index 0000000000..643b7f44de --- /dev/null +++ b/test/crud/unified/insertMany.json @@ -0,0 +1,205 @@ +{ + "description": "insertMany", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertMany with non-existing documents", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { 
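One detail worth highlighting in the findOneAndUpdate.json tests above: when an upsert matches nothing, $inc is applied to a missing field, so the upserted document ends up with x: 1. The equivalent PyMongo call, sketched under the same assumptions as the earlier examples:

    from pymongo import MongoClient, ReturnDocument

    coll = MongoClient()["crud-v1"]["coll"]

    # $inc on a missing field treats it as 0, so the upsert creates
    # {"_id": 4, "x": 1}; the post-image reflects that.
    doc = coll.find_one_and_update(
        {"_id": 4},
        {"$inc": {"x": 1}},
        projection={"x": 1, "_id": 0},
        upsert=True,
        return_document=ReturnDocument.AFTER,
    )
    assert doc == {"x": 1}
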
+ "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertMany continue-on-error behavior with unordered (preexisting duplicate key)", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertMany continue-on-error behavior with unordered (duplicate key in requests)", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne-errorResponse.json b/test/crud/unified/insertOne-errorResponse.json new file mode 100644 index 0000000000..04ea6a7451 --- /dev/null +++ b/test/crud/unified/insertOne-errorResponse.json @@ -0,0 +1,82 @@ +{ + "description": "insertOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "insert operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/insertOne.json b/test/crud/unified/insertOne.json new file mode 100644 index 0000000000..1a90913476 --- /dev/null +++ b/test/crud/unified/insertOne.json @@ -0,0 +1,77 @@ +{ + "description": "insertOne", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": 
"database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne with a non-existing document", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-collation.json b/test/crud/unified/replaceOne-collation.json new file mode 100644 index 0000000000..dd76b9d616 --- /dev/null +++ b/test/crud/unified/replaceOne-collation.json @@ -0,0 +1,92 @@ +{ + "description": "replaceOne-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne when one document matches with collation", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "x": "PING" + }, + "replacement": { + "_id": 2, + "x": "pong" + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne-sort.json b/test/crud/unified/replaceOne-sort.json new file mode 100644 index 0000000000..cf2271dda5 --- /dev/null +++ b/test/crud/unified/replaceOne-sort.json @@ -0,0 +1,232 @@ +{ + "description": "replaceOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + 
"updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 1 + } + ] + } + ] + }, + { + "description": "replaceOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 1 + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/replaceOne.json b/test/crud/unified/replaceOne.json new file mode 100644 index 0000000000..bdb7556f2f --- /dev/null +++ b/test/crud/unified/replaceOne.json @@ -0,0 +1,259 @@ +{ + "description": "replaceOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne when many documents match", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ] + }, + { + "description": "ReplaceOne when one document matches", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "ReplaceOne when no documents match", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 1 + } + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + 
"outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "ReplaceOne with upsert when no documents match without an id specified", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "ReplaceOne with upsert when no documents match with an id specified", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 4 + }, + "replacement": { + "_id": 4, + "x": 1 + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-arrayFilters.json b/test/crud/unified/updateMany-arrayFilters.json new file mode 100644 index 0000000000..8730caeb42 --- /dev/null +++ b/test/crud/unified/updateMany-arrayFilters.json @@ -0,0 +1,233 @@ +{ + "description": "updateMany-arrayFilters", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany when no documents match arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 4 + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "UpdateMany when one document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + 
"y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + } + ] + } + ] + }, + { + "description": "UpdateMany when multiple documents match arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 2 + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-collation.json b/test/crud/unified/updateMany-collation.json new file mode 100644 index 0000000000..0c780a3c2d --- /dev/null +++ b/test/crud/unified/updateMany-collation.json @@ -0,0 +1,101 @@ +{ + "description": "updateMany-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + }, + { + "_id": 3, + "x": "pINg" + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany when many documents match with collation", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "x": "ping" + }, + "update": { + "$set": { + "x": "pong" + } + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + }, + { + "_id": 3, + "x": "pong" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany-pipeline.json b/test/crud/unified/updateMany-pipeline.json new file mode 100644 index 0000000000..e0f6d9d4a4 --- /dev/null +++ b/test/crud/unified/updateMany-pipeline.json @@ -0,0 +1,142 @@ +{ + "description": "updateMany-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": {}, + "u": [ + { + "$project": { + "x": 1 + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateMany.json b/test/crud/unified/updateMany.json new file mode 100644 index 0000000000..19b890592b --- /dev/null +++ b/test/crud/unified/updateMany.json @@ -0,0 +1,236 @@ +{ + "description": "updateMany", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany when many documents match", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "UpdateMany when one document matches", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany when no documents match", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateMany with upsert when no documents match", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 
33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-arrayFilters.json b/test/crud/unified/updateOne-arrayFilters.json new file mode 100644 index 0000000000..be5d05b01e --- /dev/null +++ b/test/crud/unified/updateOne-arrayFilters.json @@ -0,0 +1,453 @@ +{ + "description": "updateOne-arrayFilters", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.5.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne when no document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 4 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ] + }, + { + "description": "UpdateOne when one document matches arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 3 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 2 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ] + }, + { + "description": "UpdateOne when multiple documents match arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": {}, + "update": { + "$set": { + "y.$[i].b": 2 + } + }, + "arrayFilters": [ + { + "i.b": 1 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 2 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ] + }, + { + "description": "UpdateOne when no documents match multiple arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "y.$[i].c.$[j].d": 0 + } + }, + "arrayFilters": [ + { + "i.b": 5 + }, + { + 
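The upsert case in updateMany.json above reports upsertedId: 4, taken from the filter. PyMongo exposes the same counters and id on UpdateResult; a sketch:

    from pymongo import MongoClient

    coll = MongoClient()["crud-v1"]["coll"]

    result = coll.update_many({"_id": 4}, {"$inc": {"x": 1}}, upsert=True)
    assert result.matched_count == 0
    assert result.modified_count == 0
    assert result.upserted_id == 4  # maps to upsertedId in the spec test
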
"j.d": 3 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 1 + } + ] + } + ] + } + ] + } + ] + }, + { + "description": "UpdateOne when one document matches multiple arrayFilters", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "y.$[i].c.$[j].d": 0 + } + }, + "arrayFilters": [ + { + "i.b": 5 + }, + { + "j.d": 1 + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "y": [ + { + "b": 3 + }, + { + "b": 1 + } + ] + }, + { + "_id": 2, + "y": [ + { + "b": 0 + }, + { + "b": 1 + } + ] + }, + { + "_id": 3, + "y": [ + { + "b": 5, + "c": [ + { + "d": 2 + }, + { + "d": 0 + } + ] + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-collation.json b/test/crud/unified/updateOne-collation.json new file mode 100644 index 0000000000..a39be46054 --- /dev/null +++ b/test/crud/unified/updateOne-collation.json @@ -0,0 +1,93 @@ +{ + "description": "updateOne-collation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.4", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "ping" + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne when one document matches with collation", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "x": "PING" + }, + "update": { + "$set": { + "x": "pong" + } + }, + "collation": { + "locale": "en_US", + "strength": 2 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": "pong" + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-errorResponse.json b/test/crud/unified/updateOne-errorResponse.json new file mode 100644 index 0000000000..0ceddbc4fc --- /dev/null +++ b/test/crud/unified/updateOne-errorResponse.json @@ -0,0 +1,87 @@ +{ + "description": "updateOne-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "tests": [ + { + "description": "update operations support errorResponse assertions", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": 
"4.2.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 8 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-pipeline.json b/test/crud/unified/updateOne-pipeline.json new file mode 100644 index 0000000000..1348c6b53b --- /dev/null +++ b/test/crud/unified/updateOne-pipeline.json @@ -0,0 +1,150 @@ +{ + "description": "updateOne-pipeline", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1, + "y": 1, + "t": { + "u": { + "v": 1 + } + } + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne using pipelines", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ] + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": [ + { + "$replaceRoot": { + "newRoot": "$t" + } + }, + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + }, + "commandName": "update", + "databaseName": "crud-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "u": { + "v": 1 + }, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "y": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne-sort.json b/test/crud/unified/updateOne-sort.json new file mode 100644 index 0000000000..8fe4f50b94 --- /dev/null +++ b/test/crud/unified/updateOne-sort.json @@ -0,0 +1,240 @@ +{ + "description": "updateOne-sort", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne with sort option", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + 
"operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + }, + { + "commandSucceededEvent": { + "reply": { + "ok": 1, + "n": 1 + }, + "commandName": "update" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "updateOne with sort option unsupported (server-side error)", + "runOnRequirements": [ + { + "maxServerVersion": "7.99" + } + ], + "operations": [ + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": -1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll0", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "sort": { + "_id": -1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateOne.json b/test/crud/unified/updateOne.json new file mode 100644 index 0000000000..a3f559673e --- /dev/null +++ b/test/crud/unified/updateOne.json @@ -0,0 +1,216 @@ +{ + "description": "updateOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-v1" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne when many documents match", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ] + }, + { + "description": "UpdateOne when one document matches", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + 
"databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateOne when no documents match", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "UpdateOne with upsert when no documents match", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "crud-v1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/crud/unified/updateWithPipelines.json b/test/crud/unified/updateWithPipelines.json deleted file mode 100644 index 164f2f6a19..0000000000 --- a/test/crud/unified/updateWithPipelines.json +++ /dev/null @@ -1,494 +0,0 @@ -{ - "description": "updateWithPipelines", - "schemaVersion": "1.0", - "runOnRequirements": [ - { - "minServerVersion": "4.1.11" - } - ], - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "crud-tests" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "test" - } - } - ], - "initialData": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 1, - "y": 1, - "t": { - "u": { - "v": 1 - } - } - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - ], - "tests": [ - { - "description": "UpdateOne using pipelines", - "operations": [ - { - "object": "collection0", - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "expectResult": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": { - "$$unsetOrMatches": false - }, - "upsert": { - "$$unsetOrMatches": false - } - } - ] - }, - "commandName": "update", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "u": { - "v": 1 - }, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - ] - }, - { - "description": "UpdateMany using pipelines", - "operations": [ - { - "object": "collection0", - "name": "updateMany", - "arguments": { - "filter": {}, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - 
"expectResult": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": true, - "upsert": { - "$$unsetOrMatches": false - } - } - ] - }, - "commandName": "update", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "foo": 1 - } - ] - } - ] - }, - { - "description": "FindOneAndUpdate using pipelines", - "operations": [ - { - "object": "collection0", - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "findAndModify": "test", - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - }, - "commandName": "findAndModify", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - ] - }, - { - "description": "UpdateOne in bulk write using pipelines", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateOne": { - "filter": { - "_id": 1 - }, - "update": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ] - }, - "expectResult": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": [ - { - "$replaceRoot": { - "newRoot": "$t" - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": { - "$$unsetOrMatches": false - }, - "upsert": { - "$$unsetOrMatches": false - } - } - ] - }, - "commandName": "update", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - "u": { - "v": 1 - }, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "y": 1 - } - ] - } - ] - }, - { - "description": "UpdateMany in bulk write using pipelines", - "operations": [ - { - "object": "collection0", - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "updateMany": { - "filter": {}, - "update": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ] - } - } - ] - }, - "expectResult": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "update": "test", - "updates": [ - { - "q": {}, - "u": [ - { - "$project": { - "x": 1 - } - }, - { - "$addFields": { - "foo": 1 - } - } - ], - "multi": true, - "upsert": { - "$$unsetOrMatches": false - } - } - ] - }, - "commandName": "update", - "databaseName": "crud-tests" - } - } - ] - } - ], - "outcome": [ - { - "collectionName": "test", - "databaseName": "crud-tests", - "documents": [ - { - "_id": 1, - 
"x": 1, - "foo": 1 - }, - { - "_id": 2, - "x": 2, - "foo": 1 - } - ] - } - ] - } - ] -} diff --git a/test/crud/v1/read/aggregate-collation.json b/test/crud/v1/read/aggregate-collation.json deleted file mode 100644 index d958e447bf..0000000000 --- a/test/crud/v1/read/aggregate-collation.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": "ping" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "Aggregate with collation", - "operation": { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "x": "PING" - } - } - ], - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": [ - { - "_id": 1, - "x": "ping" - } - ] - } - } - ] -} diff --git a/test/crud/v1/read/aggregate-out.json b/test/crud/v1/read/aggregate-out.json deleted file mode 100644 index c195e163e0..0000000000 --- a/test/crud/v1/read/aggregate-out.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "minServerVersion": "2.6", - "serverless": "forbid", - "tests": [ - { - "description": "Aggregate with $out", - "operation": { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "batchSize": 2 - } - }, - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "Aggregate with $out and batch size of 0", - "operation": { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_test_collection" - } - ], - "batchSize": 0 - } - }, - "outcome": { - "collection": { - "name": "other_test_collection", - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/read/aggregate.json b/test/crud/v1/read/aggregate.json deleted file mode 100644 index 797a922395..0000000000 --- a/test/crud/v1/read/aggregate.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Aggregate with multiple stages", - "operation": { - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$sort": { - "x": 1 - } - }, - { - "$match": { - "_id": { - "$gt": 1 - } - } - } - ], - "batchSize": 2 - } - }, - "outcome": { - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - ] -} diff --git a/test/crud/v1/read/count-collation.json b/test/crud/v1/read/count-collation.json deleted file mode 100644 index 7d61508493..0000000000 --- a/test/crud/v1/read/count-collation.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": "PING" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "Count documents with collation", - "operation": { - "name": "countDocuments", - "arguments": { - "filter": { - "x": "ping" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": 1 - } - }, - { - "description": "Deprecated count with collation", - "operation": { - "name": "count", - "arguments": { - "filter": { - "x": "ping" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - 
"outcome": { - "result": 1 - } - } - ] -} diff --git a/test/crud/v1/read/count-empty.json b/test/crud/v1/read/count-empty.json deleted file mode 100644 index 2b8627e0c6..0000000000 --- a/test/crud/v1/read/count-empty.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "data": [], - "tests": [ - { - "description": "Estimated document count with empty collection", - "operation": { - "name": "estimatedDocumentCount", - "arguments": {} - }, - "outcome": { - "result": 0 - } - }, - { - "description": "Count documents with empty collection", - "operation": { - "name": "countDocuments", - "arguments": { - "filter": {} - } - }, - "outcome": { - "result": 0 - } - }, - { - "description": "Deprecated count with empty collection", - "operation": { - "name": "count", - "arguments": { - "filter": {} - } - }, - "outcome": { - "result": 0 - } - } - ] -} diff --git a/test/crud/v1/read/count.json b/test/crud/v1/read/count.json deleted file mode 100644 index 9642b2fbd0..0000000000 --- a/test/crud/v1/read/count.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Estimated document count", - "operation": { - "name": "estimatedDocumentCount", - "arguments": {} - }, - "outcome": { - "result": 3 - } - }, - { - "description": "Count documents without a filter", - "operation": { - "name": "countDocuments", - "arguments": { - "filter": {} - } - }, - "outcome": { - "result": 3 - } - }, - { - "description": "Count documents with a filter", - "operation": { - "name": "countDocuments", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "outcome": { - "result": 2 - } - }, - { - "description": "Count documents with skip and limit", - "operation": { - "name": "countDocuments", - "arguments": { - "filter": {}, - "skip": 1, - "limit": 3 - } - }, - "outcome": { - "result": 2 - } - }, - { - "description": "Deprecated count without a filter", - "operation": { - "name": "count", - "arguments": { - "filter": {} - } - }, - "outcome": { - "result": 3 - } - }, - { - "description": "Deprecated count with a filter", - "operation": { - "name": "count", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "outcome": { - "result": 2 - } - }, - { - "description": "Deprecated count with skip and limit", - "operation": { - "name": "count", - "arguments": { - "filter": {}, - "skip": 1, - "limit": 3 - } - }, - "outcome": { - "result": 2 - } - } - ] -} diff --git a/test/crud/v1/read/distinct-collation.json b/test/crud/v1/read/distinct-collation.json deleted file mode 100644 index 984991a43b..0000000000 --- a/test/crud/v1/read/distinct-collation.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "string": "PING" - }, - { - "_id": 2, - "string": "ping" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "Distinct with a collation", - "operation": { - "name": "distinct", - "arguments": { - "fieldName": "string", - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": [ - "PING" - ] - } - } - ] -} diff --git a/test/crud/v1/read/distinct.json b/test/crud/v1/read/distinct.json deleted file mode 100644 index a57ee36a83..0000000000 --- a/test/crud/v1/read/distinct.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Distinct without a filter", - "operation": { - 
"name": "distinct", - "arguments": { - "fieldName": "x", - "filter": {} - } - }, - "outcome": { - "result": [ - 11, - 22, - 33 - ] - } - }, - { - "description": "Distinct with a filter", - "operation": { - "name": "distinct", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "outcome": { - "result": [ - 22, - 33 - ] - } - } - ] -} diff --git a/test/crud/v1/read/find-collation.json b/test/crud/v1/read/find-collation.json deleted file mode 100644 index 4e56c05253..0000000000 --- a/test/crud/v1/read/find-collation.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": "ping" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "Find with a collation", - "operation": { - "name": "find", - "arguments": { - "filter": { - "x": "PING" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": [ - { - "_id": 1, - "x": "ping" - } - ] - } - } - ] -} diff --git a/test/crud/v1/read/find.json b/test/crud/v1/read/find.json deleted file mode 100644 index 3597e37be6..0000000000 --- a/test/crud/v1/read/find.json +++ /dev/null @@ -1,105 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "Find with filter", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "result": [ - { - "_id": 1, - "x": 11 - } - ] - } - }, - { - "description": "Find with filter, sort, skip, and limit", - "operation": { - "name": "find", - "arguments": { - "filter": { - "_id": { - "$gt": 2 - } - }, - "sort": { - "_id": 1 - }, - "skip": 2, - "limit": 2 - } - }, - "outcome": { - "result": [ - { - "_id": 5, - "x": 55 - } - ] - } - }, - { - "description": "Find with limit, sort, and batchsize", - "operation": { - "name": "find", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4, - "batchSize": 2 - } - }, - "outcome": { - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - ] -} diff --git a/test/crud/v1/write/bulkWrite-arrayFilters.json b/test/crud/v1/write/bulkWrite-arrayFilters.json deleted file mode 100644 index 99e73f5d75..0000000000 --- a/test/crud/v1/write/bulkWrite-arrayFilters.json +++ /dev/null @@ -1,111 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ], - "minServerVersion": "3.5.6", - "tests": [ - { - "description": "BulkWrite with arrayFilters", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 3 - } - ] - } - }, - { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 1 - } - ] - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 3, - "modifiedCount": 3, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 2 - }, - { - "b": 2 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 2 - } - ] - } - ] - } - } 
- } - ] -} diff --git a/test/crud/v1/write/bulkWrite-collation.json b/test/crud/v1/write/bulkWrite-collation.json deleted file mode 100644 index bc90aa8172..0000000000 --- a/test/crud/v1/write/bulkWrite-collation.json +++ /dev/null @@ -1,218 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - }, - { - "_id": 3, - "x": "pINg" - }, - { - "_id": 4, - "x": "pong" - }, - { - "_id": 5, - "x": "pONg" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "BulkWrite with delete operations and collation", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "x": "PING" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "x": "PING" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - { - "name": "deleteMany", - "arguments": { - "filter": { - "x": "PONG" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 4, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - }, - { - "description": "BulkWrite with update operations and collation", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateMany", - "arguments": { - "filter": { - "x": "ping" - }, - "update": { - "$set": { - "x": "PONG" - } - }, - "collation": { - "locale": "en_US", - "strength": 3 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "x": "ping" - }, - "update": { - "$set": { - "x": "PONG" - } - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "x": "ping" - }, - "replacement": { - "_id": 6, - "x": "ping" - }, - "upsert": true, - "collation": { - "locale": "en_US", - "strength": 3 - } - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "x": "pong" - }, - "update": { - "$set": { - "x": "PONG" - } - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 6, - "modifiedCount": 4, - "upsertedCount": 1, - "upsertedIds": { - "2": 6 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "PONG" - }, - { - "_id": 3, - "x": "PONG" - }, - { - "_id": 4, - "x": "PONG" - }, - { - "_id": 5, - "x": "PONG" - }, - { - "_id": 6, - "x": "ping" - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/bulkWrite.json b/test/crud/v1/write/bulkWrite.json deleted file mode 100644 index dc00da28ad..0000000000 --- a/test/crud/v1/write/bulkWrite.json +++ /dev/null @@ -1,778 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "minServerVersion": "2.6", - "tests": [ - { - "description": "BulkWrite with deleteOne operations", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 3 - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 2 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - 
"deletedCount": 1, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - }, - { - "description": "BulkWrite with deleteMany operations", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteMany", - "arguments": { - "filter": { - "x": { - "$lt": 11 - } - } - } - }, - { - "name": "deleteMany", - "arguments": { - "filter": { - "x": { - "$lte": 22 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 2, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [] - } - } - }, - { - "description": "BulkWrite with insertOne operations", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4, - "x": 44 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 2, - "insertedIds": { - "0": 3, - "1": 4 - }, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite with replaceOne operations", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 3 - }, - "replacement": { - "x": 33 - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "x": 12 - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 3 - }, - "replacement": { - "x": 33 - }, - "upsert": true - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 1, - "upsertedIds": { - "2": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "BulkWrite with updateOne operations", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 0 - }, - "update": { - "$set": { - "x": 0 - } - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$set": { - "x": 11 - } - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3 - }, - "update": { - "$set": { - "x": 33 - } - }, - "upsert": true - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 2, - "modifiedCount": 1, - "upsertedCount": 1, - "upsertedIds": { - "3": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "BulkWrite with 
updateMany operations", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateMany", - "arguments": { - "filter": { - "x": { - "$lt": 11 - } - }, - "update": { - "$set": { - "x": 0 - } - } - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "x": { - "$lte": 22 - } - }, - "update": { - "$unset": { - "y": 1 - } - } - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "x": { - "$lte": 22 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "_id": 3 - }, - "update": { - "$set": { - "x": 33 - } - }, - "upsert": true - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 4, - "modifiedCount": 2, - "upsertedCount": 1, - "upsertedIds": { - "3": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "BulkWrite with mixed ordered operations", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4, - "x": 44 - } - } - }, - { - "name": "deleteMany", - "arguments": { - "filter": { - "x": { - "$nin": [ - 24, - 34 - ] - } - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "_id": 4, - "x": 44 - }, - "upsert": true - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 2, - "insertedCount": 2, - "insertedIds": { - "0": 3, - "3": 4 - }, - "matchedCount": 3, - "modifiedCount": 3, - "upsertedCount": 1, - "upsertedIds": { - "5": 4 - } - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 24 - }, - { - "_id": 3, - "x": 34 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite with mixed unordered operations", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 3 - }, - "replacement": { - "_id": 3, - "x": 33 - }, - "upsert": true - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": false - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 1, - "upsertedIds": { - "0": 3 - } - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "BulkWrite continue-on-error behavior with unordered (preexisting duplicate key)", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": 
"insertOne", - "arguments": { - "document": { - "_id": 4, - "x": 44 - } - } - } - ], - "options": { - "ordered": false - } - } - }, - "outcome": { - "error": true, - "result": { - "deletedCount": 0, - "insertedCount": 2, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "BulkWrite continue-on-error behavior with unordered (duplicate key in requests)", - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4, - "x": 44 - } - } - } - ], - "options": { - "ordered": false - } - } - }, - "outcome": { - "error": true, - "result": { - "deletedCount": 0, - "insertedCount": 2, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/deleteMany-collation.json b/test/crud/v1/write/deleteMany-collation.json deleted file mode 100644 index fce75e488a..0000000000 --- a/test/crud/v1/write/deleteMany-collation.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - }, - { - "_id": 3, - "x": "pINg" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "DeleteMany when many documents match with collation", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "x": "PING" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": { - "deletedCount": 2 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/deleteMany.json b/test/crud/v1/write/deleteMany.json deleted file mode 100644 index 7eee85e77f..0000000000 --- a/test/crud/v1/write/deleteMany.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "DeleteMany when many documents match", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "outcome": { - "result": { - "deletedCount": 2 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - }, - { - "description": "DeleteMany when no document matches", - "operation": { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": 4 - } - } - }, - "outcome": { - "result": { - "deletedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/deleteOne-collation.json b/test/crud/v1/write/deleteOne-collation.json deleted file mode 100644 index 9bcef411ef..0000000000 --- a/test/crud/v1/write/deleteOne-collation.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - }, - { - "_id": 3, - "x": "pINg" - } - ], - "minServerVersion": "3.4", - "serverless": 
"forbid", - "tests": [ - { - "description": "DeleteOne when many documents matches with collation", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "x": "PING" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 3, - "x": "pINg" - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/deleteOne.json b/test/crud/v1/write/deleteOne.json deleted file mode 100644 index a1106deae3..0000000000 --- a/test/crud/v1/write/deleteOne.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "DeleteOne when many documents match", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - } - } - }, - { - "description": "DeleteOne when one document matches", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 2 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "DeleteOne when no documents match", - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 4 - } - } - }, - "outcome": { - "result": { - "deletedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/findOneAndDelete-collation.json b/test/crud/v1/write/findOneAndDelete-collation.json deleted file mode 100644 index 32480da842..0000000000 --- a/test/crud/v1/write/findOneAndDelete-collation.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - }, - { - "_id": 3, - "x": "pINg" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "FindOneAndDelete when one document matches with collation", - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 2, - "x": "PING" - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - "x": 1 - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": { - "x": "ping" - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 3, - "x": "pINg" - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/findOneAndDelete.json b/test/crud/v1/write/findOneAndDelete.json deleted file mode 100644 index e424e2aad1..0000000000 --- a/test/crud/v1/write/findOneAndDelete.json +++ /dev/null @@ -1,127 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "FindOneAndDelete when many documents match", - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 22 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndDelete when one document matches", - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 2 - }, - "projection": { - "x": 1, 
- "_id": 0 - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 22 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndDelete when no documents match", - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "_id": 4 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": null, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/findOneAndReplace-collation.json b/test/crud/v1/write/findOneAndReplace-collation.json deleted file mode 100644 index 9b3c25005b..0000000000 --- a/test/crud/v1/write/findOneAndReplace-collation.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "FindOneAndReplace when one document matches with collation returning the document after modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "x": "PING" - }, - "replacement": { - "x": "pong" - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": { - "x": "pong" - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "pong" - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/findOneAndReplace-upsert.json b/test/crud/v1/write/findOneAndReplace-upsert.json deleted file mode 100644 index 0f07bf9c18..0000000000 --- a/test/crud/v1/write/findOneAndReplace-upsert.json +++ /dev/null @@ -1,201 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "minServerVersion": "2.6", - "tests": [ - { - "description": "FindOneAndReplace when no documents match without id specified with upsert returning the document before modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 44 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "upsert": true - } - }, - "outcome": { - "result": null, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "FindOneAndReplace when no documents match without id specified with upsert returning the document after modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 44 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - }, - "upsert": true - } - }, - "outcome": { - "result": { - "x": 44 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "FindOneAndReplace when no documents match with id specified with upsert returning the document before modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "_id": 4, - "x": 44 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "upsert": true - } - }, - "outcome": { - 
"result": null, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - }, - { - "description": "FindOneAndReplace when no documents match with id specified with upsert returning the document after modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "_id": 4, - "x": 44 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - }, - "upsert": true - } - }, - "outcome": { - "result": { - "x": 44 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/findOneAndReplace.json b/test/crud/v1/write/findOneAndReplace.json deleted file mode 100644 index 70e5c3df4e..0000000000 --- a/test/crud/v1/write/findOneAndReplace.json +++ /dev/null @@ -1,273 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "FindOneAndReplace when many documents match returning the document before modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 32 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 22 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 32 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndReplace when many documents match returning the document after modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 32 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 32 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 32 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndReplace when one document matches returning the document before modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 2 - }, - "replacement": { - "x": 32 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 22 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 32 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndReplace when one document matches returning the document after modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 2 - }, - "replacement": { - "x": 32 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 32 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 32 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndReplace when no documents match returning the document before modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 44 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - 
"x": 1 - } - } - }, - "outcome": { - "result": null, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndReplace when no documents match returning the document after modification", - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 44 - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": null, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/findOneAndUpdate-arrayFilters.json b/test/crud/v1/write/findOneAndUpdate-arrayFilters.json deleted file mode 100644 index 1aa13b863e..0000000000 --- a/test/crud/v1/write/findOneAndUpdate-arrayFilters.json +++ /dev/null @@ -1,203 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ], - "minServerVersion": "3.5.6", - "tests": [ - { - "description": "FindOneAndUpdate when no document matches arrayFilters", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 4 - } - ] - } - }, - "outcome": { - "result": { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when one document matches arrayFilters", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 3 - } - ] - } - }, - "outcome": { - "result": { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 2 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when multiple documents match arrayFilters", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 1 - } - ] - } - }, - "outcome": { - "result": { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 2 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/findOneAndUpdate-collation.json b/test/crud/v1/write/findOneAndUpdate-collation.json deleted file mode 100644 index 8abab7bd6b..0000000000 --- a/test/crud/v1/write/findOneAndUpdate-collation.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - }, - { - "_id": 3, - "x": "pINg" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "FindOneAndUpdate when many documents match with collation returning the document before modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "x": "PING" - }, - "update": { - "$set": { - "x": "pong" - } - }, - "projection": { - 
"x": 1, - "_id": 0 - }, - "sort": { - "_id": 1 - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": { - "x": "ping" - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "pong" - }, - { - "_id": 3, - "x": "pINg" - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/findOneAndUpdate.json b/test/crud/v1/write/findOneAndUpdate.json deleted file mode 100644 index 6da8325273..0000000000 --- a/test/crud/v1/write/findOneAndUpdate.json +++ /dev/null @@ -1,379 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "FindOneAndUpdate when many documents match returning the document before modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 22 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when many documents match returning the document after modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 23 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when one document matches returning the document before modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 22 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when one document matches returning the document after modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "x": 23 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when no documents match returning the document before modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "projection": { - "x": 1, - "_id": 0 - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": null, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when no documents match with upsert returning the document before modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - 
"projection": { - "x": 1, - "_id": 0 - }, - "upsert": true - } - }, - "outcome": { - "result": null, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when no documents match returning the document after modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": null, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate when no documents match with upsert returning the document after modification", - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "projection": { - "x": 1, - "_id": 0 - }, - "returnDocument": "After", - "sort": { - "x": 1 - }, - "upsert": true - } - }, - "outcome": { - "result": { - "x": 1 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/insertMany.json b/test/crud/v1/write/insertMany.json deleted file mode 100644 index 6a2e5261b7..0000000000 --- a/test/crud/v1/write/insertMany.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "InsertMany with non-existing documents", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertMany continue-on-error behavior with unordered (preexisting duplicate key)", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": false - } - } - }, - "outcome": { - "error": true, - "result": { - "deletedCount": 0, - "insertedCount": 2, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertMany continue-on-error behavior with unordered (duplicate key in requests)", - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": false - } - } - }, - "outcome": { - "error": true, - "result": { - "deletedCount": 0, - "insertedCount": 2, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/insertOne.json b/test/crud/v1/write/insertOne.json deleted file mode 100644 index 
525de75e07..0000000000 --- a/test/crud/v1/write/insertOne.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "InsertOne with a non-existing document", - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - "outcome": { - "result": { - "insertedId": 2 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/replaceOne-collation.json b/test/crud/v1/write/replaceOne-collation.json deleted file mode 100644 index fa4cbe9970..0000000000 --- a/test/crud/v1/write/replaceOne-collation.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "ReplaceOne when one document matches with collation", - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "x": "PING" - }, - "replacement": { - "_id": 2, - "x": "pong" - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "pong" - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/replaceOne.json b/test/crud/v1/write/replaceOne.json deleted file mode 100644 index 101af25c7c..0000000000 --- a/test/crud/v1/write/replaceOne.json +++ /dev/null @@ -1,205 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "minServerVersion": "2.6", - "tests": [ - { - "description": "ReplaceOne when many documents match", - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "replacement": { - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - }, - { - "description": "ReplaceOne when one document matches", - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "ReplaceOne when no documents match", - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "_id": 4, - "x": 1 - } - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "ReplaceOne with upsert when no documents match without an id specified", - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "x": 1 - }, - "upsert": true - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 4 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - }, - { - "description": "ReplaceOne with upsert when no documents match with an id 
specified", - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 4 - }, - "replacement": { - "_id": 4, - "x": 1 - }, - "upsert": true - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 4 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/updateMany-arrayFilters.json b/test/crud/v1/write/updateMany-arrayFilters.json deleted file mode 100644 index ae4c123ea5..0000000000 --- a/test/crud/v1/write/updateMany-arrayFilters.json +++ /dev/null @@ -1,185 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ], - "minServerVersion": "3.5.6", - "tests": [ - { - "description": "UpdateMany when no documents match arrayFilters", - "operation": { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 4 - } - ] - } - }, - "outcome": { - "result": { - "matchedCount": 2, - "modifiedCount": 0, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ] - } - } - }, - { - "description": "UpdateMany when one document matches arrayFilters", - "operation": { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 3 - } - ] - } - }, - "outcome": { - "result": { - "matchedCount": 2, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 2 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - } - ] - } - } - }, - { - "description": "UpdateMany when multiple documents match arrayFilters", - "operation": { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 1 - } - ] - } - }, - "outcome": { - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 2 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 2 - } - ] - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/updateMany-collation.json b/test/crud/v1/write/updateMany-collation.json deleted file mode 100644 index 8becfd806b..0000000000 --- a/test/crud/v1/write/updateMany-collation.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - }, - { - "_id": 3, - "x": "pINg" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "UpdateMany when many documents match with collation", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "x": "ping" - }, - "update": { - "$set": { - "x": "pong" - } - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "pong" - }, - { - "_id": 3, - "x": "pong" - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/updateMany.json 
b/test/crud/v1/write/updateMany.json deleted file mode 100644 index a3c339987d..0000000000 --- a/test/crud/v1/write/updateMany.json +++ /dev/null @@ -1,183 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "minServerVersion": "2.6", - "tests": [ - { - "description": "UpdateMany when many documents match", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 34 - } - ] - } - } - }, - { - "description": "UpdateMany when one document matches", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "UpdateMany when no documents match", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "UpdateMany with upsert when no documents match", - "operation": { - "name": "updateMany", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 4 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/updateOne-arrayFilters.json b/test/crud/v1/write/updateOne-arrayFilters.json deleted file mode 100644 index 087ed4b82f..0000000000 --- a/test/crud/v1/write/updateOne-arrayFilters.json +++ /dev/null @@ -1,395 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - }, - { - "_id": 3, - "y": [ - { - "b": 5, - "c": [ - { - "d": 2 - }, - { - "d": 1 - } - ] - } - ] - } - ], - "minServerVersion": "3.5.6", - "tests": [ - { - "description": "UpdateOne when no document matches arrayFilters", - "operation": { - "name": "updateOne", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 4 - } - ] - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 0, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - }, - { - "_id": 3, - "y": [ - { - "b": 5, - "c": [ - { - "d": 2 - }, - { - "d": 1 - } - ] - } - ] - } - ] - } - } - }, - { - "description": "UpdateOne when one document matches arrayFilters", - "operation": { - "name": "updateOne", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - 
"arrayFilters": [ - { - "i.b": 3 - } - ] - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 2 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - }, - { - "_id": 3, - "y": [ - { - "b": 5, - "c": [ - { - "d": 2 - }, - { - "d": 1 - } - ] - } - ] - } - ] - } - } - }, - { - "description": "UpdateOne when multiple documents match arrayFilters", - "operation": { - "name": "updateOne", - "arguments": { - "filter": {}, - "update": { - "$set": { - "y.$[i].b": 2 - } - }, - "arrayFilters": [ - { - "i.b": 1 - } - ] - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 2 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - }, - { - "_id": 3, - "y": [ - { - "b": 5, - "c": [ - { - "d": 2 - }, - { - "d": 1 - } - ] - } - ] - } - ] - } - } - }, - { - "description": "UpdateOne when no documents match multiple arrayFilters", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3 - }, - "update": { - "$set": { - "y.$[i].c.$[j].d": 0 - } - }, - "arrayFilters": [ - { - "i.b": 5 - }, - { - "j.d": 3 - } - ] - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 0, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - }, - { - "_id": 3, - "y": [ - { - "b": 5, - "c": [ - { - "d": 2 - }, - { - "d": 1 - } - ] - } - ] - } - ] - } - } - }, - { - "description": "UpdateOne when one document matches multiple arrayFilters", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3 - }, - "update": { - "$set": { - "y.$[i].c.$[j].d": 0 - } - }, - "arrayFilters": [ - { - "i.b": 5 - }, - { - "j.d": 1 - } - ] - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "y": [ - { - "b": 3 - }, - { - "b": 1 - } - ] - }, - { - "_id": 2, - "y": [ - { - "b": 0 - }, - { - "b": 1 - } - ] - }, - { - "_id": 3, - "y": [ - { - "b": 5, - "c": [ - { - "d": 2 - }, - { - "d": 0 - } - ] - } - ] - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/updateOne-collation.json b/test/crud/v1/write/updateOne-collation.json deleted file mode 100644 index 3afdb83e0f..0000000000 --- a/test/crud/v1/write/updateOne-collation.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "ping" - } - ], - "minServerVersion": "3.4", - "serverless": "forbid", - "tests": [ - { - "description": "UpdateOne when one document matches with collation", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "x": "PING" - }, - "update": { - "$set": { - "x": "pong" - } - }, - "collation": { - "locale": "en_US", - "strength": 2 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": "pong" - } - ] - } - } - } - ] -} diff --git a/test/crud/v1/write/updateOne.json b/test/crud/v1/write/updateOne.json deleted file mode 100644 index 76d2086bdb..0000000000 --- a/test/crud/v1/write/updateOne.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "data": [ - { - 
"_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "minServerVersion": "2.6", - "tests": [ - { - "description": "UpdateOne when many documents match", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": { - "$gt": 1 - } - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - } - }, - { - "description": "UpdateOne when one document matches", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "UpdateOne when no documents match", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "UpdateOne with upsert when no documents match", - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 4 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - } - ] -} diff --git a/test/crud_v2_format.py b/test/crud_v2_format.py deleted file mode 100644 index 4118dfef9f..0000000000 --- a/test/crud_v2_format.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2020-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""v2 format CRUD test runner. - -https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.rst -""" - -from test.utils_spec_runner import SpecRunner - - -class TestCrudV2(SpecRunner): - # Default test database and collection names. 
- TEST_DB = None - TEST_COLLECTION = None - - def allowable_errors(self, op): - """Override expected error classes.""" - errors = super(TestCrudV2, self).allowable_errors(op) - errors += (ValueError,) - return errors - - def get_scenario_db_name(self, scenario_def): - """Crud spec says database_name is optional.""" - return scenario_def.get("database_name", self.TEST_DB) - - def get_scenario_coll_name(self, scenario_def): - """Crud spec says collection_name is optional.""" - return scenario_def.get("collection_name", self.TEST_COLLECTION) - - def get_object_name(self, op): - """Crud spec says object is optional and defaults to 'collection'.""" - return op.get("object", "collection") - - def get_outcome_coll_name(self, outcome, collection): - """Crud spec says outcome has an optional 'collection.name'.""" - return outcome["collection"].get("name", collection.name) - - def setup_scenario(self, scenario_def): - """Allow specs to override a test's setup.""" - # PYTHON-1935 Only create the collection if there is data to insert. - if scenario_def["data"]: - super(TestCrudV2, self).setup_scenario(scenario_def) diff --git a/test/csot/bulkWrite.json b/test/csot/bulkWrite.json index 14d5b654f6..9a05809f77 100644 --- a/test/csot/bulkWrite.json +++ b/test/csot/bulkWrite.json @@ -19,7 +19,10 @@ "useMultipleMongoses": false, "observeEvents": [ "commandStartedEvent" - ] + ], + "uriOptions": { + "w": 1 + } } }, { @@ -48,6 +51,13 @@ { "description": "timeoutMS applied to entire bulkWrite, not individual commands", "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": {} + } + }, { "name": "failPoint", "object": "testRunner", @@ -69,15 +79,6 @@ } } }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - } - }, { "name": "bulkWrite", "object": "collection", @@ -114,10 +115,10 @@ "events": [ { "commandStartedEvent": { - "commandName": "find", + "commandName": "insert", "databaseName": "test", "command": { - "find": "coll" + "insert": "coll" } } }, diff --git a/test/csot/change-streams.json b/test/csot/change-streams.json index a8b2b7e170..8cffb08e26 100644 --- a/test/csot/change-streams.json +++ b/test/csot/change-streams.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -104,7 +104,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 250 } } } @@ -114,7 +114,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 50 + "timeoutMS": 200 }, "expectError": { "isTimeoutError": true @@ -242,7 +242,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -252,7 +252,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2, "maxAwaitTimeMS": 1 }, @@ -310,7 +310,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -330,7 +330,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 12, + "blockTimeMS": 120, "errorCode": 7, "errorLabels": [ "ResumableChangeStreamError" @@ -412,7 +412,7 @@ "arguments": { "pipeline": [], "maxAwaitTimeMS": 1, - "timeoutMS": 100 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, @@ -431,7 +431,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 150 + "blockTimeMS": 250 } } } @@ -534,7 +534,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } 
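The change-streams hunks above and below uniformly raise each blockTimeMS/timeoutMS pair (for example 15 → 250 and 20 → 200), keeping the blocked command longer than the client-side timeout while giving slower CI hosts more headroom. A minimal sketch of the pattern these JSON specs drive, assuming a local replica set started with enableTestCommands=1; the URI, database, and collection names here are illustrative, not taken from this patch:

from pymongo import MongoClient
from pymongo.errors import PyMongoError

URI = "mongodb://localhost:27017/?replicaSet=rs0"  # illustrative

# Use a separate client for the failpoint, mirroring the specs' failPointClient,
# so that the admin command is not itself subject to timeoutMS.
fp_client = MongoClient(URI)
fp_client.admin.command({
    "configureFailPoint": "failCommand",
    "mode": {"times": 1},
    "data": {
        "failCommands": ["aggregate"],  # watch() issues aggregate under the hood
        "blockConnection": True,
        "blockTimeMS": 250,  # block longer than the client-side timeout below
    },
})

client = MongoClient(URI, timeoutMS=200)  # CSOT: timeoutMS caps the whole operation
try:
    client.test.coll.watch([])  # aggregate is delayed past timeoutMS, so this raises
    raise AssertionError("expected a client-side timeout")
except PyMongoError as exc:
    assert exc.timeout  # PyMongo 4.2+ flags CSOT timeouts via the .timeout property
finally:
    fp_client.admin.command({"configureFailPoint": "failCommand", "mode": "off"})

Scaling both knobs together preserves the property being asserted (blocked time exceeds timeoutMS) while making the margin robust to scheduling jitter, which is consistent with the direction of every timing change in these hunks.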
@@ -544,7 +544,7 @@ "object": "collection", "arguments": { "pipeline": [], - "timeoutMS": 10 + "timeoutMS": 200 }, "saveResultAsEntity": "changeStream" }, diff --git a/test/csot/close-cursors.json b/test/csot/close-cursors.json index 1361971c4c..79b0de7b6a 100644 --- a/test/csot/close-cursors.json +++ b/test/csot/close-cursors.json @@ -75,7 +75,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 50 + "blockTimeMS": 250 } } } @@ -86,7 +86,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -175,7 +175,7 @@ "killCursors" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 250 } } } @@ -186,7 +186,7 @@ "arguments": { "filter": {}, "batchSize": 2, - "timeoutMS": 20 + "timeoutMS": 200 }, "saveResultAsEntity": "cursor" }, @@ -194,7 +194,7 @@ "name": "close", "object": "cursor", "arguments": { - "timeoutMS": 40 + "timeoutMS": 400 } } ], @@ -215,7 +215,7 @@ { "commandStartedEvent": { "command": { - "killCursors": "collection", + "killCursors": "coll", "maxTimeMS": { "$$type": [ "int", diff --git a/test/csot/command-execution.json b/test/csot/command-execution.json index 92358f2184..aa9c3eb23f 100644 --- a/test/csot/command-execution.json +++ b/test/csot/command-execution.json @@ -3,7 +3,13 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.9" + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" } ], "createEntities": [ @@ -45,7 +51,7 @@ ], "appName": "reduceMaxTimeMSTest", "blockConnection": true, - "blockTimeMS": 20 + "blockTimeMS": 50 } } } @@ -62,7 +68,8 @@ "uriOptions": { "appName": "reduceMaxTimeMSTest", "w": 1, - "timeoutMS": 500 + "timeoutMS": 500, + "heartbeatFrequencyMS": 500 }, "observeEvents": [ "commandStartedEvent" @@ -86,6 +93,23 @@ ] } }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, { "name": "insertOne", "object": "timeoutCollection", @@ -100,6 +124,15 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, { "commandStartedEvent": { "commandName": "insert", @@ -107,7 +140,7 @@ "command": { "insert": "timeoutColl", "maxTimeMS": { - "$$lte": 500 + "$$lte": 450 } } } @@ -134,7 +167,7 @@ ], "appName": "rttTooHighTest", "blockConnection": true, - "blockTimeMS": 20 + "blockTimeMS": 50 } } } @@ -151,7 +184,8 @@ "uriOptions": { "appName": "rttTooHighTest", "w": 1, - "timeoutMS": 10 + "timeoutMS": 10, + "heartbeatFrequencyMS": 500 }, "observeEvents": [ "commandStartedEvent" @@ -175,6 +209,23 @@ ] } }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, { "name": "insertOne", "object": "timeoutCollection", @@ -192,7 +243,7 @@ "object": "timeoutCollection", "arguments": { "document": { - "_id": 2 + "_id": 3 } }, "expectError": { @@ -204,12 +255,100 @@ "object": "timeoutCollection", "arguments": { "document": { - "_id": 2 + "_id": 4 } }, "expectError": { "isTimeoutError": true } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": 
{ + "insert": "timeoutColl" + } + } + } + ] + } + ] + }, + { + "description": "short-circuit is not enabled with only 1 RTT measurement", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, + "heartbeatFrequencyMS": 100000 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } }, { "name": "insertOne", @@ -218,16 +357,35 @@ "document": { "_id": 2 } - }, - "expectError": { - "isTimeoutError": true } } ], "expectEvents": [ { "client": "client", - "events": [] + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] } ] } diff --git a/test/csot/convenient-transactions.json b/test/csot/convenient-transactions.json index 0c8cc6edd9..3868b3026c 100644 --- a/test/csot/convenient-transactions.json +++ b/test/csot/convenient-transactions.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -81,6 +81,9 @@ } } ] + }, + "expectError": { + "isClientError": true } } ], @@ -109,7 +112,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 30 + "blockTimeMS": 300 } } } @@ -182,6 +185,21 @@ } } } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } } ] } diff --git a/test/csot/deprecated-options.json b/test/csot/deprecated-options.json index 0e2bdefd73..647e1bf792 100644 --- a/test/csot/deprecated-options.json +++ b/test/csot/deprecated-options.json @@ -1,12 +1,12 @@ { - "description": "operations ignore deprected timeout options if timeoutMS is set", + "description": "operations ignore deprecated timeout options if timeoutMS is set", "schemaVersion": "1.9", "runOnRequirements": [ { "minServerVersion": "4.2", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -6750,16 +6750,23 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 100000, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "timeoutMS": 100000, "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false } } ] @@ -6815,16 +6822,23 @@ ] } }, + { + 
"name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 100000, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "timeoutMS": 100000, "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false } } ], @@ -6832,6 +6846,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", @@ -6903,6 +6923,16 @@ ] } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", @@ -6910,10 +6940,6 @@ "timeoutMS": 1000, "maxTimeMS": 5000, "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false } } ], @@ -6921,6 +6947,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", @@ -7003,6 +7035,17 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1", + "timeoutMS": 100000 + } + }, { "name": "dropIndexes", "object": "collection", diff --git a/test/csot/error-transformations.json b/test/csot/error-transformations.json index 4d9e061c3b..4889e39583 100644 --- a/test/csot/error-transformations.json +++ b/test/csot/error-transformations.json @@ -11,7 +11,6 @@ { "minServerVersion": "4.2", "topologies": [ - "replicaset", "sharded" ] } diff --git a/test/csot/global-timeoutMS.json b/test/csot/global-timeoutMS.json index 34854ac155..f1edbe68e3 100644 --- a/test/csot/global-timeoutMS.json +++ b/test/csot/global-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -5621,15 +5621,21 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "name": "x_1" - }, - "expectError": { - "isClientError": false, - "isTimeoutError": false } } ], @@ -5637,6 +5643,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", diff --git a/test/csot/gridfs-advanced.json b/test/csot/gridfs-advanced.json index 6bf0229a04..c6c0944d2f 100644 --- a/test/csot/gridfs-advanced.json +++ b/test/csot/gridfs-advanced.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/csot/gridfs-delete.json b/test/csot/gridfs-delete.json index 8701929ff3..9f4980114b 100644 --- a/test/csot/gridfs-delete.json +++ b/test/csot/gridfs-delete.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/csot/gridfs-download.json b/test/csot/gridfs-download.json index 2ab64010f8..8542f69e89 100644 --- a/test/csot/gridfs-download.json +++ b/test/csot/gridfs-download.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git 
a/test/csot/gridfs-find.json b/test/csot/gridfs-find.json index 45bb7066d6..7409036284 100644 --- a/test/csot/gridfs-find.json +++ b/test/csot/gridfs-find.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/csot/gridfs-upload.json b/test/csot/gridfs-upload.json index 690fdda77f..b3f174973d 100644 --- a/test/csot/gridfs-upload.json +++ b/test/csot/gridfs-upload.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": "forbid" } ], "createEntities": [ diff --git a/test/csot/legacy-timeouts.json b/test/csot/legacy-timeouts.json index 3a2d2eaefb..535425c934 100644 --- a/test/csot/legacy-timeouts.json +++ b/test/csot/legacy-timeouts.json @@ -1,6 +1,6 @@ { "description": "legacy timeouts continue to work if timeoutMS is not set", - "schemaVersion": "1.9", + "schemaVersion": "1.0", "runOnRequirements": [ { "minServerVersion": "4.4" @@ -280,7 +280,7 @@ { "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/csot/non-tailable-cursors.json b/test/csot/non-tailable-cursors.json index 0a5448a6bb..291c6e72aa 100644 --- a/test/csot/non-tailable-cursors.json +++ b/test/csot/non-tailable-cursors.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -84,7 +84,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -143,7 +143,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -153,7 +153,7 @@ "object": "collection", "arguments": { "filter": {}, - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -221,7 +221,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -232,7 +232,7 @@ "arguments": { "filter": {}, "timeoutMode": "cursorLifetime", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 }, "expectError": { @@ -299,7 +299,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -355,7 +355,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 125 } } } @@ -366,7 +366,7 @@ "arguments": { "filter": {}, "timeoutMode": "iteration", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 2 } } @@ -427,7 +427,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/csot/override-operation-timeoutMS.json b/test/csot/override-operation-timeoutMS.json index 896b996ee8..f33f876137 100644 --- a/test/csot/override-operation-timeoutMS.json +++ b/test/csot/override-operation-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -3378,15 +3378,23 @@ } } }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 1000, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "timeoutMS": 1000, "name": "x_1" - }, - "expectError": { - "isTimeoutError": false } } ], @@ -3394,6 +3402,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", @@ -3436,15 +3450,23 @@ } } }, + { + "name": "createIndex", + "object": 
"collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 0, + "name": "x_1" + } + }, { "name": "dropIndex", "object": "collection", "arguments": { "timeoutMS": 0, "name": "x_1" - }, - "expectError": { - "isTimeoutError": false } } ], @@ -3452,6 +3474,12 @@ { "client": "client", "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, { "commandStartedEvent": { "commandName": "dropIndexes", diff --git a/test/csot/retryability-legacy-timeouts.json b/test/csot/retryability-legacy-timeouts.json index cd2af7fab6..aded781aee 100644 --- a/test/csot/retryability-legacy-timeouts.json +++ b/test/csot/retryability-legacy-timeouts.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "socketTimeoutMS": 50 + "socketTimeoutMS": 100 }, "useMultipleMongoses": false, "observeEvents": [ @@ -73,7 +73,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -132,7 +132,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -194,7 +194,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -255,7 +255,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -319,7 +319,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -376,7 +376,7 @@ "delete" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -436,7 +436,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -496,7 +496,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -559,7 +559,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -621,7 +621,7 @@ "update" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -686,7 +686,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -743,7 +743,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -803,7 +803,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -863,7 +863,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -926,7 +926,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -988,7 +988,7 @@ "findAndModify" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1053,7 +1053,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1118,7 +1118,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1186,7 +1186,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1243,7 +1243,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1303,7 +1303,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1357,7 +1357,7 @@ "listDatabases" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1414,7 +1414,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1471,7 +1471,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 
125 } } } @@ -1531,7 +1531,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1595,7 +1595,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1662,7 +1662,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1719,7 +1719,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1779,7 +1779,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1836,7 +1836,7 @@ "listCollections" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1896,7 +1896,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -1953,7 +1953,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2013,7 +2013,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2070,7 +2070,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2130,7 +2130,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2187,7 +2187,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2247,7 +2247,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2304,7 +2304,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2364,7 +2364,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2418,7 +2418,7 @@ "count" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2475,7 +2475,7 @@ "distinct" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2533,7 +2533,7 @@ "distinct" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2594,7 +2594,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2651,7 +2651,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2711,7 +2711,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2768,7 +2768,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2828,7 +2828,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2882,7 +2882,7 @@ "listIndexes" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2939,7 +2939,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } @@ -2996,7 +2996,7 @@ "aggregate" ], "blockConnection": true, - "blockTimeMS": 55 + "blockTimeMS": 125 } } } diff --git a/test/csot/retryability-timeoutMS.json b/test/csot/retryability-timeoutMS.json index 438ba6b8d2..9daad260ef 100644 --- a/test/csot/retryability-timeoutMS.json +++ b/test/csot/retryability-timeoutMS.json @@ -11,8 +11,7 @@ { "minServerVersion": "4.2", "topologies": [ - "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -109,6 +108,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -137,7 +141,7 @@ "name": "insertOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "document": { "x": 1 } @@ 
-199,6 +203,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -328,6 +337,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -356,7 +370,7 @@ "name": "insertMany", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "documents": [ { "x": 1 @@ -420,6 +434,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -547,6 +566,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -575,7 +599,7 @@ "name": "deleteOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -635,6 +659,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -761,6 +790,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -789,7 +823,7 @@ "name": "replaceOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "replacement": { "x": 1 @@ -852,6 +886,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -983,6 +1022,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1011,7 +1055,7 @@ "name": "updateOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "update": { "$set": { @@ -1076,6 +1120,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1204,6 +1253,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1232,7 +1286,7 @@ "name": "findOneAndDelete", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -1292,6 +1346,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1418,6 +1477,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" 
+ } + ], "operations": [ { "name": "failPoint", @@ -1446,7 +1510,7 @@ "name": "findOneAndReplace", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "replacement": { "x": 1 @@ -1509,6 +1573,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1640,6 +1709,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1668,7 +1742,7 @@ "name": "findOneAndUpdate", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {}, "update": { "$set": { @@ -1733,6 +1807,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1869,6 +1948,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -1897,7 +1981,7 @@ "name": "bulkWrite", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "requests": [ { "insertOne": { @@ -1965,6 +2049,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2096,6 +2185,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2124,7 +2218,7 @@ "name": "listDatabases", "object": "client", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -2184,6 +2278,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2304,6 +2403,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2332,7 +2436,7 @@ "name": "listDatabaseNames", "object": "client", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -2391,6 +2495,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2513,6 +2622,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2541,7 +2655,7 @@ "name": "createChangeStream", "object": "client", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -2601,6 +2715,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" 
+ } + ], "operations": [ { "name": "failPoint", @@ -2731,6 +2850,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2759,7 +2883,7 @@ "name": "aggregate", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [ { "$listLocalSessions": {} @@ -2826,6 +2950,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2956,6 +3085,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -2984,7 +3118,7 @@ "name": "listCollections", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3044,6 +3178,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3167,6 +3306,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3195,7 +3339,7 @@ "name": "listCollectionNames", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -3255,6 +3399,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3378,6 +3527,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3406,7 +3560,7 @@ "name": "createChangeStream", "object": "database", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -3466,6 +3620,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3589,6 +3748,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3617,7 +3781,7 @@ "name": "aggregate", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -3677,6 +3841,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3800,6 +3969,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -3828,7 +4002,7 @@ "name": "count", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, 
"filter": {} } } @@ -3888,6 +4062,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4011,6 +4190,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4039,7 +4223,7 @@ "name": "countDocuments", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4099,6 +4283,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4219,6 +4408,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4247,7 +4441,7 @@ "name": "estimatedDocumentCount", "object": "collection", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -4306,6 +4500,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4429,6 +4628,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4457,7 +4661,7 @@ "name": "distinct", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "fieldName": "x", "filter": {} } @@ -4518,6 +4722,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4642,6 +4851,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4670,7 +4884,7 @@ "name": "find", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4730,6 +4944,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4853,6 +5072,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -4881,7 +5105,7 @@ "name": "findOne", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "filter": {} } } @@ -4941,6 +5165,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5061,6 +5290,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], 
"operations": [ { "name": "failPoint", @@ -5089,7 +5323,7 @@ "name": "listIndexes", "object": "collection", "arguments": { - "timeoutMS": 500 + "timeoutMS": 1000 } } ], @@ -5148,6 +5382,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5270,6 +5509,11 @@ }, { "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", @@ -5298,7 +5542,7 @@ "name": "createChangeStream", "object": "collection", "arguments": { - "timeoutMS": 500, + "timeoutMS": 1000, "pipeline": [] } } @@ -5358,6 +5602,11 @@ }, { "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], "operations": [ { "name": "failPoint", diff --git a/test/csot/runCursorCommand.json b/test/csot/runCursorCommand.json new file mode 100644 index 0000000000..36f774fb5a --- /dev/null +++ b/test/csot/runCursorCommand.json @@ -0,0 +1,583 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "commandClient", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent" + ] + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "commandDb", + "client": "commandClient", + "databaseName": "commandDb" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "errors if timeoutMode is set without timeoutMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "error if timeoutMode is cursorLifetime and cursorType is tailableAwait", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection" + }, + "timeoutMode": "cursorLifetime", + "cursorType": "tailableAwait" + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find", + 
"getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "timeoutMS": 100, + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "command": { + "find": "collection", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Non-tailable cursor iteration timeoutMS is refreshed for getMore if timeoutMode is iteration - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 2 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "cappedCollection", + "batchSize": 1, + "tailable": true + }, + "timeoutMode": "iteration", + "timeoutMS": 100, + "batchSize": 1, + "cursorType": "tailable" + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "Tailable cursor awaitData iteration timeoutMS is refreshed for getMore - failure", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 60 + } + } + } + }, + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "foo": "bar" + }, + { + "fizz": "buzz" + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true + }, + "cursorType": "tailableAwait", + "batchSize": 1 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "db", + "command": { + "find": "cappedCollection", + "tailable": true, + "awaitData": true, + "maxTimeMS": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "db", + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/csot/sessions-inherit-timeoutMS.json b/test/csot/sessions-inherit-timeoutMS.json index 8205c086bc..13ea91c794 100644 --- a/test/csot/sessions-inherit-timeoutMS.json +++ b/test/csot/sessions-inherit-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -21,7 +21,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 50 + "timeoutMS": 500 }, "useMultipleMongoses": false, "observeEvents": [ @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": 
true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/csot/sessions-override-operation-timeoutMS.json b/test/csot/sessions-override-operation-timeoutMS.json index ff26de29f5..441c698328 100644 --- a/test/csot/sessions-override-operation-timeoutMS.json +++ b/test/csot/sessions-override-operation-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -75,7 +75,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -98,7 +98,7 @@ "name": "commitTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 }, "expectError": { "isTimeoutError": true @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -188,7 +188,7 @@ "name": "abortTransaction", "object": "session", "arguments": { - "timeoutMS": 50 + "timeoutMS": 500 } } ], @@ -252,7 +252,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -261,7 +261,7 @@ "name": "withTransaction", "object": "session", "arguments": { - "timeoutMS": 50, + "timeoutMS": 500, "callback": [ { "name": "insertOne", @@ -306,6 +306,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/csot/sessions-override-timeoutMS.json b/test/csot/sessions-override-timeoutMS.json index 1d3b8932af..d90152e909 100644 --- a/test/csot/sessions-override-timeoutMS.json +++ b/test/csot/sessions-override-timeoutMS.json @@ -6,7 +6,7 @@ "minServerVersion": "4.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -47,7 +47,7 @@ "id": "session", "client": "client", "sessionOptions": { - "defaultTimeoutMS": 50 + "defaultTimeoutMS": 500 } } } @@ -78,7 +78,7 @@ "commitTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -165,7 +165,7 @@ "abortTransaction" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -249,7 +249,7 @@ "insert" ], "blockConnection": true, - "blockTimeMS": 60 + "blockTimeMS": 600 } } } @@ -302,6 +302,26 @@ "commandFailedEvent": { "commandName": "insert" } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } } ] } diff --git a/test/csot/tailable-awaitData.json b/test/csot/tailable-awaitData.json index 6da85c7783..80e95ca906 100644 --- a/test/csot/tailable-awaitData.json +++ b/test/csot/tailable-awaitData.json @@ -3,7 +3,8 @@ "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "4.4" + "minServerVersion": "4.4", + "serverless": 
"forbid" } ], "createEntities": [ @@ -17,7 +18,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -77,7 +78,7 @@ ] }, { - "description": "error if maxAwaitTimeMS is greater than timeoutMS", + "description": "error on find if maxAwaitTimeMS is greater than timeoutMS", "operations": [ { "name": "find", @@ -89,13 +90,50 @@ "maxAwaitTimeMS": 10 }, "expectError": { - "isClientError": true + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on aggregate if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false } } ] }, { - "description": "error if maxAwaitTimeMS is equal to timeoutMS", + "description": "error on watch if maxAwaitTimeMS is greater than timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 10 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on find if maxAwaitTimeMS is equal to timeoutMS", "operations": [ { "name": "find", @@ -107,7 +145,44 @@ "maxAwaitTimeMS": 5 }, "expectError": { - "isClientError": true + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on aggregate if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false + } + } + ] + }, + { + "description": "error on watch if maxAwaitTimeMS is equal to timeoutMS", + "operations": [ + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [], + "timeoutMS": 5, + "maxAwaitTimeMS": 5 + }, + "expectError": { + "isClientError": true, + "isTimeoutError": false } } ] @@ -130,7 +205,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 300 } } } @@ -188,7 +263,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -199,7 +274,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -272,7 +347,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -283,7 +358,7 @@ "arguments": { "filter": {}, "cursorType": "tailableAwait", - "timeoutMS": 20, + "timeoutMS": 250, "batchSize": 1, "maxAwaitTimeMS": 1 }, @@ -354,7 +429,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -417,6 +492,141 @@ ] } ] + }, + { + "description": "apply remaining timeoutMS if less than maxAwaitTimeMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "getMore" + ], + "blockConnection": true, + "blockTimeMS": 30 + } + } + } + }, + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + }, + "cursorType": "tailableAwait", + "batchSize": 1, + "maxAwaitTimeMS": 100, + "timeoutMS": 200 + }, + 
"saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "tailableCursor", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 100 + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 70 + } + } + } + } + ] + } + ] + }, + { + "description": "apply maxAwaitTimeMS if less than remaining timeout", + "operations": [ + { + "name": "createFindCursor", + "object": "collection", + "arguments": { + "filter": {}, + "cursorType": "tailableAwait", + "batchSize": 1, + "maxAwaitTimeMS": 100, + "timeoutMS": 200 + }, + "saveResultAsEntity": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + }, + { + "name": "iterateOnce", + "object": "tailableCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "getMore", + "databaseName": "test", + "command": { + "maxTimeMS": { + "$$lte": 100 + } + } + } + } + ] + } + ] } ] } diff --git a/test/csot/tailable-non-awaitData.json b/test/csot/tailable-non-awaitData.json index 34ee660963..e88230e4f7 100644 --- a/test/csot/tailable-non-awaitData.json +++ b/test/csot/tailable-non-awaitData.json @@ -17,7 +17,7 @@ "client": { "id": "client", "uriOptions": { - "timeoutMS": 10 + "timeoutMS": 200 }, "useMultipleMongoses": false, "observeEvents": [ @@ -94,7 +94,7 @@ "find" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } @@ -154,7 +154,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 150 } } } @@ -165,7 +165,7 @@ "arguments": { "filter": {}, "cursorType": "tailable", - "timeoutMS": 20, + "timeoutMS": 200, "batchSize": 1 }, "saveResultAsEntity": "tailableCursor" @@ -239,7 +239,7 @@ "getMore" ], "blockConnection": true, - "blockTimeMS": 15 + "blockTimeMS": 250 } } } diff --git a/test/csot/waitQueueTimeout.json b/test/csot/waitQueueTimeout.json new file mode 100644 index 0000000000..138d5cc161 --- /dev/null +++ b/test/csot/waitQueueTimeout.json @@ -0,0 +1,176 @@ +{ + "description": "WaitQueueTimeoutError does not clear the pool", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "maxPoolSize": 1, + "appname": "waitQueueTimeoutErrorTest" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "WaitQueueTimeoutError does not clear the pool", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + 
"blockConnection": true, + "blockTimeMS": 500, + "appName": "waitQueueTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "commandStartedEvent": { + "commandName": "ping" + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100, + "command": { + "hello": 1 + }, + "commandName": "hello" + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "hello": 1 + }, + "commandName": "hello" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "hello", + "databaseName": "test", + "command": { + "hello": 1 + } + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [] + } + ] + } + ] +} diff --git a/test/data_lake/aggregate.json b/test/data_lake/aggregate.json deleted file mode 100644 index 99995bca41..0000000000 --- a/test/data_lake/aggregate.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "collection_name": "driverdata", - "database_name": "test", - "tests": [ - { - "description": "Aggregate with pipeline (project, sort, limit)", - "operations": [ - { - "object": "collection", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 0 - } - }, - { - "$sort": { - "a": 1 - } - }, - { - "$limit": 2 - } - ] - }, - "result": [ - { - "a": 1, - "b": 2, - "c": 3 - }, - { - "a": 2, - "b": 3, - "c": 4 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "driverdata" - } - } - } - ] - } - ] -} diff --git a/test/data_lake/estimatedDocumentCount.json b/test/data_lake/estimatedDocumentCount.json deleted file mode 100644 index 997a3ab3fc..0000000000 --- a/test/data_lake/estimatedDocumentCount.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "collection_name": "driverdata", - "database_name": "test", - "tests": [ - { - "description": "estimatedDocumentCount succeeds", - "operations": [ - { - "object": "collection", - "name": "estimatedDocumentCount", - "result": 15 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "driverdata" - }, - "command_name": "count", - "database_name": "test" - } - } - ] - } - ] -} diff --git a/test/data_lake/find.json b/test/data_lake/find.json deleted file mode 100644 index 8a3468a135..0000000000 --- a/test/data_lake/find.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "collection_name": "driverdata", - "database_name": "test", - "tests": [ - { - "description": "Find with projection and sort", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": { - "b": { - "$gt": 5 - } - }, - "projection": { - "_id": 0 - }, - "sort": { - "a": 1 - }, - "limit": 5 - }, - "result": [ - { - "a": 5, - "b": 6, - 
"c": 7 - }, - { - "a": 6, - "b": 7, - "c": 8 - }, - { - "a": 7, - "b": 8, - "c": 9 - }, - { - "a": 8, - "b": 9, - "c": 10 - }, - { - "a": 9, - "b": 10, - "c": 11 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "driverdata" - } - } - } - ] - } - ] -} diff --git a/test/data_lake/getMore.json b/test/data_lake/getMore.json deleted file mode 100644 index e2e1d4788a..0000000000 --- a/test/data_lake/getMore.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "collection_name": "driverdata", - "database_name": "test", - "tests": [ - { - "description": "A successful find event with getMore", - "operations": [ - { - "object": "collection", - "name": "find", - "arguments": { - "filter": { - "a": { - "$gte": 2 - } - }, - "sort": { - "a": 1 - }, - "batchSize": 3, - "limit": 4 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "driverdata", - "filter": { - "a": { - "$gte": 2 - } - }, - "sort": { - "a": 1 - }, - "batchSize": 3, - "limit": 4 - }, - "command_name": "find", - "database_name": "test" - } - }, - { - "command_started_event": { - "command": { - "batchSize": 1 - }, - "command_name": "getMore", - "database_name": "cursors" - } - } - ] - } - ] -} diff --git a/test/data_lake/listCollections.json b/test/data_lake/listCollections.json deleted file mode 100644 index e419f7b3e9..0000000000 --- a/test/data_lake/listCollections.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "database_name": "test", - "tests": [ - { - "description": "ListCollections succeeds", - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command_name": "listCollections", - "database_name": "test", - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/test/data_lake/listDatabases.json b/test/data_lake/listDatabases.json deleted file mode 100644 index 6458148e49..0000000000 --- a/test/data_lake/listDatabases.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "tests": [ - { - "description": "ListDatabases succeeds", - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command_name": "listDatabases", - "database_name": "admin", - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/test/data_lake/runCommand.json b/test/data_lake/runCommand.json deleted file mode 100644 index d81ff1a64b..0000000000 --- a/test/data_lake/runCommand.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "database_name": "test", - "tests": [ - { - "description": "ping succeeds using runCommand", - "operations": [ - { - "name": "runCommand", - "object": "database", - "command_name": "ping", - "arguments": { - "command": { - "ping": 1 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command_name": "ping", - "database_name": "test", - "command": { - "ping": 1 - } - } - } - ] - } - ] -} diff --git a/test/data_lake/unified/aggregate.json b/test/data_lake/unified/aggregate.json new file mode 100644 index 0000000000..68a3467c71 --- /dev/null +++ b/test/data_lake/unified/aggregate.json @@ -0,0 +1,84 @@ +{ + "description": "aggregate", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], 
+ "tests": [ + { + "description": "Aggregate with pipeline (project, sort, limit)", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 0 + } + }, + { + "$sort": { + "a": 1 + } + }, + { + "$limit": 2 + } + ] + }, + "expectResult": [ + { + "a": 1, + "b": 2, + "c": 3 + }, + { + "a": 2, + "b": 3, + "c": 4 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "driverdata" + }, + "commandName": "aggregate", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/estimatedDocumentCount.json b/test/data_lake/unified/estimatedDocumentCount.json new file mode 100644 index 0000000000..b7515a4418 --- /dev/null +++ b/test/data_lake/unified/estimatedDocumentCount.json @@ -0,0 +1,56 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "estimatedDocumentCount succeeds", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 15 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "driverdata" + }, + "commandName": "count", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/find.json b/test/data_lake/unified/find.json new file mode 100644 index 0000000000..d0652dc720 --- /dev/null +++ b/test/data_lake/unified/find.json @@ -0,0 +1,96 @@ +{ + "description": "find", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "Find with projection and sort", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "b": { + "$gt": 5 + } + }, + "projection": { + "_id": 0 + }, + "sort": { + "a": 1 + }, + "limit": 5 + }, + "expectResult": [ + { + "a": 5, + "b": 6, + "c": 7 + }, + { + "a": 6, + "b": 7, + "c": 8 + }, + { + "a": 7, + "b": 8, + "c": 9 + }, + { + "a": 8, + "b": 9, + "c": 10 + }, + { + "a": 9, + "b": 10, + "c": 11 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "driverdata" + }, + "commandName": "find", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/getMore.json b/test/data_lake/unified/getMore.json new file mode 100644 index 0000000000..109b6d3d8e --- /dev/null +++ b/test/data_lake/unified/getMore.json @@ -0,0 +1,95 @@ +{ + "description": "getMore", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + 
"collectionName": "driverdata" + } + } + ], + "tests": [ + { + "description": "A successful find event with getMore", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "driverdata", + "filter": { + "a": { + "$gte": 2 + } + }, + "sort": { + "a": 1 + }, + "batchSize": 3, + "limit": 4 + }, + "commandName": "find", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": { + "$$type": "string" + }, + "batchSize": 1 + }, + "commandName": "getMore", + "databaseName": "cursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/listCollections.json b/test/data_lake/unified/listCollections.json new file mode 100644 index 0000000000..642e7ed328 --- /dev/null +++ b/test/data_lake/unified/listCollections.json @@ -0,0 +1,48 @@ +{ + "description": "listCollections", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "ListCollections succeeds", + "operations": [ + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + }, + "commandName": "listCollections", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/listDatabases.json b/test/data_lake/unified/listDatabases.json new file mode 100644 index 0000000000..64506ee54e --- /dev/null +++ b/test/data_lake/unified/listDatabases.json @@ -0,0 +1,41 @@ +{ + "description": "listDatabases", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "tests": [ + { + "description": "ListCollections succeeds", + "operations": [ + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + }, + "commandName": "listDatabases", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/data_lake/unified/runCommand.json b/test/data_lake/unified/runCommand.json new file mode 100644 index 0000000000..325b6b3f30 --- /dev/null +++ b/test/data_lake/unified/runCommand.json @@ -0,0 +1,54 @@ +{ + "description": "runCommand", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "ping succeeds using runCommand", + "operations": [ + { + "object": "database0", + "name": "runCommand", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "commandName": "ping", + "databaseName": "test" + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json 
b/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json deleted file mode 100644 index 9f6ea212e5..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-InterruptedAtShutdown.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 InterruptedAtShutdown error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 InterruptedAtShutdown error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "InterruptedAtShutdown", - "code": 11600 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json b/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json deleted file mode 100644 index 7e5f235713..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-InterruptedDueToReplStateChange.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 InterruptedDueToReplStateChange error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 InterruptedDueToReplStateChange error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "InterruptedDueToReplStateChange", - "code": 11602 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json b/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json deleted file mode 100644 index 1635f1a856..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-LegacyNotPrimary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 LegacyNotPrimary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": 
true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 LegacyNotPrimary error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "LegacyNotPrimary", - "code": 10058 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json deleted file mode 100644 index 0e70ede02c..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryNoSecondaryOk.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 NotPrimaryNoSecondaryOk error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotPrimaryNoSecondaryOk error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotPrimaryNoSecondaryOk", - "code": 13435 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json b/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json deleted file mode 100644 index 3fefb21663..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-NotPrimaryOrSecondary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 NotPrimaryOrSecondary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotPrimaryOrSecondary error marks server Unknown and 
clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotPrimaryOrSecondary", - "code": 13436 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json b/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json deleted file mode 100644 index d010da0a5b..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-NotWritablePrimary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 NotWritablePrimary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotWritablePrimary error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotWritablePrimary", - "code": 10107 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json b/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json deleted file mode 100644 index 02956d201d..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-PrimarySteppedDown.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 PrimarySteppedDown error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 PrimarySteppedDown error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "PrimarySteppedDown", - "code": 189 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git 
a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json b/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json deleted file mode 100644 index fc3a5aa6fe..0000000000 --- a/test/discovery_and_monitoring/errors/pre-42-ShutdownInProgress.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 ShutdownInProgress error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 ShutdownInProgress error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "ShutdownInProgress", - "code": 91 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/test/discovery_and_monitoring/rs/compatible.json b/test/discovery_and_monitoring/rs/compatible.json index 444b13e9d5..dfd5d57dfa 100644 --- a/test/discovery_and_monitoring/rs/compatible.json +++ b/test/discovery_and_monitoring/rs/compatible.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/compatible_unknown.json b/test/discovery_and_monitoring/rs/compatible_unknown.json index cf92dd1ed3..95e03ea958 100644 --- a/test/discovery_and_monitoring/rs/compatible_unknown.json +++ b/test/discovery_and_monitoring/rs/compatible_unknown.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_arbiters.json b/test/discovery_and_monitoring/rs/discover_arbiters.json index 53709b0cee..803462b156 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json index 64fb49f4fc..e58d7c7fb4 100644 --- a/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_arbiters_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_ghost.json b/test/discovery_and_monitoring/rs/discover_ghost.json index 2e24c83e0b..3b7fc836ec 100644 --- a/test/discovery_and_monitoring/rs/discover_ghost.json +++ b/test/discovery_and_monitoring/rs/discover_ghost.json @@ -12,7 +12,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git 
a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json index cf5fe83a54..1a8457983b 100644 --- a/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_ghost_replicaset.json @@ -12,7 +12,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_hidden.json b/test/discovery_and_monitoring/rs/discover_hidden.json index e4a90f1f9c..10bd51edeb 100644 --- a/test/discovery_and_monitoring/rs/discover_hidden.json +++ b/test/discovery_and_monitoring/rs/discover_hidden.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json index 04420596f0..63cf558675 100644 --- a/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_hidden_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_passives.json b/test/discovery_and_monitoring/rs/discover_passives.json index 30258409f6..0a292c675c 100644 --- a/test/discovery_and_monitoring/rs/discover_passives.json +++ b/test/discovery_and_monitoring/rs/discover_passives.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json index 266eaa5234..c48fd47625 100644 --- a/test/discovery_and_monitoring/rs/discover_passives_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_passives_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_primary.json b/test/discovery_and_monitoring/rs/discover_primary.json index 2d1292bbd4..04e7a4984c 100644 --- a/test/discovery_and_monitoring/rs/discover_primary.json +++ b/test/discovery_and_monitoring/rs/discover_primary.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json index 54dfefba5f..3cdcfdcee2 100644 --- a/test/discovery_and_monitoring/rs/discover_primary_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_primary_replicaset.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_rsother.json b/test/discovery_and_monitoring/rs/discover_rsother.json index 4ab25667f0..9c3b8d8b7d 100644 --- a/test/discovery_and_monitoring/rs/discover_rsother.json +++ b/test/discovery_and_monitoring/rs/discover_rsother.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff 
--git a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json index e3958d70ad..3da9efb066 100644 --- a/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_rsother_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -34,7 +34,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_secondary.json b/test/discovery_and_monitoring/rs/discover_secondary.json index 22325d4e03..64a1ce31e3 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary.json +++ b/test/discovery_and_monitoring/rs/discover_secondary.json @@ -17,7 +17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json index d903b6444d..d230f976a2 100644 --- a/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json +++ b/test/discovery_and_monitoring/rs/discover_secondary_replicaset.json @@ -17,7 +17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/discovery.json b/test/discovery_and_monitoring/rs/discovery.json index 50e1269223..e9deaa7587 100644 --- a/test/discovery_and_monitoring/rs/discovery.json +++ b/test/discovery_and_monitoring/rs/discovery.json @@ -18,7 +18,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -59,7 +59,7 @@ "d:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -103,7 +103,7 @@ "e:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -147,7 +147,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json new file mode 100644 index 0000000000..2fcea2bf66 --- /dev/null +++ b/test/discovery_and_monitoring/rs/electionId_precedence_setVersion.json @@ -0,0 +1,92 @@ +{ + "description": "ElectionId is considered higher precedence than setVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ], + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": 
"Unknown", + "setName": null, + "setVersion": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/equal_electionids.json b/test/discovery_and_monitoring/rs/equal_electionids.json index 17df3207fa..f1deedf9f4 100644 --- a/test/discovery_and_monitoring/rs/equal_electionids.json +++ b/test/discovery_and_monitoring/rs/equal_electionids.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json index 4e02304c61..085e81e266 100644 --- a/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json +++ b/test/discovery_and_monitoring/rs/hosts_differ_from_seeds.json @@ -15,7 +15,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/incompatible_arbiter.json b/test/discovery_and_monitoring/rs/incompatible_arbiter.json index f0539cb337..bda18d9f6f 100644 --- a/test/discovery_and_monitoring/rs/incompatible_arbiter.json +++ b/test/discovery_and_monitoring/rs/incompatible_arbiter.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/incompatible_ghost.json b/test/discovery_and_monitoring/rs/incompatible_ghost.json index 824e953f90..9d82e31682 100644 --- a/test/discovery_and_monitoring/rs/incompatible_ghost.json +++ b/test/discovery_and_monitoring/rs/incompatible_ghost.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/incompatible_other.json b/test/discovery_and_monitoring/rs/incompatible_other.json index 6f301ef5de..149ba01142 100644 --- a/test/discovery_and_monitoring/rs/incompatible_other.json +++ b/test/discovery_and_monitoring/rs/incompatible_other.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/ls_timeout.json b/test/discovery_and_monitoring/rs/ls_timeout.json index 96389d3b76..c68790ddfd 100644 --- a/test/discovery_and_monitoring/rs/ls_timeout.json +++ b/test/discovery_and_monitoring/rs/ls_timeout.json @@ -20,7 +20,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": 3, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -58,7 +58,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -104,7 +104,7 @@ "setName": "rs", "arbiterOnly": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -152,7 +152,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -194,7 +194,7 @@ "hidden": true, "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -244,7 +244,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": null, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff 
--git a/test/discovery_and_monitoring/rs/member_reconfig.json b/test/discovery_and_monitoring/rs/member_reconfig.json index 0e2c2c462e..a05fed0efb 100644 --- a/test/discovery_and_monitoring/rs/member_reconfig.json +++ b/test/discovery_and_monitoring/rs/member_reconfig.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -49,7 +49,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/member_standalone.json b/test/discovery_and_monitoring/rs/member_standalone.json index 0756003a89..db100db9f3 100644 --- a/test/discovery_and_monitoring/rs/member_standalone.json +++ b/test/discovery_and_monitoring/rs/member_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -40,7 +40,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/new_primary.json b/test/discovery_and_monitoring/rs/new_primary.json index ed1a6245f9..69b07516b9 100644 --- a/test/discovery_and_monitoring/rs/new_primary.json +++ b/test/discovery_and_monitoring/rs/new_primary.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -50,7 +50,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -58,7 +58,8 @@ "servers": { "a:27017": { "type": "Unknown", - "setName": null + "setName": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json index ccb3a41f75..90ef0ce8dc 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_electionid.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -76,7 +76,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -114,7 +115,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -123,7 +124,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json index 415a0f66aa..9c1e2d4bdd 100644 --- a/test/discovery_and_monitoring/rs/new_primary_new_setversion.json +++ b/test/discovery_and_monitoring/rs/new_primary_new_setversion.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -76,7 +76,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked 
stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -114,7 +115,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -123,7 +124,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" }, "b:27017": { "type": "RSPrimary", diff --git a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json index d7b19cfe8f..774b3a5736 100644 --- a/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/new_primary_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -49,7 +49,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/non_rs_member.json b/test/discovery_and_monitoring/rs/non_rs_member.json index 538077ef09..6bf10bd628 100644 --- a/test/discovery_and_monitoring/rs/non_rs_member.json +++ b/test/discovery_and_monitoring/rs/non_rs_member.json @@ -10,7 +10,7 @@ "ok": 1, "helloOk": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/normalize_case.json b/test/discovery_and_monitoring/rs/normalize_case.json index 96a944f0c3..62915495e0 100644 --- a/test/discovery_and_monitoring/rs/normalize_case.json +++ b/test/discovery_and_monitoring/rs/normalize_case.json @@ -21,7 +21,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/normalize_case_me.json b/test/discovery_and_monitoring/rs/normalize_case_me.json index ab1720cefc..0d9ba6213e 100644 --- a/test/discovery_and_monitoring/rs/normalize_case_me.json +++ b/test/discovery_and_monitoring/rs/normalize_case_me.json @@ -22,7 +22,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json new file mode 100644 index 0000000000..8a77f31c50 --- /dev/null +++ b/test/discovery_and_monitoring/rs/null_election_id-pre-6.0.json @@ -0,0 +1,203 @@ +{ + "description": "Pre 6.0 Primaries with and without electionIds", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + 
"$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setVersion": 1, + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "c:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017", + "c:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "c:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/null_election_id.json b/test/discovery_and_monitoring/rs/null_election_id.json index 62120e8448..8a99a78475 100644 --- a/test/discovery_and_monitoring/rs/null_election_id.json +++ b/test/discovery_and_monitoring/rs/null_election_id.json @@ -18,7 +18,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -66,7 +66,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -116,23 +116,26 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": "Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, @@ -167,23 +170,26 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, - "electionId": null - }, - "b:27017": { "type": 
"Unknown", "setName": null, + "setVersion": null, "electionId": null }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, "c:27017": { "type": "Unknown", "setName": null, diff --git a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json index 9c54b39856..e34280e88c 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_ghost.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_ghost.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -41,7 +41,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json index ac416e57d5..79510d9399 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_mongos.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_mongos.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -41,7 +41,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json index a64524d0ca..abcc1e2d01 100644 --- a/test/discovery_and_monitoring/rs/primary_becomes_standalone.json +++ b/test/discovery_and_monitoring/rs/primary_becomes_standalone.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -38,7 +38,7 @@ { "ok": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_changes_set_name.json b/test/discovery_and_monitoring/rs/primary_changes_set_name.json index bf70ca3014..3b564d2c93 100644 --- a/test/discovery_and_monitoring/rs/primary_changes_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_changes_set_name.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -44,7 +44,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect.json b/test/discovery_and_monitoring/rs/primary_disconnect.json index 3db854f085..73a01a82a9 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json index 3a80b150fe..b030bd2c53 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -48,7 +48,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + 
"error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -115,7 +116,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -124,6 +125,7 @@ "a:27017": { "type": "Unknown", "setName": null, + "error": "primary marked stale due to electionId/setVersion mismatch", "electionId": null }, "b:27017": { @@ -159,7 +161,7 @@ "$oid": "000000000000000000000003" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -203,7 +205,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json index 32e03fb7d4..653a5f29e8 100644 --- a/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json +++ b/test/discovery_and_monitoring/rs/primary_disconnect_setversion.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -48,7 +48,8 @@ "a:27017": { "type": "Unknown", "setName": null, - "electionId": null + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" }, "b:27017": { "type": "RSPrimary", @@ -115,7 +116,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -124,6 +125,7 @@ "a:27017": { "type": "Unknown", "setName": null, + "error": "primary marked stale due to electionId/setVersion mismatch", "electionId": null }, "b:27017": { @@ -159,7 +161,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -203,7 +205,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json index bc02cc9571..1ca72225a2 100644 --- a/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_hint_from_secondary_with_mismatched_me.json @@ -18,7 +18,7 @@ "setName": "rs", "primary": "b:27017", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -48,7 +48,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_mismatched_me.json index 2d2c0f40d8..6bb6226f8a 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me.json @@ -31,7 +31,7 @@ "ok": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ] diff --git a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json index 4c40093659..a55dcfc6d4 100644 --- a/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json +++ b/test/discovery_and_monitoring/rs/primary_mismatched_me_not_removed.json @@ -18,7 +18,7 @@ "primary": "localhost:27017", "me": "a:27017", "minWireVersion": 0, - "maxWireVersion": 7 + 
"maxWireVersion": 25 } ] ], @@ -55,7 +55,7 @@ "primary": "localhost:27017", "me": "localhost:27018", "minWireVersion": 0, - "maxWireVersion": 7 + "maxWireVersion": 25 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_reports_new_member.json b/test/discovery_and_monitoring/rs/primary_reports_new_member.json index ac0d9374f0..ed28c48c87 100644 --- a/test/discovery_and_monitoring/rs/primary_reports_new_member.json +++ b/test/discovery_and_monitoring/rs/primary_reports_new_member.json @@ -17,7 +17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -51,7 +51,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -86,7 +86,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -127,7 +127,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json index 6dbd73dadc..798a648d19 100644 --- a/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/primary_to_no_primary_mismatched_me.json @@ -17,7 +17,7 @@ "me": "a:27017", "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -52,7 +52,7 @@ "me": "c:27017", "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json index cc0691fb8c..1366e38996 100644 --- a/test/discovery_and_monitoring/rs/primary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/primary_wrong_set_name.json @@ -15,7 +15,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/repeated.json b/test/discovery_and_monitoring/rs/repeated.json index 610aeae0ac..3ce0948ab8 100644 --- a/test/discovery_and_monitoring/rs/repeated.json +++ b/test/discovery_and_monitoring/rs/repeated.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -49,7 +49,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -84,7 +84,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -120,7 +120,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/replicaset_rsnp.json b/test/discovery_and_monitoring/rs/replicaset_rsnp.json index 3148e1c141..1cd732b82f 100644 --- a/test/discovery_and_monitoring/rs/replicaset_rsnp.json +++ b/test/discovery_and_monitoring/rs/replicaset_rsnp.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/response_from_removed.json b/test/discovery_and_monitoring/rs/response_from_removed.json index 87a66d9e72..fa46a14ceb 100644 --- a/test/discovery_and_monitoring/rs/response_from_removed.json +++ b/test/discovery_and_monitoring/rs/response_from_removed.json @@ -15,7 +15,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -46,7 +46,7 @@ "b:27017" ], "minWireVersion": 0, - 
"maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/sec_not_auth.json b/test/discovery_and_monitoring/rs/sec_not_auth.json index a39855e654..ccbe7a08af 100644 --- a/test/discovery_and_monitoring/rs/sec_not_auth.json +++ b/test/discovery_and_monitoring/rs/sec_not_auth.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -32,7 +32,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json new file mode 100644 index 0000000000..f27060533c --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0-pre-6.0.json @@ -0,0 +1,83 @@ +{ + "description": "Pre 6.0 New primary", + "uri": "mongodb://a,b/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ], + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "hosts": [ + "a:27017", + "b:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 0, + "minWireVersion": 0, + "maxWireVersion": 21 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs" + }, + "b:27017": { + "type": "Unknown", + "setName": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json index 4c1cb011a5..9ffff58ef0 100644 --- a/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json +++ b/test/discovery_and_monitoring/rs/secondary_ignore_ok_0.json @@ -1,5 +1,5 @@ { - "description": "New primary", + "description": "Secondary ignored when ok is zero", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -32,7 +32,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -59,7 +59,7 @@ { "ok": 0, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json b/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json new file mode 100644 index 0000000000..c23d8dc4c9 --- /dev/null +++ b/test/discovery_and_monitoring/rs/secondary_ipv6_literal.json @@ -0,0 +1,38 @@ +{ + "description": "Secondary with IPv6 literal", + "uri": "mongodb://[::1]/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "[::1]:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "me": "[::1]:27017", + "hosts": [ + "[::1]:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 26 + } + ] + ], + "outcome": { + "servers": { + "[::1]:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + 
"topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": true + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json index 6f1b9b5986..790e4bfca8 100644 --- a/test/discovery_and_monitoring/rs/secondary_mismatched_me.json +++ b/test/discovery_and_monitoring/rs/secondary_mismatched_me.json @@ -32,7 +32,7 @@ "ok": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ] diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json index 8d2f152f59..1f86b50543 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json index b7ef2d6d6a..6b89914151 100644 --- a/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json +++ b/test/discovery_and_monitoring/rs/secondary_wrong_set_name_with_primary.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -51,7 +51,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/set_version_can_rollback.json b/test/discovery_and_monitoring/rs/set_version_can_rollback.json new file mode 100644 index 0000000000..1cc608a344 --- /dev/null +++ b/test/discovery_and_monitoring/rs/set_version_can_rollback.json @@ -0,0 +1,147 @@ +{ + "description": "Set version rolls back after new primary with higher election Id", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + }, + { + "responses": [ + [ + "a:27017", 
+ { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000002" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json new file mode 100644 index 0000000000..3669511c5a --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_equal_max_without_electionid.json @@ -0,0 +1,84 @@ +{ + "description": "setVersion version that is equal is treated the same as greater than if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json new file mode 100644 index 0000000000..06c89609f5 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_greaterthan_max_without_electionid.json @@ -0,0 +1,85 @@ +{ + "description": "setVersion that is greater than maxSetVersion is used if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1 + } + }, + { + "responses": [ + [ + "b:27017", + { 
+ "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 17 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..9a1ee61399 --- /dev/null +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid-pre-6.0.json @@ -0,0 +1,85 @@ +{ + "description": "Pre 6.0 setVersion is ignored if there is no electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, + "electionId": null + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2 + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/setversion_without_electionid.json b/test/discovery_and_monitoring/rs/setversion_without_electionid.json index 2f68287f1d..256fafe108 100644 --- a/test/discovery_and_monitoring/rs/setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/setversion_without_electionid.json @@ -1,5 +1,5 @@ { - "description": "setVersion is ignored if there is no electionId", + "description": "setVersion that is less than maxSetVersion is ignored if there is no electionId", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -56,21 +56,21 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2, "electionId": null }, "b:27017": { - "type": "RSPrimary", - "setName": "rs", - "setVersion": 1, + "type": "Unknown", + "setName": null, "electionId": null } }, diff --git a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json 
b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json index e9075f97f2..6de995518d 100644 --- a/test/discovery_and_monitoring/rs/stepdown_change_set_name.json +++ b/test/discovery_and_monitoring/rs/stepdown_change_set_name.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -45,7 +45,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/too_new.json b/test/discovery_and_monitoring/rs/too_new.json index 0433d27a36..696246f8e1 100644 --- a/test/discovery_and_monitoring/rs/too_new.json +++ b/test/discovery_and_monitoring/rs/too_new.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/rs/too_old.json b/test/discovery_and_monitoring/rs/too_old.json index 461d00acc4..dc8a5b2b9c 100644 --- a/test/discovery_and_monitoring/rs/too_old.json +++ b/test/discovery_and_monitoring/rs/too_old.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -30,7 +30,9 @@ "hosts": [ "a:27017", "b:27017" - ] + ], + "minWireVersion": 999, + "maxWireVersion": 1000 } ] ], diff --git a/test/discovery_and_monitoring/rs/unexpected_mongos.json b/test/discovery_and_monitoring/rs/unexpected_mongos.json index cc19a961f2..c6ffb321ca 100644 --- a/test/discovery_and_monitoring/rs/unexpected_mongos.json +++ b/test/discovery_and_monitoring/rs/unexpected_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json new file mode 100644 index 0000000000..03195aacde --- /dev/null +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid-pre-6.0.json @@ -0,0 +1,140 @@ +{ + "description": "Pre 6.0 Record max setVersion, even from primary without electionId", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + }, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 1, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "b:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 2, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to discovery of newer primary" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + 
"maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + }, + "minWireVersion": 0, + "maxWireVersion": 16 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" + }, + "b:27017": { + "type": "RSPrimary", + "setName": "rs", + "setVersion": 2 + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs", + "maxSetVersion": 2, + "maxElectionId": { + "$oid": "000000000000000000000001" + } + } + } + ] +} diff --git a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json index 421ff57c8d..eaf586d728 100644 --- a/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json +++ b/test/discovery_and_monitoring/rs/use_setversion_without_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], @@ -64,27 +64,31 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { "type": "RSPrimary", "setName": "rs", - "setVersion": 2 + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000001" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { "$oid": "000000000000000000000001" } @@ -108,29 +112,33 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 17 } ] ], "outcome": { "servers": { "a:27017": { - "type": "Unknown", - "setName": null, - "electionId": null - }, - "b:27017": { "type": "RSPrimary", "setName": "rs", - "setVersion": 2 + "setVersion": 1, + "electionId": { + "$oid": "000000000000000000000002" + } + }, + "b:27017": { + "type": "Unknown", + "setName": null, + "electionId": null, + "error": "primary marked stale due to electionId/setVersion mismatch" } }, "topologyType": "ReplicaSetWithPrimary", "logicalSessionTimeoutMinutes": null, "setName": "rs", - "maxSetVersion": 2, + "maxSetVersion": 1, "maxElectionId": { - "$oid": "000000000000000000000001" + "$oid": "000000000000000000000002" } } } diff --git a/test/discovery_and_monitoring/rs/wrong_set_name.json b/test/discovery_and_monitoring/rs/wrong_set_name.json index 9654ff7b79..d0764d24dc 100644 --- a/test/discovery_and_monitoring/rs/wrong_set_name.json +++ b/test/discovery_and_monitoring/rs/wrong_set_name.json @@ -17,7 +17,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/compatible.json b/test/discovery_and_monitoring/sharded/compatible.json index e531db97f9..ceb0ec24c4 100644 --- a/test/discovery_and_monitoring/sharded/compatible.json +++ b/test/discovery_and_monitoring/sharded/compatible.json @@ -23,7 +23,7 @@ 
"isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/discover_single_mongos.json b/test/discovery_and_monitoring/sharded/discover_single_mongos.json index 9e877a0840..bf7e57521c 100644 --- a/test/discovery_and_monitoring/sharded/discover_single_mongos.json +++ b/test/discovery_and_monitoring/sharded/discover_single_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json index 93fa398d52..3da0f84ca2 100644 --- a/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json +++ b/test/discovery_and_monitoring/sharded/ls_timeout_mongos.json @@ -13,7 +13,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -25,7 +25,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -67,7 +67,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/mongos_disconnect.json b/test/discovery_and_monitoring/sharded/mongos_disconnect.json index 50a93eda5f..29b3351869 100644 --- a/test/discovery_and_monitoring/sharded/mongos_disconnect.json +++ b/test/discovery_and_monitoring/sharded/mongos_disconnect.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -76,7 +76,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/multiple_mongoses.json b/test/discovery_and_monitoring/sharded/multiple_mongoses.json index 311592d715..ae0c2d9cde 100644 --- a/test/discovery_and_monitoring/sharded/multiple_mongoses.json +++ b/test/discovery_and_monitoring/sharded/multiple_mongoses.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/non_mongos_removed.json b/test/discovery_and_monitoring/sharded/non_mongos_removed.json index d74375ebbf..4698f576d5 100644 --- a/test/discovery_and_monitoring/sharded/non_mongos_removed.json +++ b/test/discovery_and_monitoring/sharded/non_mongos_removed.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -26,7 +26,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/sharded/too_new.json b/test/discovery_and_monitoring/sharded/too_new.json index 4b997d2163..c4e984ddec 100644 --- a/test/discovery_and_monitoring/sharded/too_new.json +++ 
b/test/discovery_and_monitoring/sharded/too_new.json @@ -21,7 +21,9 @@ "ok": 1, "helloOk": true, "isWritablePrimary": true, - "msg": "isdbgrid" + "msg": "isdbgrid", + "minWireVersion": 7, + "maxWireVersion": 900 } ] ], diff --git a/test/discovery_and_monitoring/sharded/too_old.json b/test/discovery_and_monitoring/sharded/too_old.json index 688e1db0f5..b918715ada 100644 --- a/test/discovery_and_monitoring/sharded/too_old.json +++ b/test/discovery_and_monitoring/sharded/too_old.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 2, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/discovery_and_monitoring/single/compatible.json b/test/discovery_and_monitoring/single/compatible.json index 302927598c..493d9b748e 100644 --- a/test/discovery_and_monitoring/single/compatible.json +++ b/test/discovery_and_monitoring/single/compatible.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_external_ip.json b/test/discovery_and_monitoring/single/direct_connection_external_ip.json index 90676a8f9b..1461b4c469 100644 --- a/test/discovery_and_monitoring/single/direct_connection_external_ip.json +++ b/test/discovery_and_monitoring/single/direct_connection_external_ip.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_mongos.json b/test/discovery_and_monitoring/single/direct_connection_mongos.json index 25fe965185..72be020862 100644 --- a/test/discovery_and_monitoring/single/direct_connection_mongos.json +++ b/test/discovery_and_monitoring/single/direct_connection_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_replicaset.json b/test/discovery_and_monitoring/single/direct_connection_replicaset.json index cd8660888a..82a51d390e 100644 --- a/test/discovery_and_monitoring/single/direct_connection_replicaset.json +++ b/test/discovery_and_monitoring/single/direct_connection_replicaset.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json index e204956056..e06d284364 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsarbiter.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json index 409e8502b3..45eb1602fb 100644 --- a/test/discovery_and_monitoring/single/direct_connection_rsprimary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rsprimary.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json index 305f283b52..b1bef8a49f 100644 --- 
a/test/discovery_and_monitoring/single/direct_connection_rssecondary.json +++ b/test/discovery_and_monitoring/single/direct_connection_rssecondary.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_standalone.json b/test/discovery_and_monitoring/single/direct_connection_standalone.json index b47278482a..e71ba07e74 100644 --- a/test/discovery_and_monitoring/single/direct_connection_standalone.json +++ b/test/discovery_and_monitoring/single/direct_connection_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json index 71080e6810..8014a0a533 100644 --- a/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json +++ b/test/discovery_and_monitoring/single/direct_connection_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -45,7 +45,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/discover_standalone.json b/test/discovery_and_monitoring/single/discover_standalone.json index 858cbdaf63..d78c81654b 100644 --- a/test/discovery_and_monitoring/single/discover_standalone.json +++ b/test/discovery_and_monitoring/single/discover_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/ls_timeout_standalone.json b/test/discovery_and_monitoring/single/ls_timeout_standalone.json index 87b3e4e8a1..236eabe00a 100644 --- a/test/discovery_and_monitoring/single/ls_timeout_standalone.json +++ b/test/discovery_and_monitoring/single/ls_timeout_standalone.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "logicalSessionTimeoutMinutes": 7, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/not_ok_response.json b/test/discovery_and_monitoring/single/not_ok_response.json index 8e7c2a10e3..cfaac3564a 100644 --- a/test/discovery_and_monitoring/single/not_ok_response.json +++ b/test/discovery_and_monitoring/single/not_ok_response.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -21,7 +21,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/standalone_removed.json b/test/discovery_and_monitoring/single/standalone_removed.json index 57f8f861b1..675cdbb008 100644 --- a/test/discovery_and_monitoring/single/standalone_removed.json +++ b/test/discovery_and_monitoring/single/standalone_removed.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json index 46660fa8de..488cac4918 100644 --- a/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json +++ 
b/test/discovery_and_monitoring/single/standalone_using_legacy_hello.json @@ -10,7 +10,7 @@ "ok": 1, "ismaster": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/single/too_old_then_upgraded.json b/test/discovery_and_monitoring/single/too_old_then_upgraded.json index 58ae7d9de4..c3dd98cf62 100644 --- a/test/discovery_and_monitoring/single/too_old_then_upgraded.json +++ b/test/discovery_and_monitoring/single/too_old_then_upgraded.json @@ -1,5 +1,5 @@ { - "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 6", + "description": "Standalone with default maxWireVersion of 0 is upgraded to one with maxWireVersion 21", "uri": "mongodb://a", "phases": [ { @@ -35,7 +35,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/discovery_and_monitoring/unified/auth-error.json b/test/discovery_and_monitoring/unified/auth-error.json new file mode 100644 index 0000000000..62d26494c7 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after AuthenticationFailure error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": 
"Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-misc-command-error.json b/test/discovery_and_monitoring/unified/auth-misc-command-error.json new file mode 100644 index 0000000000..fd62fe604e --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-misc-command-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-misc-command-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after misc command error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authMiscErrorTest", + "errorCode": 1 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authMiscErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-misc-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": 
"client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-misc-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-misc-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-error.json b/test/discovery_and_monitoring/unified/auth-network-error.json new file mode 100644 index 0000000000..84763af32e --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-network-timeout-error.json b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json new file mode 100644 index 0000000000..3cf9576eba --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-network-timeout-error.json @@ -0,0 +1,233 @@ +{ + "description": "auth-network-timeout-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network timeout error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "authNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authNetworkTimeoutErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-network-timeout-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 
+ } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-network-timeout-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/auth-shutdown-error.json b/test/discovery_and_monitoring/unified/auth-shutdown-error.json new file mode 100644 index 0000000000..b9e503af66 --- /dev/null +++ b/test/discovery_and_monitoring/unified/auth-shutdown-error.json @@ -0,0 +1,230 @@ +{ + "description": "auth-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "auth": true, + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after shutdown error during authentication", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authShutdownErrorTest", + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authShutdownErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "auth-shutdown-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "insert": "auth-shutdown-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "auth-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/cancel-server-check.json b/test/discovery_and_monitoring/unified/cancel-server-check.json new file mode 100644 index 0000000000..a60ccfcb41 --- /dev/null +++ b/test/discovery_and_monitoring/unified/cancel-server-check.json @@ -0,0 +1,201 @@ +{ + "description": "cancel-server-check", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ], + "serverless": "forbid" + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Cancel server check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "heartbeatFrequencyMS": 10000, + "serverSelectionTimeoutMS": 5000, + "appname": "cancelServerCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "cancel-server-check" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectResult": { + "insertedId": 2 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectResult": { + "insertedId": 3 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "cancel-server-check", + "databaseName": "sdam-tests", + 
"documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/connectTimeoutMS.json b/test/discovery_and_monitoring/unified/connectTimeoutMS.json new file mode 100644 index 0000000000..d3e860a9cb --- /dev/null +++ b/test/discovery_and_monitoring/unified/connectTimeoutMS.json @@ -0,0 +1,221 @@ +{ + "description": "connectTimeoutMS", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "connectTimeoutMS=0", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 0, + "heartbeatFrequencyMS": 500, + "appname": "connectTimeoutMS=0" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "connectTimeoutMS" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "connectTimeoutMS=0", + "blockConnection": true, + "blockTimeMS": 550 + } + }, + "client": "setupClient" + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 750 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "connectTimeoutMS", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "connectTimeoutMS", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-error.json b/test/discovery_and_monitoring/unified/find-network-error.json new file mode 100644 index 
0000000000..c1b6db40ca --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-error.json @@ -0,0 +1,234 @@ +{ + "description": "find-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true, + "appName": "findNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-network-timeout-error.json b/test/discovery_and_monitoring/unified/find-network-timeout-error.json new file mode 100644 
index 0000000000..e5ac9f21aa --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-network-timeout-error.json @@ -0,0 +1,199 @@ +{ + "description": "find-network-timeout-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Ignore network timeout error on find", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "findNetworkTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkTimeoutErrorTest", + "socketTimeoutMS": 250 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-timeout-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "find-network-timeout-error" + }, + "commandName": "find", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "find-network-timeout-error", + "documents": [ + { + "_id": 3 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "find-network-timeout-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/find-shutdown-error.json b/test/discovery_and_monitoring/unified/find-shutdown-error.json new file mode 100644 index 0000000000..6e5a2cac05 --- /dev/null +++ b/test/discovery_and_monitoring/unified/find-shutdown-error.json @@ -0,0 +1,251 @@ +{ + "description": "find-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + 
"createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on find", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorFindTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "appName": "shutdownErrorFindTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "find-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-command-error.json b/test/discovery_and_monitoring/unified/hello-command-error.json new file mode 100644 index 
0000000000..87958cb2c0 --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-command-error.json @@ -0,0 +1,376 @@ +{ + "description": "hello-command-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Command error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorHandshakeTest", + "closeConnection": false, + "errorCode": 91 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Command error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 1000, + "heartbeatFrequencyMS": 500, + "appname": "commandErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-command-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + 
"failCommands": [ + "hello", + "isMaster" + ], + "appName": "commandErrorCheckTest", + "closeConnection": false, + "blockConnection": true, + "blockTimeMS": 750, + "errorCode": 91 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-command-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-command-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-network-error.json b/test/discovery_and_monitoring/unified/hello-network-error.json new file mode 100644 index 0000000000..15ed2b605e --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-network-error.json @@ -0,0 +1,346 @@ +{ + "description": "hello-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorHandshakeTest", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": 
"sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network error on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "networkErrorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "networkErrorCheckTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/hello-timeout.json 
b/test/discovery_and_monitoring/unified/hello-timeout.json new file mode 100644 index 0000000000..fe7cf4e78d --- /dev/null +++ b/test/discovery_and_monitoring/unified/hello-timeout.json @@ -0,0 +1,514 @@ +{ + "description": "hello-timeout", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network timeout on Monitor handshake", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorHandshakeTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorHandshakeTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "Network timeout on Monitor check", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 750, + "heartbeatFrequencyMS": 500, + "appname": "timeoutMonitorCheckTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "timeoutMonitorCheckTest", + "blockConnection": true, + "blockTimeMS": 1000 + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "Driver extends timeout while streaming", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "connectTimeoutMS": 250, + "heartbeatFrequencyMS": 500, + "appname": "extendsTimeoutTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "hello-timeout" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 2000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "hello-timeout", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "hello-timeout", + "databaseName": "sdam-tests", + "documents": [ + { + 
"_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-network-error.json b/test/discovery_and_monitoring/unified/insert-network-error.json new file mode 100644 index 0000000000..bfe41a4cb6 --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-network-error.json @@ -0,0 +1,246 @@ +{ + "description": "insert-network-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Reset server and pool after network error on insert", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true, + "appName": "insertNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent", + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "insertNetworkErrorTest" + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-network-error" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "insert-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "insert-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/insert-shutdown-error.json b/test/discovery_and_monitoring/unified/insert-shutdown-error.json new file mode 100644 index 0000000000..af7c6c987a --- /dev/null +++ b/test/discovery_and_monitoring/unified/insert-shutdown-error.json @@ -0,0 +1,250 @@ +{ + "description": "insert-shutdown-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Concurrent shutdown error on insert", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appname": "shutdownErrorInsertTest" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "insert-shutdown-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "appName": "shutdownErrorInsertTest", + "errorCode": 91, + "blockConnection": true, + "blockTimeMS": 500 + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + }, + { + "name": 
"assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "insert-shutdown-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json b/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json new file mode 100644 index 0000000000..d9329646d4 --- /dev/null +++ b/test/discovery_and_monitoring/unified/interruptInUse-pool-clear.json @@ -0,0 +1,591 @@ +{ + "description": "interruptInUse", + "schemaVersion": "1.11", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Connection pool clear uses interruptInUseConnections=true after monitor timeout", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "poolClearedEvent", + "connectionClosedEvent", + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ], + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUse", + "retryReads": false, + "minPoolSize": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$where": "sleep(2000) || true" + } + }, + "expectError": { + "isError": true + } + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUse" + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + 
"connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ], + "outcome": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Error returned from connection pool clear with interruptInUseConnections=true is retryable", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "poolClearedEvent", + "connectionClosedEvent", + "commandStartedEvent", + "commandFailedEvent", + "commandSucceededEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ], + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUseRetryable", + "retryReads": true, + "minPoolSize": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "$where": "sleep(2000) || true" + } + } + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUseRetryable" + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandFailedEvent": { + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ], + "outcome": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "Error returned from connection pool clear with interruptInUseConnections=true is retryable for write", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + 
"poolClearedEvent", + "connectionClosedEvent", + "commandStartedEvent", + "commandFailedEvent", + "commandSucceededEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ], + "uriOptions": { + "connectTimeoutMS": 500, + "heartbeatFrequencyMS": 500, + "appname": "interruptInUseRetryableWrite", + "retryWrites": true, + "minPoolSize": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "interruptInUse" + } + }, + { + "thread": { + "id": "thread1" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": { + "$where": "sleep(2000) || true" + }, + "update": { + "$set": { + "a": "bar" + } + } + } + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "blockConnection": true, + "blockTimeMS": 1500, + "appName": "interruptInUseRetryableWrite" + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandFailedEvent": { + "commandName": "update" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "poolClearedEvent": { + "interruptInUseConnections": true + } + }, + { + "connectionCheckedInEvent": {} + }, + { + "connectionClosedEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ], + "outcome": [ + { + "collectionName": "interruptInUse", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1, + "a": "bar" + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/loadbalanced-emit-topology-changed-before-close.json b/test/discovery_and_monitoring/unified/loadbalanced-emit-topology-changed-before-close.json new file mode 100644 index 0000000000..30c0657630 --- /dev/null +++ b/test/discovery_and_monitoring/unified/loadbalanced-emit-topology-changed-before-close.json @@ -0,0 +1,88 @@ +{ + "description": "loadbalanced-emit-topology-description-changed-before-close", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "topologyOpeningEvent", + "topologyClosedEvent" + ] + } + } + ] + } + }, + { + "name": 
"waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "events": [ + { + "topologyOpeningEvent": {} + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": {} + } + }, + { + "topologyDescriptionChangedEvent": { + "newDescription": { + "type": "LoadBalanced" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-loadbalanced.json b/test/discovery_and_monitoring/unified/logging-loadbalanced.json new file mode 100644 index 0000000000..0ad3b0ceaa --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-loadbalanced.json @@ -0,0 +1,166 @@ +{ + "description": "loadbalanced-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-replicaset.json 
b/test/discovery_and_monitoring/unified/logging-replicaset.json new file mode 100644 index 0000000000..fe6ac60b68 --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-replicaset.json @@ -0,0 +1,610 @@ +{ + "description": "replicaset-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + 
"component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 3 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + 
"observeEvents": [ + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-sharded.json b/test/discovery_and_monitoring/unified/logging-sharded.json new file mode 100644 index 0000000000..3788708ab0 --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-sharded.json @@ -0,0 +1,494 @@ +{ + "description": "sharded-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ], + "useMultipleMongoses": true + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 3 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + 
"component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + 
"serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + }, + "client": "setupClient" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/logging-standalone.json b/test/discovery_and_monitoring/unified/logging-standalone.json new file mode 100644 index 0000000000..0682a1a4fb --- /dev/null +++ b/test/discovery_and_monitoring/unified/logging-standalone.json @@ -0,0 +1,519 @@ +{ + "description": "standalone-logging", + "schemaVersion": "1.16", + "runOnRequirements": [ + { + "topologies": [ + "single" + ], + "minServerVersion": "4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" 
+ } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring", + "topologyId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring", + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed", + "topologyId": { + "$$exists": true + }, + "previousDescription": { + "$$exists": true + }, + "newDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring", + "topologyId": { + "$$exists": true + } + } + } + ] + } + ] + }, + { + "description": "Successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatSucceededEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed" + } + } + ], + "messages": [ + { + "level": "debug", + 
"component": "topology", + "data": { + "message": "Server heartbeat started", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "serverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "reply": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "ok": 1 + } + } + } + } + } + ] + } + ] + }, + { + "description": "Failing heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeLogMessages": { + "topology": "debug" + }, + "observeEvents": [ + "serverHeartbeatFailedEvent" + ], + "uriOptions": { + "appname": "failingHeartbeatLoggingTest" + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "failingHeartbeatLoggingTest", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatFailedEvent": {} + }, + "count": 1 + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "ignoreExtraMessages": true, + "ignoreMessages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Stopped server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Topology description changed" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting server monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Starting topology monitoring" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat started" + } + }, + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat succeeded" + } + } + ], + "messages": [ + { + "level": "debug", + "component": "topology", + "data": { + "message": "Server heartbeat failed", + "awaited": { + "$$exists": true + }, + "topologyId": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + }, + "driverConnectionId": { + "$$exists": true + }, + "durationMS": { + "$$type": [ + "double", + "int", + "long" + ] + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/minPoolSize-error.json b/test/discovery_and_monitoring/unified/minPoolSize-error.json new file mode 100644 index 0000000000..bd9e9fcdec --- /dev/null +++ b/test/discovery_and_monitoring/unified/minPoolSize-error.json @@ 
-0,0 +1,177 @@ +{ + "description": "minPoolSize-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "serverless": "forbid", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "sdam-minPoolSize-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Network error on minPoolSize background creation", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 3 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "SDAMminPoolSizeError", + "closeConnection": true + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent", + "poolReadyEvent" + ], + "uriOptions": { + "heartbeatFrequencyMS": 10000, + "appname": "SDAMminPoolSizeError", + "minPoolSize": 10, + "serverSelectionTimeoutMS": 1000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "sdam-minPoolSize-error" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": {} + }, + "commandName": "ping" + }, + "expectError": { + "isError": true + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + }, + "client": "setupClient" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolReadyEvent": {} + }, + "count": 2 + } + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/pool-cleared-error.json b/test/discovery_and_monitoring/unified/pool-cleared-error.json new file mode 100644 index 0000000000..b7f6924f2b --- /dev/null +++ b/test/discovery_and_monitoring/unified/pool-cleared-error.json @@ -0,0 +1,373 @@ +{ + "description": "pool-cleared-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.9", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "PoolClearedError does not mark server unknown", + 
"operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "serverDescriptionChangedEvent", + "poolClearedEvent" + ], + "uriOptions": { + "retryWrites": true, + "maxPoolSize": 1, + "appname": "poolClearedErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "pool-cleared-error" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100, + "closeConnection": true, + "appName": "poolClearedErrorTest" + } + }, + "client": "setupClient" + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + }, + { + "thread": { + "id": "thread1" + } + }, + { + "thread": { + "id": "thread2" + } + }, + { + "thread": { + "id": "thread3" + } + }, + { + "thread": { + "id": "thread4" + } + }, + { + "thread": { + "id": "thread5" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread3", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 5 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread4", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread5", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 7 + } + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread1" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread2" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread3" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread4" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread5" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + 
"serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 8 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + } + ], + "outcome": [ + { + "collectionName": "pool-cleared-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + }, + { + "_id": 8 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json new file mode 100644 index 0000000000..3147a07a1e --- /dev/null +++ b/test/discovery_and_monitoring/unified/rediscover-quickly-after-step-down.json @@ -0,0 +1,242 @@ +{ + "description": "rediscover-quickly-after-step-down", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient" + } + }, + { + "database": { + "id": "adminDatabase", + "client": "setupClient", + "databaseName": "admin" + } + } + ], + "initialData": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Rediscover quickly after replSetStepDown", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "poolClearedEvent", + "commandStartedEvent" + ], + "uriOptions": { + "appname": "replSetStepDownTest", + "heartbeatFrequencyMS": 60000, + "serverSelectionTimeoutMS": 5000, + "w": "majority" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test-replSetStepDown" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + }, + { + "name": "recordTopologyDescription", + "object": "testRunner", + "arguments": { + "client": "client", + "id": "topologyDescription" + } + }, + { + "name": "assertTopologyType", + "object": "testRunner", + "arguments": { + "topologyDescription": "topologyDescription", + "topologyType": "ReplicaSetWithPrimary" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetFreeze": 0 + }, + "readPreference": { + "mode": "secondary" + }, + "commandName": "replSetFreeze" + } + }, + { + "name": "runCommand", + "object": "adminDatabase", + "arguments": { + "command": { + "replSetStepDown": 30, + "secondaryCatchUpPeriodSecs": 30, + "force": false + }, + "commandName": "replSetStepDown" + } + }, + { + "name": 
"waitForPrimaryChange", + "object": "testRunner", + "arguments": { + "client": "client", + "priorTopologyDescription": "topologyDescription", + "timeoutMS": 15000 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test-replSetStepDown", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "commandName": "insert", + "databaseName": "sdam-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test-replSetStepDown", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/replicaset-emit-topology-changed-before-close.json b/test/discovery_and_monitoring/unified/replicaset-emit-topology-changed-before-close.json new file mode 100644 index 0000000000..066a4ffee5 --- /dev/null +++ b/test/discovery_and_monitoring/unified/replicaset-emit-topology-changed-before-close.json @@ -0,0 +1,89 @@ +{ + "description": "replicaset-emit-topology-description-changed-before-close", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "topologyOpeningEvent", + "topologyClosedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": false, + "events": [ + { + "topologyOpeningEvent": {} + }, + { + "topologyDescriptionChangedEvent": {} + }, + { + "topologyDescriptionChangedEvent": {} + }, + { + "topologyDescriptionChangedEvent": {} + }, + { + "topologyDescriptionChangedEvent": {} + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "ReplicaSetWithPrimary" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/serverMonitoringMode.json b/test/discovery_and_monitoring/unified/serverMonitoringMode.json new file mode 100644 index 0000000000..e44fad1bcd --- /dev/null +++ b/test/discovery_and_monitoring/unified/serverMonitoringMode.json @@ -0,0 +1,511 @@ +{ + "description": "serverMonitoringMode", + "schemaVersion": "1.17", + "runOnRequirements": [ + { + "topologies": [ + "single", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "connect with serverMonitoringMode=auto >=4.4", + "runOnRequirements": 
[ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "auto" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": true + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=auto <4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "auto", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=stream >=4.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4.0" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "stream" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + 
"events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": true + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=stream <4.4", + "runOnRequirements": [ + { + "maxServerVersion": "4.2.99" + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "stream", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + }, + { + "description": "connect with serverMonitoringMode=poll", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 500 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 2 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + }, + { + "serverHeartbeatSucceededEvent": { + "awaited": false + } + }, + { + "serverHeartbeatStartedEvent": { + "awaited": false + } + } + ] + } + ] + }, + { + "description": "poll waits after successful heartbeat", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 1000000 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "serverHeartbeatStartedEvent", + "serverHeartbeatSucceededEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "sdam-tests" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatSucceededEvent": {} + }, + "count": 1 + } + }, + { + "name": "wait", + "object": "testRunner", + 
"arguments": { + "ms": 500 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverHeartbeatStartedEvent": {} + }, + "count": 1 + } + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/sharded-emit-topology-changed-before-close.json b/test/discovery_and_monitoring/unified/sharded-emit-topology-changed-before-close.json new file mode 100644 index 0000000000..98fb585531 --- /dev/null +++ b/test/discovery_and_monitoring/unified/sharded-emit-topology-changed-before-close.json @@ -0,0 +1,108 @@ +{ + "description": "sharded-emit-topology-description-changed-before-close", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "topologyOpeningEvent", + "topologyClosedEvent" + ], + "useMultipleMongoses": true + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 3 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": false, + "events": [ + { + "topologyOpeningEvent": {} + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Sharded" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Sharded" + }, + "newDescription": { + "type": "Sharded" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Sharded" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring/unified/standalone-emit-topology-changed-before-close.json b/test/discovery_and_monitoring/unified/standalone-emit-topology-changed-before-close.json new file mode 100644 index 0000000000..27b5444d54 --- /dev/null +++ b/test/discovery_and_monitoring/unified/standalone-emit-topology-changed-before-close.json @@ -0,0 +1,97 @@ +{ + "description": "standalone-emit-topology-description-changed-before-close", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "single" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "Topology lifecycle", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "topologyOpeningEvent", + "topologyClosedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "close", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": false, + "events": [ + { + "topologyOpeningEvent": {} + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { 
+ "type": "Unknown" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Single" + } + } + }, + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Single" + }, + "newDescription": { + "type": "Unknown" + } + } + }, + { + "topologyClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/test/discovery_and_monitoring_integration/auth-error.json b/test/discovery_and_monitoring_integration/auth-error.json deleted file mode 100644 index 064d660e32..0000000000 --- a/test/discovery_and_monitoring_integration/auth-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after AuthenticationFailure error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authErrorTest", - "errorCode": 18 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-misc-command-error.json b/test/discovery_and_monitoring_integration/auth-misc-command-error.json deleted file mode 100644 index 70dd59251d..0000000000 --- a/test/discovery_and_monitoring_integration/auth-misc-command-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-misc-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after misc command error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authMiscErrorTest", - "errorCode": 1 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authMiscErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 
- } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-misc-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-network-error.json b/test/discovery_and_monitoring_integration/auth-network-error.json deleted file mode 100644 index a75a398c5e..0000000000 --- a/test/discovery_and_monitoring_integration/auth-network-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "closeConnection": true, - "appName": "authNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authNetworkErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-network-timeout-error.json b/test/discovery_and_monitoring_integration/auth-network-timeout-error.json deleted file mode 100644 index a4ee7d9eff..0000000000 --- a/test/discovery_and_monitoring_integration/auth-network-timeout-error.json +++ /dev/null @@ 
-1,143 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-network-timeout-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network timeout error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "blockConnection": true, - "blockTimeMS": 500, - "appName": "authNetworkTimeoutErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authNetworkTimeoutErrorTest", - "connectTimeoutMS": 250, - "socketTimeoutMS": 250 - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-network-timeout-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/auth-shutdown-error.json b/test/discovery_and_monitoring_integration/auth-shutdown-error.json deleted file mode 100644 index 2dab90e1c5..0000000000 --- a/test/discovery_and_monitoring_integration/auth-shutdown-error.json +++ /dev/null @@ -1,140 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "authEnabled": true - } - ], - "database_name": "sdam-tests", - "collection_name": "auth-shutdown-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after shutdown error during authentication", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "saslContinue" - ], - "appName": "authShutdownErrorTest", - "errorCode": 91 - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "authShutdownErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": 
"ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "auth-shutdown-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/cancel-server-check.json b/test/discovery_and_monitoring_integration/cancel-server-check.json deleted file mode 100644 index 9586350959..0000000000 --- a/test/discovery_and_monitoring_integration/cancel-server-check.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.2", - "topology": [ - "sharded" - ] - } - ], - "database_name": "sdam-tests", - "collection_name": "cancel-server-check", - "data": [], - "tests": [ - { - "description": "Cancel server check", - "clientOptions": { - "retryWrites": true, - "heartbeatFrequencyMS": 10000, - "serverSelectionTimeoutMS": 5000, - "appname": "cancelServerCheckTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/connectTimeoutMS.json b/test/discovery_and_monitoring_integration/connectTimeoutMS.json deleted file mode 100644 index 36a6dc4507..0000000000 --- a/test/discovery_and_monitoring_integration/connectTimeoutMS.json +++ /dev/null @@ -1,149 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "connectTimeoutMS", - "data": [], - "tests": [ - { - "description": "connectTimeoutMS=0", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 0, - "heartbeatFrequencyMS": 500, - "appname": "connectTimeoutMS=0" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - 
"arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "connectTimeoutMS=0", - "blockConnection": true, - "blockTimeMS": 550 - } - } - } - }, - { - "name": "wait", - "object": "testRunner", - "arguments": { - "ms": 750 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "connectTimeoutMS", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "connectTimeoutMS", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-network-error.json b/test/discovery_and_monitoring_integration/find-network-error.json deleted file mode 100644 index 4db2634cd6..0000000000 --- a/test/discovery_and_monitoring_integration/find-network-error.json +++ /dev/null @@ -1,144 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error on find", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true, - "appName": "findNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "appname": "findNetworkErrorTest" - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "find-network-error" - }, - "command_name": "find", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "find-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 
- }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-network-timeout-error.json b/test/discovery_and_monitoring_integration/find-network-timeout-error.json deleted file mode 100644 index c4e10b3a76..0000000000 --- a/test/discovery_and_monitoring_integration/find-network-timeout-error.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-network-timeout-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Ignore network timeout error on find", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "blockConnection": true, - "blockTimeMS": 500, - "appName": "findNetworkTimeoutErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "appname": "findNetworkTimeoutErrorTest", - "socketTimeoutMS": 250 - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "find-network-timeout-error" - }, - "command_name": "find", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "find-network-timeout-error", - "documents": [ - { - "_id": 3 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/find-shutdown-error.json b/test/discovery_and_monitoring_integration/find-shutdown-error.json deleted file mode 100644 index 65de8398b1..0000000000 --- a/test/discovery_and_monitoring_integration/find-shutdown-error.json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "find-shutdown-error", - "data": [], - "tests": [ - { - "description": "Concurrent shutdown error on find", - "clientOptions": { - "retryWrites": false, - "retryReads": false, - "heartbeatFrequencyMS": 500, - "appname": "shutdownErrorFindTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "appName": "shutdownErrorFindTest", - "errorCode": 91, - "blockConnection": true, - "blockTimeMS": 500 - } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread1", - "operation": { - "name": "find", - "object": "collection", - 
"arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread2", - "operation": { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-command-error.json b/test/discovery_and_monitoring_integration/hello-command-error.json deleted file mode 100644 index d3bccd3900..0000000000 --- a/test/discovery_and_monitoring_integration/hello-command-error.json +++ /dev/null @@ -1,223 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-command-error", - "data": [], - "tests": [ - { - "description": "Command error on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "commandErrorHandshakeTest", - "closeConnection": false, - "errorCode": 91 - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "commandErrorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Command error on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 1000, - "heartbeatFrequencyMS": 500, - "appname": "commandErrorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "commandErrorCheckTest", - "closeConnection": false, - "blockConnection": true, - "blockTimeMS": 750, - "errorCode": 91 - } - } - 
} - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-command-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-network-error.json b/test/discovery_and_monitoring_integration/hello-network-error.json deleted file mode 100644 index f9761d7556..0000000000 --- a/test/discovery_and_monitoring_integration/hello-network-error.json +++ /dev/null @@ -1,219 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-network-error", - "data": [], - "tests": [ - { - "description": "Network error on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "networkErrorHandshakeTest", - "closeConnection": true - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "networkErrorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Network error on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "networkErrorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "networkErrorCheckTest", - "closeConnection": true - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - 
"documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-network-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/hello-timeout.json b/test/discovery_and_monitoring_integration/hello-timeout.json deleted file mode 100644 index 004f8f449d..0000000000 --- a/test/discovery_and_monitoring_integration/hello-timeout.json +++ /dev/null @@ -1,337 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "hello-timeout", - "data": [], - "tests": [ - { - "description": "Network timeout on Monitor handshake", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "timeoutMonitorHandshakeTest", - "blockConnection": true, - "blockTimeMS": 1000 - } - }, - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "timeoutMonitorHandshakeTest" - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "Network timeout on Monitor check", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 750, - "heartbeatFrequencyMS": 500, - "appname": "timeoutMonitorCheckTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "timeoutMonitorCheckTest", - "blockConnection": true, - "blockTimeMS": 1000 - } - } - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - 
"command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "Driver extends timeout while streaming", - "clientOptions": { - "retryWrites": false, - "connectTimeoutMS": 250, - "heartbeatFrequencyMS": 500, - "appname": "extendsTimeoutTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - }, - { - "name": "wait", - "object": "testRunner", - "arguments": { - "ms": 2000 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 0 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "hello-timeout", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/insert-network-error.json b/test/discovery_and_monitoring_integration/insert-network-error.json deleted file mode 100644 index fa8bb253e1..0000000000 --- a/test/discovery_and_monitoring_integration/insert-network-error.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "insert-network-error", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Reset server and pool after network error on insert", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true, - "appName": "insertNetworkErrorTest" - } - }, - "clientOptions": { - "retryWrites": false, - "appname": "insertNetworkErrorTest" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "error": true - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "insert": "insert-network-error", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "insert-network-error", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/insert-shutdown-error.json b/test/discovery_and_monitoring_integration/insert-shutdown-error.json deleted file mode 100644 index edde149a91..0000000000 --- a/test/discovery_and_monitoring_integration/insert-shutdown-error.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4" - } - ], - "database_name": "sdam-tests", - "collection_name": "insert-shutdown-error", - "data": [], - "tests": [ - { - "description": "Concurrent shutdown error on insert", - "clientOptions": { - "retryWrites": false, - "heartbeatFrequencyMS": 500, - "appname": "shutdownErrorInsertTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "appName": "shutdownErrorInsertTest", - "errorCode": 91, - "blockConnection": true, - "blockTimeMS": 500 - } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread1", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - }, - "error": true - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread2", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - }, - "error": true - } - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/minPoolSize-error.json b/test/discovery_and_monitoring_integration/minPoolSize-error.json deleted file mode 100644 index 
9f8e4f6f8b..0000000000 --- a/test/discovery_and_monitoring_integration/minPoolSize-error.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9" - } - ], - "database_name": "sdam-tests", - "collection_name": "sdam-minPoolSize-error", - "data": [], - "tests": [ - { - "description": "Network error on minPoolSize background creation", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "skip": 3 - }, - "data": { - "failCommands": [ - "hello", - "isMaster" - ], - "appName": "SDAMminPoolSizeError", - "closeConnection": true - } - }, - "clientOptions": { - "heartbeatFrequencyMS": 10000, - "appname": "SDAMminPoolSizeError", - "minPoolSize": 10, - "serverSelectionTimeoutMS": 1000, - "directConnection": true - }, - "operations": [ - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolReadyEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "ping", - "arguments": { - "command": { - "ping": {} - } - }, - "error": true - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": "off" - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "ping", - "arguments": { - "command": { - "ping": 1 - } - }, - "error": false - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolReadyEvent", - "count": 2 - } - } - ] - } - ] -} diff --git a/test/discovery_and_monitoring_integration/pool-cleared-error.json b/test/discovery_and_monitoring_integration/pool-cleared-error.json deleted file mode 100644 index 52456f9e13..0000000000 --- a/test/discovery_and_monitoring_integration/pool-cleared-error.json +++ /dev/null @@ -1,307 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.9", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "sdam-tests", - "collection_name": "pool-cleared-error", - "data": [], - "tests": [ - { - "description": "PoolClearedError does not mark server unknown", - "clientOptions": { - "retryWrites": true, - "maxPoolSize": 1, - "appname": "poolClearedErrorTest" - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "configureFailPoint", - "object": "testRunner", - "arguments": { - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "blockConnection": true, - "blockTimeMS": 100, - "closeConnection": true, - "appName": "poolClearedErrorTest" - } - } - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread3" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread4" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread5" - } - }, - { - "name": "startThread", - "object": "testRunner", - "arguments": { - "name": "thread6" - } - }, - { - "name": 
"runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread1", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 2 - } - } - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread2", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - } - } - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread3", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - } - } - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread4", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 5 - } - } - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread5", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 6 - } - } - } - } - }, - { - "name": "runOnThread", - "object": "testRunner", - "arguments": { - "name": "thread6", - "operation": { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 7 - } - } - } - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread1" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread2" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread3" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread4" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread5" - } - }, - { - "name": "waitForThread", - "object": "testRunner", - "arguments": { - "name": "thread6" - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 8 - } - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "ServerMarkedUnknownEvent", - "count": 1 - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - }, - { - "_id": 6 - }, - { - "_id": 7 - }, - { - "_id": 8 - } - ] - } - } - } - ] -} diff --git a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json b/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json deleted file mode 100644 index 41fbdc695c..0000000000 --- a/test/discovery_and_monitoring_integration/rediscover-quickly-after-step-down.json +++ /dev/null @@ -1,165 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.4", - "topology": [ - "replicaset" - ] - } - ], - "database_name": "sdam-tests", - "collection_name": "test-replSetStepDown", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "Rediscover quickly after replSetStepDown", - "clientOptions": { - "appname": "replSetStepDownTest", - 
"heartbeatFrequencyMS": 60000, - "serverSelectionTimeoutMS": 5000, - "w": "majority" - }, - "operations": [ - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - }, - { - "name": "recordPrimary", - "object": "testRunner" - }, - { - "name": "runAdminCommand", - "object": "testRunner", - "command_name": "replSetFreeze", - "arguments": { - "command": { - "replSetFreeze": 0 - }, - "readPreference": { - "mode": "Secondary" - } - } - }, - { - "name": "runAdminCommand", - "object": "testRunner", - "command_name": "replSetStepDown", - "arguments": { - "command": { - "replSetStepDown": 30, - "secondaryCatchUpPeriodSecs": 30, - "force": false - } - } - }, - { - "name": "waitForPrimaryChange", - "object": "testRunner", - "arguments": { - "timeoutMS": 15000 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - }, - { - "name": "assertEventCount", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 0 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test-replSetStepDown", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test-replSetStepDown", - "documents": [ - { - "_id": 5 - }, - { - "_id": 6 - } - ] - }, - "command_name": "insert", - "database_name": "sdam-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - }, - { - "_id": 6 - } - ] - } - } - } - ] -} diff --git a/test/gridfs/delete.json b/test/gridfs/delete.json index 7a4ec27f88..9a9b22fc1e 100644 --- a/test/gridfs/delete.json +++ b/test/gridfs/delete.json @@ -49,10 +49,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -64,10 +61,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -79,10 +73,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -94,10 +85,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -197,10 +185,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -212,10 +197,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -227,10 +209,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -330,10 
+309,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -345,10 +321,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -360,10 +333,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -448,10 +418,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -463,10 +430,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -478,10 +442,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -536,7 +497,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -554,10 +515,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -569,10 +527,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -584,10 +539,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -599,10 +551,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -701,7 +650,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ], @@ -719,10 +668,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -734,10 +680,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -749,10 +692,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] diff --git a/test/gridfs/deleteByName.json b/test/gridfs/deleteByName.json new file mode 100644 index 0000000000..884d0300ce --- /dev/null +++ b/test/gridfs/deleteByName.json @@ -0,0 +1,230 @@ +{ + "description": "gridfs-deleteByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": 
"client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "delete when multiple revisions of the file exist", + "operations": [ + { + "name": "deleteByName", + "object": "bucket0", + "arguments": { + "filename": "filename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "delete when file name does not exist", + "operations": [ + { + "name": "deleteByName", + "object": "bucket0", + "arguments": { + "filename": "missing-file" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/download.json b/test/gridfs/download.json index 48d3246218..67658ac512 100644 --- a/test/gridfs/download.json +++ b/test/gridfs/download.json @@ -49,10 +49,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": 
"d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -64,10 +61,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "d41d8cd98f00b204e9800998ecf8427e", "filename": "length-0-with-empty-chunk", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -79,10 +73,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", "filename": "length-2", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -94,10 +85,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "dd254cdc958e53abaa67da9f797125f5", "filename": "length-8", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -109,10 +97,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "57d83cd477bfb1ccd975ab33d827a92b", "filename": "length-10", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -124,9 +109,6 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "c700ed4fdb1d27055aa3faa2c2432283", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -356,7 +338,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -388,7 +370,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -420,7 +402,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -489,7 +471,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -532,7 +514,7 @@ } }, "expectError": { - "isError": true + "isClientError": true } } ] diff --git a/test/gridfs/downloadByName.json b/test/gridfs/downloadByName.json index cd44663957..45abaf7b42 100644 --- a/test/gridfs/downloadByName.json +++ b/test/gridfs/downloadByName.json @@ -49,10 +49,7 @@ "uploadDate": { "$date": "1970-01-01T00:00:00.000Z" }, - "md5": "47ed733b8d10be225eceba344d533586", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -64,10 +61,7 @@ "uploadDate": { "$date": "1970-01-02T00:00:00.000Z" }, - "md5": "b15835f133ff2e27c7cb28117bfae8f4", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -79,10 +73,7 @@ "uploadDate": { "$date": "1970-01-03T00:00:00.000Z" }, - "md5": "eccbc87e4b5ce2fe28308fd9f2a7baf3", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -94,10 +85,7 @@ "uploadDate": { "$date": "1970-01-04T00:00:00.000Z" }, - "md5": "f623e75af30e62bbd73d6df5b50bb7b5", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} }, { @@ -109,10 +97,7 @@ "uploadDate": { "$date": "1970-01-05T00:00:00.000Z" }, - "md5": "4c614360da93c0a041b22e537de151eb", "filename": "abc", - "contentType": "application/octet-stream", - "aliases": [], "metadata": {} } ] @@ -305,7 +290,7 @@ "filename": "xyz" }, "expectError": { - "isError": true + "isClientError": true } } ] @@ -321,7 +306,7 @@ "revision": 999 }, "expectError": { - "isError": true + "isClientError": true } } ] diff --git a/test/gridfs/rename.json b/test/gridfs/rename.json new file mode 100644 index 0000000000..08064d4a5c --- /dev/null +++ b/test/gridfs/rename.json @@ -0,0 +1,179 @@ +{ + "description": "gridfs-rename", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": 
"database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "rename by id", + "operations": [ + { + "name": "rename", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + }, + "newFilename": "newfilename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "rename when file id does not exist", + "operations": [ + { + "name": "rename", + "object": "bucket0", + "arguments": { + "id": { + "$oid": "000000000000000000000003" + }, + "newFilename": "newfilename" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/renameByName.json b/test/gridfs/renameByName.json new file mode 100644 index 0000000000..26f04fb9e0 --- /dev/null +++ b/test/gridfs/renameByName.json @@ -0,0 +1,313 @@ +{ + "description": "gridfs-renameByName", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "gridfs-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + }, + { + "collection": { + "id": "bucket0_files_collection", + "database": "database0", + "collectionName": "fs.files" + } + }, + { + "collection": { + "id": "bucket0_chunks_collection", + "database": "database0", + "collectionName": "fs.chunks" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": 
"1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "filename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 1, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "rename when multiple revisions of the file exist", + "operations": [ + { + "name": "renameByName", + "object": "bucket0", + "arguments": { + "filename": "filename", + "newFilename": "newfilename" + } + } + ], + "outcome": [ + { + "collectionName": "fs.files", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "length": 0, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "length": 2, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "newfilename", + "metadata": {} + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "length": 8, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "otherfilename", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "gridfs-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "files_id": { + "$oid": "000000000000000000000002" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000003" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000003" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + "n": 0, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + }, + { + "_id": { + "$oid": "000000000000000000000004" + }, + "files_id": { + "$oid": "000000000000000000000004" + }, + 
"n": 1, + "data": { + "$binary": { + "base64": "", + "subType": "00" + } + } + } + ] + } + ] + }, + { + "description": "rename when file name does not exist", + "operations": [ + { + "name": "renameByName", + "object": "bucket0", + "arguments": { + "filename": "missing-file", + "newFilename": "newfilename" + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/gridfs/upload.json b/test/gridfs/upload.json index 97e18d2bc2..3c1644653a 100644 --- a/test/gridfs/upload.json +++ b/test/gridfs/upload.json @@ -470,75 +470,6 @@ } ] }, - { - "description": "upload when contentType is provided", - "operations": [ - { - "name": "upload", - "object": "bucket0", - "arguments": { - "filename": "filename", - "source": { - "$$hexBytes": "11" - }, - "chunkSizeBytes": 4, - "contentType": "image/jpeg" - }, - "expectResult": { - "$$type": "objectId" - }, - "saveResultAsEntity": "uploadedObjectId" - }, - { - "name": "find", - "object": "bucket0_files_collection", - "arguments": { - "filter": {} - }, - "expectResult": [ - { - "_id": { - "$$matchesEntity": "uploadedObjectId" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$$type": "date" - }, - "md5": { - "$$unsetOrMatches": "47ed733b8d10be225eceba344d533586" - }, - "filename": "filename", - "contentType": "image/jpeg" - } - ] - }, - { - "name": "find", - "object": "bucket0_chunks_collection", - "arguments": { - "filter": {} - }, - "expectResult": [ - { - "_id": { - "$$type": "objectId" - }, - "files_id": { - "$$matchesEntity": "uploadedObjectId" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - } - ] - }, { "description": "upload when metadata is provided", "operations": [ diff --git a/test/handshake/unified/metadata-not-propagated.json b/test/handshake/unified/metadata-not-propagated.json new file mode 100644 index 0000000000..500b579b89 --- /dev/null +++ b/test/handshake/unified/metadata-not-propagated.json @@ -0,0 +1,100 @@ +{ + "description": "client metadata is not propagated to the server", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "6.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandSucceededEvent", + "commandFailedEvent", + "connectionClosedEvent", + "connectionCreatedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "metadata append does not create new connections or close existing ones and no hello command is sent", + "operations": [ + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "appendMetadata", + "object": "client", + "arguments": { + "driverInfoOptions": { + "name": "framework", + "version": "2.0", + "platform": "Framework Platform" + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + } + ] + }, + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandSucceededEvent": { + "commandName": "ping" + } + }, + { + "commandSucceededEvent": { + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/helpers.py b/test/helpers.py new file mode 100644 index 
0000000000..163bf01c12 --- /dev/null +++ b/test/helpers.py @@ -0,0 +1,176 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared helper methods for pymongo, bson, and gridfs test suites.""" +from __future__ import annotations + +import asyncio +import threading +import traceback +from functools import wraps +from typing import Optional, no_type_check + +from bson import SON +from pymongo import common +from pymongo._asyncio_task import create_task +from pymongo.read_preferences import ReadPreference + +_IS_SYNC = True + + +def repl_set_step_down(client, **kwargs): + """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" + cmd = SON([("replSetStepDown", 1)]) + cmd.update(kwargs) + + # Unfreeze a secondary to ensure a speedy election. + client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) + client.admin.command(cmd) + + +class client_knobs: + def __init__( + self, + heartbeat_frequency=None, + min_heartbeat_interval=None, + kill_cursor_frequency=None, + events_queue_frequency=None, + ): + self.heartbeat_frequency = heartbeat_frequency + self.min_heartbeat_interval = min_heartbeat_interval + self.kill_cursor_frequency = kill_cursor_frequency + self.events_queue_frequency = events_queue_frequency + + self.old_heartbeat_frequency = None + self.old_min_heartbeat_interval = None + self.old_kill_cursor_frequency = None + self.old_events_queue_frequency = None + self._enabled = False + self._stack = None + + def enable(self): + self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY + self.old_min_heartbeat_interval = common.MIN_HEARTBEAT_INTERVAL + self.old_kill_cursor_frequency = common.KILL_CURSOR_FREQUENCY + self.old_events_queue_frequency = common.EVENTS_QUEUE_FREQUENCY + + if self.heartbeat_frequency is not None: + common.HEARTBEAT_FREQUENCY = self.heartbeat_frequency + + if self.min_heartbeat_interval is not None: + common.MIN_HEARTBEAT_INTERVAL = self.min_heartbeat_interval + + if self.kill_cursor_frequency is not None: + common.KILL_CURSOR_FREQUENCY = self.kill_cursor_frequency + + if self.events_queue_frequency is not None: + common.EVENTS_QUEUE_FREQUENCY = self.events_queue_frequency + self._enabled = True + # Store the allocation traceback to catch non-disabled client_knobs. 
+ self._stack = "".join(traceback.format_stack()) + + def __enter__(self): + self.enable() + + @no_type_check + def disable(self): + common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency + common.MIN_HEARTBEAT_INTERVAL = self.old_min_heartbeat_interval + common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency + common.EVENTS_QUEUE_FREQUENCY = self.old_events_queue_frequency + self._enabled = False + + def __exit__(self, exc_type, exc_val, exc_tb): + self.disable() + + def __call__(self, func): + def make_wrapper(f): + @wraps(f) + def wrap(*args, **kwargs): + with self: + return f(*args, **kwargs) + + return wrap + + return make_wrapper(func) + + def __del__(self): + if self._enabled: + msg = ( + "ERROR: client_knobs still enabled! HEARTBEAT_FREQUENCY={}, " + "MIN_HEARTBEAT_INTERVAL={}, KILL_CURSOR_FREQUENCY={}, " + "EVENTS_QUEUE_FREQUENCY={}, stack:\n{}".format( + common.HEARTBEAT_FREQUENCY, + common.MIN_HEARTBEAT_INTERVAL, + common.KILL_CURSOR_FREQUENCY, + common.EVENTS_QUEUE_FREQUENCY, + self._stack, + ) + ) + self.disable() + raise Exception(msg) + + +# Global knobs to speed up the test suite. +global_knobs = client_knobs(events_queue_frequency=0.05) + + +if _IS_SYNC: + PARENT = threading.Thread +else: + PARENT = object + + +class ConcurrentRunner(PARENT): + def __init__(self, **kwargs): + if _IS_SYNC: + super().__init__(**kwargs) + self.name = kwargs.get("name", "ConcurrentRunner") + self.stopped = False + self.task = None + self.target = kwargs.get("target", None) + self.args = kwargs.get("args", []) + + if not _IS_SYNC: + + def start(self): + self.task = create_task(self.run(), name=self.name) + + def join(self, timeout: Optional[float] = None): # type: ignore[override] + if self.task is not None: + asyncio.wait([self.task], timeout=timeout) + + def is_alive(self): + return not self.stopped + + def run(self): + try: + self.target(*self.args) + finally: + self.stopped = True + + +class ExceptionCatchingTask(ConcurrentRunner): + """A Task that stores any exception encountered while running.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.exc = None + + def run(self): + try: + super().run() + except BaseException as exc: + self.exc = exc + raise diff --git a/test/helpers_shared.py b/test/helpers_shared.py new file mode 100644 index 0000000000..49cf131808 --- /dev/null +++ b/test/helpers_shared.py @@ -0,0 +1,271 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
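+ +"""Shared constants and helper functions for the pymongo, bson, and gridfs test suites."""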
+ +from __future__ import annotations + +import base64 +import gc +import os +import socket +import subprocess +import sys +import traceback +import unittest +from pathlib import Path + +try: + import ipaddress + + HAVE_IPADDRESS = True +except ImportError: + HAVE_IPADDRESS = False +from functools import wraps +from typing import no_type_check +from unittest import SkipTest + +from bson.son import SON +from pymongo import message +from pymongo.ssl_support import HAVE_SSL, _ssl # type:ignore[attr-defined] +from pymongo.synchronous.uri_parser import parse_uri + +if HAVE_SSL: + import ssl + + +# Enable debug output for uncollectable objects. PyPy does not have set_debug. +if hasattr(gc, "set_debug"): + gc.set_debug( + gc.DEBUG_UNCOLLECTABLE | getattr(gc, "DEBUG_OBJECTS", 0) | getattr(gc, "DEBUG_INSTANCES", 0) + ) + +# The host and port of a single mongod or mongos, or the seed host +# for a replica set. +host = os.environ.get("DB_IP", "localhost") +port = int(os.environ.get("DB_PORT", 27017)) +IS_SRV = "mongodb+srv" in host + +db_user = os.environ.get("DB_USER", "user") +db_pwd = os.environ.get("DB_PASSWORD", "password") + +HERE = Path(__file__).absolute() +CERT_PATH = str(HERE.parent / "certificates") +CLIENT_PEM = os.environ.get("CLIENT_PEM", os.path.join(CERT_PATH, "client.pem")) +CA_PEM = os.environ.get("CA_PEM", os.path.join(CERT_PATH, "ca.pem")) + +TLS_OPTIONS: dict = {"tls": True} +if CLIENT_PEM: + TLS_OPTIONS["tlsCertificateKeyFile"] = CLIENT_PEM +if CA_PEM: + TLS_OPTIONS["tlsCAFile"] = CA_PEM + +COMPRESSORS = os.environ.get("COMPRESSORS") +MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") +TEST_LOADBALANCER = bool(os.environ.get("TEST_LOAD_BALANCER")) +SINGLE_MONGOS_LB_URI = os.environ.get("SINGLE_MONGOS_LB_URI") +MULTI_MONGOS_LB_URI = os.environ.get("MULTI_MONGOS_LB_URI") + +if TEST_LOADBALANCER: + res = parse_uri(SINGLE_MONGOS_LB_URI or "") + host, port = res["nodelist"][0] + db_user = res["username"] or db_user + db_pwd = res["password"] or db_pwd + + +# Shared KMS data. 
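+# Each provider's credentials are read from the environment, defaulting to empty strings when unset.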
+LOCAL_MASTER_KEY = base64.b64decode( + b"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ" + b"5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" +) +AWS_CREDS = { + "accessKeyId": os.environ.get("FLE_AWS_KEY", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET", ""), +} +AWS_CREDS_2 = { + "accessKeyId": os.environ.get("FLE_AWS_KEY2", ""), + "secretAccessKey": os.environ.get("FLE_AWS_SECRET2", ""), +} +AZURE_CREDS = { + "tenantId": os.environ.get("FLE_AZURE_TENANTID", ""), + "clientId": os.environ.get("FLE_AZURE_CLIENTID", ""), + "clientSecret": os.environ.get("FLE_AZURE_CLIENTSECRET", ""), +} +GCP_CREDS = { + "email": os.environ.get("FLE_GCP_EMAIL", ""), + "privateKey": os.environ.get("FLE_GCP_PRIVATEKEY", ""), +} +KMIP_CREDS = {"endpoint": os.environ.get("FLE_KMIP_ENDPOINT", "localhost:5698")} +AWS_TEMP_CREDS = { + "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), + "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), + "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), +} + +ALL_KMS_PROVIDERS = dict( + aws=AWS_CREDS, + azure=AZURE_CREDS, + gcp=GCP_CREDS, + local=dict(key=LOCAL_MASTER_KEY), + kmip=KMIP_CREDS, +) +DEFAULT_KMS_TLS = dict(kmip=dict(tlsCAFile=CA_PEM, tlsCertificateKeyFile=CLIENT_PEM)) + +# Ensure Evergreen metadata doesn't result in truncation +os.environ.setdefault("MONGOB_LOG_MAX_DOCUMENT_LENGTH", "2000") + + +def is_server_resolvable(): + """Returns True if 'server' is resolvable.""" + socket_timeout = socket.getdefaulttimeout() + socket.setdefaulttimeout(1) + try: + try: + socket.gethostbyname("server") + return True + except OSError: + return False + finally: + socket.setdefaulttimeout(socket_timeout) + + +def _create_user(authdb, user, pwd=None, roles=None, **kwargs): + cmd = SON([("createUser", user)]) + # X509 doesn't use a password + if pwd: + cmd["pwd"] = pwd + cmd["roles"] = roles or ["root"] + cmd.update(**kwargs) + return authdb.command(cmd) + + +def _all_users(db): + return {u["user"] for u in db.command("usersInfo").get("users", [])} + + +def sanitize_cmd(cmd): + cp = cmd.copy() + cp.pop("$clusterTime", None) + cp.pop("$db", None) + cp.pop("$readPreference", None) + cp.pop("lsid", None) + if MONGODB_API_VERSION: + # Stable API parameters + cp.pop("apiVersion", None) + # OP_MSG encoding may move the payload type one field to the + # end of the command. Do the same here. 
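+ # _FIELD_MAP maps each command name to its payload field (e.g. "insert" -> "documents").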
+ name = next(iter(cp)) + try: + identifier = message._FIELD_MAP[name] + docs = cp.pop(identifier) + cp[identifier] = docs + except KeyError: + pass + return cp + + +def sanitize_reply(reply): + cp = reply.copy() + cp.pop("$clusterTime", None) + cp.pop("operationTime", None) + return cp + + +def print_thread_tracebacks() -> None: + """Print all Python thread tracebacks.""" + for thread_id, frame in sys._current_frames().items(): + sys.stderr.write(f"\n--- Traceback for thread {thread_id} ---\n") + traceback.print_stack(frame, file=sys.stderr) + + +def print_thread_stacks(pid: int) -> None: + """Print all C-level thread stacks for a given process id.""" + if sys.platform == "darwin": + cmd = ["lldb", "--attach-pid", f"{pid}", "--batch", "--one-line", '"thread backtrace all"'] + else: + cmd = ["gdb", f"--pid={pid}", "--batch", '--eval-command="thread apply all bt"'] + + try: + res = subprocess.run( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8" + ) + except Exception as exc: + sys.stderr.write(f"Could not print C-level thread stacks because {cmd[0]} failed: {exc}") + else: + sys.stderr.write(res.stdout) + + +def _get_executors(topology): + executors = [] + for server in topology._servers.values(): + # Some MockMonitors do not have an _executor. + if hasattr(server._monitor, "_executor"): + executors.append(server._monitor._executor) + if hasattr(server._monitor, "_rtt_monitor"): + executors.append(server._monitor._rtt_monitor._executor) + executors.append(topology._Topology__events_executor) + if topology._srv_monitor: + executors.append(topology._srv_monitor._executor) + + return [e for e in executors if e is not None] + + +def print_running_topology(topology): + running = [e for e in _get_executors(topology) if not e._stopped] + if running: + print( + "WARNING: found Topology with running threads:\n" + f" Threads: {running}\n" + f" Topology: {topology}\n" + f" Creation traceback:\n{topology._settings._stack}" + ) + + +def test_cases(suite): + """Iterator over all TestCases within a TestSuite.""" + for suite_or_case in suite._tests: + if isinstance(suite_or_case, unittest.TestCase): + # unittest.TestCase + yield suite_or_case + else: + # unittest.TestSuite + yield from test_cases(suite_or_case) + + +# Helper function to work around https://bugs.python.org/issue21724 +def clear_warning_registry(): + """Clear the __warningregistry__ for all modules.""" + for _, module in list(sys.modules.items()): + if hasattr(module, "__warningregistry__"): + module.__warningregistry__ = {} # type:ignore[attr-defined] + + +class SystemCertsPatcher: + def __init__(self, ca_certs): + if ( + ssl.OPENSSL_VERSION.lower().startswith("libressl") + and sys.platform == "darwin" + and not _ssl.IS_PYOPENSSL + ): + raise SkipTest( + "LibreSSL on OSX doesn't support setting CA certificates " + "using SSL_CERT_FILE environment variable." + ) + self.original_certs = os.environ.get("SSL_CERT_FILE") + # Tell OpenSSL where CA certificates live.
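+ # OpenSSL consults SSL_CERT_FILE when loading its default verify locations.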
+ os.environ["SSL_CERT_FILE"] = ca_certs + + def disable(self): + if self.original_certs is None: + os.environ.pop("SSL_CERT_FILE") + else: + os.environ["SSL_CERT_FILE"] = self.original_certs diff --git a/test/index_management/createSearchIndex.json b/test/index_management/createSearchIndex.json new file mode 100644 index 0000000000..f9c4e44d3e --- /dev/null +++ b/test/index_management/createSearchIndex.json @@ -0,0 +1,136 @@ +{ + "description": "createSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "no name provided for an index definition", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + } + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "name provided for an index definition", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/createSearchIndexes.json b/test/index_management/createSearchIndexes.json new file mode 100644 index 0000000000..3cf56ce12e --- /dev/null +++ b/test/index_management/createSearchIndexes.json @@ -0,0 +1,172 @@ +{ + "description": "createSearchIndexes", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "empty index definition array", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [] + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": 
"collection0", + "indexes": [], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "no name provided for an index definition", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ] + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "name provided for an index definition", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ] + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + }, + "name": "test index" + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/dropSearchIndex.json b/test/index_management/dropSearchIndex.json new file mode 100644 index 0000000000..d8957a2227 --- /dev/null +++ b/test/index_management/dropSearchIndex.json @@ -0,0 +1,74 @@ +{ + "description": "dropSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "sends the correct command", + "operations": [ + { + "name": "dropSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "dropSearchIndex": "collection0", + "name": "test index", + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/listSearchIndexes.json b/test/index_management/listSearchIndexes.json new file mode 100644 index 0000000000..a8cef42f7a --- /dev/null +++ b/test/index_management/listSearchIndexes.json @@ -0,0 +1,156 @@ +{ + "description": "listSearchIndexes", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + 
"description": "when no name is provided, it does not populate the filter", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": {} + } + ] + } + } + } + ] + } + ] + }, + { + "description": "when a name is provided, it is present in the filter", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": { + "name": "test index" + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + }, + { + "description": "aggregation cursor options are supported", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "arguments": { + "name": "test index", + "aggregationOptions": { + "batchSize": 10 + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "cursor": { + "batchSize": 10 + }, + "pipeline": [ + { + "$listSearchIndexes": { + "name": "test index" + } + } + ], + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/searchIndexIgnoresReadWriteConcern.json b/test/index_management/searchIndexIgnoresReadWriteConcern.json new file mode 100644 index 0000000000..edf71b7b7e --- /dev/null +++ b/test/index_management/searchIndexIgnoresReadWriteConcern.json @@ -0,0 +1,252 @@ +{ + "description": "search index operations ignore read and write concern", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "createSearchIndex ignores read and write concern", + "operations": [ + { + "name": "createSearchIndex", + "object": "collection0", + "arguments": { + "model": { + "definition": { + "mappings": { + "dynamic": true + } + } + } + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [ + { + "definition": { + "mappings": { + "dynamic": true + } + } + } + ], + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "createSearchIndexes ignores read and write concern", + "operations": [ + { + "name": "createSearchIndexes", + "object": "collection0", + "arguments": { + "models": [] + }, + "expectError": { + 
"isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createSearchIndexes": "collection0", + "indexes": [], + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "dropSearchIndex ignores read and write concern", + "operations": [ + { + "name": "dropSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index" + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "dropSearchIndex": "collection0", + "name": "test index", + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "listSearchIndexes ignores read and write concern", + "operations": [ + { + "name": "listSearchIndexes", + "object": "collection0", + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "collection0", + "pipeline": [ + { + "$listSearchIndexes": {} + } + ], + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "updateSearchIndex ignores the read and write concern", + "operations": [ + { + "name": "updateSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index", + "definition": {} + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "updateSearchIndex": "collection0", + "name": "test index", + "definition": {}, + "$db": "database0", + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/index_management/updateSearchIndex.json b/test/index_management/updateSearchIndex.json new file mode 100644 index 0000000000..76a5962146 --- /dev/null +++ b/test/index_management/updateSearchIndex.json @@ -0,0 +1,76 @@ +{ + "description": "updateSearchIndex", + "schemaVersion": "1.4", + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "collection0" + } + } + ], + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ], + "serverless": "forbid" + } + ], + "tests": [ + { + "description": "sends the correct command", + "operations": [ + { + "name": "updateSearchIndex", + "object": "collection0", + "arguments": { + "name": "test index", + "definition": {} + }, + "expectError": { + "isError": true, + "errorContains": "Atlas" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "updateSearchIndex": "collection0", + "name": "test index", + "definition": {}, + "$db": "database0" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/lambda/README.md b/test/lambda/README.md new file 
mode 100644 index 0000000000..2727a2cee9 --- /dev/null +++ b/test/lambda/README.md @@ -0,0 +1,17 @@ +AWS Lambda Testing +------------------ + +Running locally +=============== + +Prerequisites: + +- AWS SAM CLI +- Docker daemon running + +Usage +===== + +- Start a local mongodb instance on port 27017 +- Run ``build.sh`` +- Run ``test.sh`` diff --git a/test/lambda/build_internal.sh b/test/lambda/build_internal.sh new file mode 100755 index 0000000000..84423db4d1 --- /dev/null +++ b/test/lambda/build_internal.sh @@ -0,0 +1,5 @@ +#!/bin/bash -ex + +cd /src +PYTHON=/opt/python/cp310-cp310/bin/python +$PYTHON -m pip install -v -e . diff --git a/test/lambda/events/event.json b/test/lambda/events/event.json new file mode 100644 index 0000000000..a6197dea6c --- /dev/null +++ b/test/lambda/events/event.json @@ -0,0 +1,62 @@ +{ + "body": "{\"message\": \"hello world\"}", + "resource": "/hello", + "path": "/hello", + "httpMethod": "GET", + "isBase64Encoded": false, + "queryStringParameters": { + "foo": "bar" + }, + "pathParameters": { + "proxy": "/path/to/resource" + }, + "stageVariables": { + "baz": "qux" + }, + "headers": { + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", + "Accept-Encoding": "gzip, deflate, sdch", + "Accept-Language": "en-US,en;q=0.8", + "Cache-Control": "max-age=0", + "CloudFront-Forwarded-Proto": "https", + "CloudFront-Is-Desktop-Viewer": "true", + "CloudFront-Is-Mobile-Viewer": "false", + "CloudFront-Is-SmartTV-Viewer": "false", + "CloudFront-Is-Tablet-Viewer": "false", + "CloudFront-Viewer-Country": "US", + "Host": "1234567890.execute-api.us-east-1.amazonaws.com", + "Upgrade-Insecure-Requests": "1", + "User-Agent": "Custom User Agent String", + "Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)", + "X-Amz-Cf-Id": "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA==", + "X-Forwarded-For": "127.0.0.1, 127.0.0.2", + "X-Forwarded-Port": "443", + "X-Forwarded-Proto": "https" + }, + "requestContext": { + "accountId": "123456789012", + "resourceId": "123456", + "stage": "prod", + "requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef", + "requestTime": "09/Apr/2015:12:34:56 +0000", + "requestTimeEpoch": 1428582896000, + "identity": { + "cognitoIdentityPoolId": null, + "accountId": null, + "cognitoIdentityId": null, + "caller": null, + "accessKey": null, + "sourceIp": "127.0.0.1", + "cognitoAuthenticationType": null, + "cognitoAuthenticationProvider": null, + "userArn": null, + "userAgent": "Custom User Agent String", + "user": null + }, + "path": "/prod/hello", + "resourcePath": "/hello", + "httpMethod": "POST", + "apiId": "1234567890", + "protocol": "HTTP/1.1" + } +} diff --git a/test/lambda/mongodb/Makefile b/test/lambda/mongodb/Makefile new file mode 100644 index 0000000000..3632dfb161 --- /dev/null +++ b/test/lambda/mongodb/Makefile @@ -0,0 +1,4 @@ + +build-MongoDBFunction: + cp -r . $(ARTIFACTS_DIR) + python -m pip install -t $(ARTIFACTS_DIR) dnspython diff --git a/test/lambda/mongodb/__init__.py b/test/lambda/mongodb/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/lambda/mongodb/app.py b/test/lambda/mongodb/app.py new file mode 100644 index 0000000000..274990d3bc --- /dev/null +++ b/test/lambda/mongodb/app.py @@ -0,0 +1,168 @@ +""" +Lambda function for Python Driver testing + +Creates the client that is cached for all requests, subscribes to +relevant events, and forces the connection pool to get populated. 
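+ +Lambda reuses the execution environment between invocations, so the module-level +client and listener counters persist across requests.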
+""" +from __future__ import annotations + +import json +import os +import warnings + +from bson import has_c as has_bson_c +from pymongo import MongoClient +from pymongo import has_c as has_pymongo_c +from pymongo.monitoring import ( + CommandListener, + ConnectionPoolListener, + ServerHeartbeatListener, +) + +# Ensure there are no warnings raised in normal operation. +warnings.simplefilter("error") + +open_connections = 0 +heartbeat_count = 0 +streaming_heartbeat_count = 0 +total_heartbeat_duration = 0 +total_commands = 0 +total_command_duration = 0 + +# Ensure we are using C extensions +assert has_bson_c() +assert has_pymongo_c() + + +class CommandHandler(CommandListener): + def started(self, event): + print("command started", event) + + def succeeded(self, event): + global total_commands, total_command_duration + total_commands += 1 + total_command_duration += event.duration_micros / 1e6 + print("command succeeded", event) + + def failed(self, event): + global total_commands, total_command_duration + total_commands += 1 + total_command_duration += event.duration_micros / 1e6 + print("command failed", event) + + +class ServerHeartbeatHandler(ServerHeartbeatListener): + def started(self, event): + print("server heartbeat started", event) + + def succeeded(self, event): + global heartbeat_count, total_heartbeat_duration, streaming_heartbeat_count + heartbeat_count += 1 + total_heartbeat_duration += event.duration + if event.awaited: + streaming_heartbeat_count += 1 + print("server heartbeat succeeded", event) + + def failed(self, event): + global heartbeat_count, total_heartbeat_duration + heartbeat_count += 1 + total_heartbeat_duration += event.duration + print("server heartbeat failed", event) + + +class ConnectionHandler(ConnectionPoolListener): + def connection_created(self, event): + global open_connections + open_connections += 1 + print("connection created") + + def connection_ready(self, event): + pass + + def connection_closed(self, event): + global open_connections + open_connections -= 1 + print("connection closed") + + def connection_check_out_started(self, event): + pass + + def connection_check_out_failed(self, event): + pass + + def connection_checked_out(self, event): + pass + + def connection_checked_in(self, event): + pass + + def pool_created(self, event): + pass + + def pool_ready(self, event): + pass + + def pool_cleared(self, event): + pass + + def pool_closed(self, event): + pass + + +listeners = [CommandHandler(), ServerHeartbeatHandler(), ConnectionHandler()] +print("Creating client") +client = MongoClient(os.environ["MONGODB_URI"], event_listeners=listeners) + + +# Populate the connection pool. +print("Connecting") +client.lambdaTest.list_collections() +print("Connected") + + +# Create the response to send back. +def create_response(): + return dict( + averageCommandDuration=total_command_duration / total_commands, + averageHeartbeatDuration=total_heartbeat_duration / heartbeat_count + if heartbeat_count + else 0, + openConnections=open_connections, + heartbeatCount=heartbeat_count, + ) + + +# Reset the numbers. +def reset(): + global \ + open_connections, \ + heartbeat_count, \ + total_heartbeat_duration, \ + total_commands, \ + total_command_duration + open_connections = 0 + heartbeat_count = 0 + total_heartbeat_duration = 0 + total_commands = 0 + total_command_duration = 0 + + +def lambda_handler(event, context): + """ + The handler function itself performs an insert/delete and returns the + id of the document in play. 
+ """ + print("initializing") + db = client.lambdaTest + collection = db.test + result = collection.insert_one({"n": 1}) + collection.delete_one({"_id": result.inserted_id}) + # Create the response and then reset the numbers. + response = json.dumps(create_response()) + reset() + print("finished!") + assert ( + streaming_heartbeat_count == 0 + ), f"streaming_heartbeat_count was {streaming_heartbeat_count} not 0" + + return dict(statusCode=200, body=response) diff --git a/test/lambda/run.sh b/test/lambda/run.sh new file mode 100755 index 0000000000..5f1980a5f9 --- /dev/null +++ b/test/lambda/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -o errexit # Exit the script with error if any of the commands fail + +sam build +sam local invoke --docker-network host --parameter-overrides "MongoDbUri=mongodb://host.docker.internal:27017" diff --git a/test/lambda/template.yaml b/test/lambda/template.yaml new file mode 100644 index 0000000000..11052f88dd --- /dev/null +++ b/test/lambda/template.yaml @@ -0,0 +1,49 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Description: > + Python driver lambda function test + +# More info about Globals: https://github.com/awslabs/serverless-application-model/blob/master/docs/globals.rst +Globals: + Function: + Timeout: 30 + MemorySize: 128 + +Parameters: + MongoDbUri: + Type: String + Description: The MongoDB connection string. + +Resources: + MongoDBFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: mongodb/ + Environment: + Variables: + MONGODB_URI: !Ref MongoDbUri + Handler: app.lambda_handler + Runtime: python3.10 + Architectures: + - x86_64 + Events: + MongoDB: + Type: Api + Properties: + Path: /mongodb + Method: get + # Use a custom build method to make sure *.so files are copied. 
+ # https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/building-custom-runtimes.html + Metadata: + BuildMethod: makefile + +Outputs: + MongoDBApi: + Description: "API Gateway endpoint URL for Prod stage for Python driver lambda function" + Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/hello/" + MongoDBFunction: + Description: "Python driver lambda Function ARN" + Value: !GetAtt MongoDBFunction.Arn + MongoDBFunctionIamRole: + Description: "Implicit IAM Role created for Python driver lambda function" + Value: !GetAtt MongoDBFunctionRole.Arn diff --git a/test/load_balancer/cursors.json b/test/load_balancer/cursors.json index e66c46c0c3..27aaddd5b6 100644 --- a/test/load_balancer/cursors.json +++ b/test/load_balancer/cursors.json @@ -1,6 +1,6 @@ { "description": "cursors are correctly pinned to connections for load-balanced clusters", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "topologies": [ @@ -222,7 +222,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -239,7 +242,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -333,7 +339,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -475,7 +484,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -492,7 +504,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -605,7 +620,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -750,7 +768,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "firstBatch": { "$$type": "array" @@ -767,7 +788,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -858,7 +882,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -950,7 +977,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": { "$$type": "string" @@ -1100,7 +1130,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, diff --git a/test/load_balancer/sdam-error-handling.json b/test/load_balancer/sdam-error-handling.json index 8760b723fd..5892dcacd6 100644 --- a/test/load_balancer/sdam-error-handling.json +++ b/test/load_balancer/sdam-error-handling.json @@ -1,6 +1,6 @@ { "description": "state change errors are correctly handled", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "topologies": [ @@ -263,7 +263,7 @@ "description": "errors during the initial connection hello are ignored", "runOnRequirements": [ { - "minServerVersion": "4.9" + "minServerVersion": "4.4.7" } ], "operations": [ @@ -279,7 +279,6 @@ }, "data": { "failCommands": [ - "ismaster", "isMaster", "hello" ], diff --git a/test/load_balancer/transactions.json b/test/load_balancer/transactions.json index 8cf24f4ca4..ca9c145217 100644 --- a/test/load_balancer/transactions.json +++ 
b/test/load_balancer/transactions.json @@ -1,6 +1,6 @@ { "description": "transactions are correctly pinned to connections for load-balanced clusters", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "topologies": [ @@ -1616,6 +1616,50 @@ ] } ] + }, + { + "description": "pinned connection is released when session ended", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + }, + "session": "session0" + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "endSession", + "object": "session0" + } + ], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json index 5afebbbdcb..db8b061b30 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -37,7 +37,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -49,7 +49,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -63,7 +63,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json index 492d8a2f62..10b6f28786 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetNoPrimary/LastUpdateTime.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json index 6602561c1d..38b9986500 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - 
"maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json index 16d9a673bd..586b47ccd2 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Nearest2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json index 54f318872f..15a62090e3 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json +++ b/test/max_staleness/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json @@ -17,7 +17,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { @@ -35,7 +35,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { @@ -48,7 +48,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json index 7956b8e516..7c036f725c 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json index 453dce6605..56fcb156bb 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json +++ 
b/test/max_staleness/ReplicaSetNoPrimary/PrimaryPreferred_tags.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -28,7 +28,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -58,7 +58,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -75,7 +75,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json index b383f275dc..5a4b0c8226 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/Secondary.json +++ b/test/max_staleness/ReplicaSetNoPrimary/Secondary.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -23,7 +23,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -80,7 +80,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -97,7 +97,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json index 7bce7d0aa4..19a948e928 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -52,7 +52,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json index 32c9ca770b..b4633d88f3 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetNoPrimary/SecondaryPreferred_tags.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -23,7 +23,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, 
"lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -80,7 +80,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -97,7 +97,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json index fd84cd1193..ccb916f107 100644 --- a/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetNoPrimary/ZeroMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json index 35eaa9d69d..00137cf69e 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -37,7 +37,7 @@ "type": "RSPrimary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -49,7 +49,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -63,7 +63,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json index 18450beaed..9d1db2de65 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LastUpdateTime.json @@ -13,7 +13,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + 
"maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json index b9fb407f9e..b0636236cc 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -51,7 +51,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json index b695e1caeb..76edfcb836 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/LongHeartbeat2.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json index 9b798d37da..aa936e3c67 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessTooSmall.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json index 1fa7bb4dd0..c24752a7f1 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json +++ b/test/max_staleness/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json index 198be4a681..d3a9535b09 100644 
--- a/test/max_staleness/ReplicaSetWithPrimary/Nearest.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json index 3ae629c898..f91706e804 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json index 675df82631..4ed0b9ed2e 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Nearest_tags.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -28,7 +28,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -58,7 +58,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -75,7 +75,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json index 795b47a111..7945530e6a 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/PrimaryPreferred.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff 
--git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json index 5455708a70..b433d6a430 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -38,7 +38,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -52,7 +52,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json index 6670b54c89..e594af7832 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -35,7 +35,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -50,7 +50,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -92,7 +92,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -107,7 +107,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -124,7 +124,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json index 642fee1fb3..bc0953c657 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -40,7 +40,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ 
-70,7 +70,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -87,7 +87,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json index 502120dce6..2817cf9225 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -35,7 +35,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -50,7 +50,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -92,7 +92,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -107,7 +107,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -124,7 +124,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json index 6978a1807b..7aa487a078 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json +++ b/test/max_staleness/ReplicaSetWithPrimary/Secondary_tags2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -40,7 +40,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -70,7 +70,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -87,7 +87,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json index e1e4a7ffb7..fff5609fcc 100644 --- a/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json +++ b/test/max_staleness/ReplicaSetWithPrimary/ZeroMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, 
"lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Sharded/SmallMaxStaleness.json b/test/max_staleness/Sharded/SmallMaxStaleness.json index 91d89720d1..98e05be363 100644 --- a/test/max_staleness/Sharded/SmallMaxStaleness.json +++ b/test/max_staleness/Sharded/SmallMaxStaleness.json @@ -8,7 +8,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "Mongos", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -51,7 +51,7 @@ "type": "Mongos", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Single/SmallMaxStaleness.json b/test/max_staleness/Single/SmallMaxStaleness.json index b8d2db24be..d948739855 100644 --- a/test/max_staleness/Single/SmallMaxStaleness.json +++ b/test/max_staleness/Single/SmallMaxStaleness.json @@ -8,7 +8,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -27,7 +27,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -41,7 +41,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/test/max_staleness/Unknown/SmallMaxStaleness.json b/test/max_staleness/Unknown/SmallMaxStaleness.json index 8d69f46a1e..0e609bcf94 100644 --- a/test/max_staleness/Unknown/SmallMaxStaleness.json +++ b/test/max_staleness/Unknown/SmallMaxStaleness.json @@ -6,7 +6,7 @@ { "address": "a:27017", "type": "Unknown", - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, diff --git a/test/mockupdb/operations.py b/test/mockupdb/operations.py index 90d7f27c39..d4d8f53ff8 100644 --- a/test/mockupdb/operations.py +++ b/test/mockupdb/operations.py @@ -11,10 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations from collections import namedtuple -from mockupdb import OpMsgReply, OpReply +try: + from mockupdb import OpMsgReply, OpReply + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False from pymongo import ReadPreference @@ -50,69 +56,71 @@ sharded cluster (PYTHON-868). 
""" -not_master_reply = OpMsgReply(ok=0, errmsg="not master") - -operations = [ - Operation( - "find_one", - lambda client: client.db.collection.find_one(), - reply={"cursor": {"id": 0, "firstBatch": []}}, - op_type="may-use-secondary", - not_master=not_master_reply, - ), - Operation( - "count_documents", - lambda client: client.db.collection.count_documents({}), - reply={"n": 1}, - op_type="may-use-secondary", - not_master=not_master_reply, - ), - Operation( - "estimated_document_count", - lambda client: client.db.collection.estimated_document_count(), - reply={"n": 1}, - op_type="may-use-secondary", - not_master=not_master_reply, - ), - Operation( - "aggregate", - lambda client: client.db.collection.aggregate([]), - reply={"cursor": {"id": 0, "firstBatch": []}}, - op_type="may-use-secondary", - not_master=not_master_reply, - ), - Operation( - "options", - lambda client: client.db.collection.options(), - reply={"cursor": {"id": 0, "firstBatch": []}}, - op_type="must-use-primary", - not_master=not_master_reply, - ), - Operation( - "command", - lambda client: client.db.command("foo"), - reply={"ok": 1}, - op_type="must-use-primary", # Ignores client's read preference. - not_master=not_master_reply, - ), - Operation( - "secondary command", - lambda client: client.db.command("foo", read_preference=ReadPreference.SECONDARY), - reply={"ok": 1}, - op_type="always-use-secondary", - not_master=OpReply(ok=0, errmsg="node is recovering"), - ), - Operation( - "listIndexes", - lambda client: client.db.collection.index_information(), - reply={"cursor": {"id": 0, "firstBatch": []}}, - op_type="must-use-primary", - not_master=not_master_reply, - ), -] - - -_ops_by_name = dict([(op.name, op) for op in operations]) +if _HAVE_MOCKUPDB: + not_master_reply = OpMsgReply(ok=0, errmsg="not master") + + operations = [ + Operation( + "find_one", + lambda client: client.db.collection.find_one(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "count_documents", + lambda client: client.db.collection.count_documents({}), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "estimated_document_count", + lambda client: client.db.collection.estimated_document_count(), + reply={"n": 1}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "aggregate", + lambda client: client.db.collection.aggregate([]), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="may-use-secondary", + not_master=not_master_reply, + ), + Operation( + "options", + lambda client: client.db.collection.options(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), + Operation( + "command", + lambda client: client.db.command("foo"), + reply={"ok": 1}, + op_type="must-use-primary", # Ignores client's read preference. 
+ not_master=not_master_reply, + ), + Operation( + "secondary command", + lambda client: client.db.command("foo", read_preference=ReadPreference.SECONDARY), + reply={"ok": 1}, + op_type="always-use-secondary", + not_master=OpReply(ok=0, errmsg="node is recovering"), + ), + Operation( + "listIndexes", + lambda client: client.db.collection.index_information(), + reply={"cursor": {"id": 0, "firstBatch": []}}, + op_type="must-use-primary", + not_master=not_master_reply, + ), + ] +else: + operations = [] + +_ops_by_name = {op.name: op for op in operations} Upgrade = namedtuple("Upgrade", ["name", "function", "old", "new", "wire_version"]) diff --git a/test/mockupdb/test_auth_recovering_member.py b/test/mockupdb/test_auth_recovering_member.py old mode 100755 new mode 100644 index 33d33da24c..046d8d4b0f --- a/test/mockupdb/test_auth_recovering_member.py +++ b/test/mockupdb/test_auth_recovering_member.py @@ -11,16 +11,27 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import unittest +from test import PyMongoTestCase -from mockupdb import MockupDB +import pytest -from pymongo import MongoClient +try: + from mockupdb import MockupDB + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from pymongo import common from pymongo.errors import ServerSelectionTimeoutError +pytestmark = pytest.mark.mockupdb -class TestAuthRecoveringMember(unittest.TestCase): + +class TestAuthRecoveringMember(PyMongoTestCase): def test_auth_recovering_member(self): # Test that we don't attempt auth against a recovering RS member. server = MockupDB() @@ -28,7 +39,7 @@ def test_auth_recovering_member(self): "ismaster", { "minWireVersion": 2, - "maxWireVersion": 6, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, "ismaster": False, "secondary": False, "setName": "rs", @@ -38,12 +49,10 @@ def test_auth_recovering_member(self): server.run() self.addCleanup(server.stop) - client = MongoClient( + client = self.simple_client( server.uri, replicaSet="rs", serverSelectionTimeoutMS=100, socketTimeoutMS=100 ) - self.addCleanup(client.close) - # Should see there's no primary or secondary and raise selection timeout # error. If it raises AutoReconnect we know it actually tried the # server, and that's wrong. diff --git a/test/mockupdb/test_cluster_time.py b/test/mockupdb/test_cluster_time.py index cb06a129d2..42ca916971 100644 --- a/test/mockupdb/test_cluster_time.py +++ b/test/mockupdb/test_cluster_time.py @@ -13,26 +13,42 @@ # limitations under the License. """Test $clusterTime handling.""" +from __future__ import annotations import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False -from mockupdb import MockupDB, going from bson import Timestamp from pymongo import DeleteMany, InsertOne, MongoClient, UpdateOne +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.errors import OperationFailure + +pytestmark = pytest.mark.mockupdb -class TestClusterTime(unittest.TestCase): - def cluster_time_conversation(self, callback, replies): +class TestClusterTime(PyMongoTestCase): + def cluster_time_conversation( + self, callback, replies, max_wire_version=MIN_SUPPORTED_WIRE_VERSION + ): cluster_time = Timestamp(0, 0) server = MockupDB() - # First test all commands include $clusterTime with wire version 6. 
+ # First test all commands include $clusterTime with max_wire_version. _ = server.autoresponds( "ismaster", { "minWireVersion": 0, - "maxWireVersion": 6, + "maxWireVersion": max_wire_version, "$clusterTime": {"clusterTime": cluster_time}, }, ) @@ -40,8 +56,7 @@ def cluster_time_conversation(self, callback, replies): server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri) - self.addCleanup(client.close) + client = self.simple_client(server.uri) with going(callback, client): for reply in replies: @@ -60,7 +75,7 @@ def callback(client): self.cluster_time_conversation(callback, [{"ok": 1}] * 2) def test_bulk(self): - def callback(client): + def callback(client: MongoClient[dict]) -> None: client.db.collection.bulk_write( [InsertOne({}), InsertOne({}), UpdateOne({}, {"$inc": {"x": 1}}), DeleteMany({})] ) @@ -98,7 +113,7 @@ def test_monitor(self): cluster_time = Timestamp(0, 0) reply = { "minWireVersion": 0, - "maxWireVersion": 6, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, "$clusterTime": {"clusterTime": cluster_time}, } @@ -106,54 +121,38 @@ def test_monitor(self): server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri, heartbeatFrequencyMS=500) - self.addCleanup(client.close) - - request = server.receives("ismaster") - # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn("$clusterTime", request) - request.ok(reply) - - # Next exchange: client returns first clusterTime, we send the second. - request = server.receives("ismaster") - self.assertIn("$clusterTime", request) - self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) - cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) - reply["$clusterTime"] = {"clusterTime": cluster_time} - request.reply(reply) - - # Third exchange: client returns second clusterTime. - request = server.receives("ismaster") - self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) - - # Return command error with a new clusterTime. - cluster_time = Timestamp(cluster_time.time, cluster_time.inc + 1) - error = { - "ok": 0, - "code": 211, - "errmsg": "Cache Reader No keys found for HMAC ...", - "$clusterTime": {"clusterTime": cluster_time}, - } - request.reply(error) + client = self.simple_client(server.uri, heartbeatFrequencyMS=500) - # PyMongo 3.11+ closes the monitoring connection on command errors. + for _ in range(3): + request = server.receives("ismaster") + # No $clusterTime in heartbeats or handshakes. + self.assertNotIn("$clusterTime", request) + request.ok(reply) + client.close() - # Fourth exchange: the Monitor closes the connection and runs the - # handshake on a new connection. - request = server.receives("ismaster") - # No $clusterTime in first ismaster, only in subsequent ones - self.assertNotIn("$clusterTime", request) + def test_collection_bulk_error(self): + def callback(client: MongoClient[dict]) -> None: + with self.assertRaises(OperationFailure): + client.db.collection.bulk_write([InsertOne({}), InsertOne({})]) - # Reply without $clusterTime. - reply.pop("$clusterTime") - request.reply(reply) + self.cluster_time_conversation( + callback, + [{"ok": 0, "errmsg": "mock error"}], + ) - # Fifth exchange: the Monitor attempt uses the clusterTime from - # the previous isMaster error. 
- request = server.receives("ismaster") - self.assertEqual(request["$clusterTime"]["clusterTime"], cluster_time) - request.reply(reply) - client.close() + def test_client_bulk_error(self): + def callback(client: MongoClient[dict]) -> None: + with self.assertRaises(OperationFailure): + client.bulk_write( + [ + InsertOne({}, namespace="db.collection"), + InsertOne({}, namespace="db.collection"), + ] + ) + + self.cluster_time_conversation( + callback, [{"ok": 0, "errmsg": "mock error"}], max_wire_version=25 + ) if __name__ == "__main__": diff --git a/test/mockupdb/test_cursor.py b/test/mockupdb/test_cursor.py new file mode 100644 index 0000000000..e61f220d5b --- /dev/null +++ b/test/mockupdb/test_cursor.py @@ -0,0 +1,99 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test PyMongo cursor does not set exhaustAllowed automatically (PYTHON-4007).""" +from __future__ import annotations + +import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, OpMsg, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from bson.objectid import ObjectId +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION +from pymongo.errors import OperationFailure + +pytestmark = pytest.mark.mockupdb + + +class TestCursor(PyMongoTestCase): + def test_getmore_load_balanced(self): + server = MockupDB() + server.autoresponds( + "hello", + isWritablePrimary=True, + msg="isdbgrid", + minWireVersion=0, + maxWireVersion=20, + helloOk=True, + serviceId=ObjectId(), + ) + server.run() + self.addCleanup(server.stop) + + client = self.simple_client(server.uri, loadBalanced=True) + self.addCleanup(client.close) + collection = client.db.coll + cursor = collection.find() + with going(next, cursor): + request = server.receives(OpMsg({"find": "coll"})) + self.assertEqual(request.flags, 0, "exhaustAllowed should not be set") + # Respond with a different namespace. + request.reply({"cursor": {"id": 123, "firstBatch": [{}]}}) + + # 3 batches, check exhaustAllowed on all getMores. 
+ for i in range(1, 3): + with going(next, cursor): + request = server.receives(OpMsg({"getMore": 123})) + self.assertEqual(request.flags, 0, "exhaustAllowed should not be set") + cursor_id = 123 if i < 2 else 0 + request.replies({"cursor": {"id": cursor_id, "nextBatch": [{}]}}) + + +class TestRetryableErrorCodeCatch(PyMongoTestCase): + def _test_fail_on_operation_failure_with_code(self, code): + """Test reads on error codes that should not be retried""" + server = MockupDB() + server.run() + self.addCleanup(server.stop) + server.autoresponds("ismaster", maxWireVersion=MIN_SUPPORTED_WIRE_VERSION) + + client = self.simple_client(server.uri) + + with going(lambda: server.receives(OpMsg({"find": "collection"})).command_err(code=code)): + cursor = client.db.collection.find() + with self.assertRaises(OperationFailure) as ctx: + cursor.next() + self.assertEqual(ctx.exception.code, code) + + def test_fail_on_operation_failure_none(self): + self._test_fail_on_operation_failure_with_code(None) + + def test_fail_on_operation_failure_zero(self): + self._test_fail_on_operation_failure_with_code(0) + + def test_fail_on_operation_failure_one(self): + self._test_fail_on_operation_failure_with_code(1) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mockupdb/test_cursor_namespace.py b/test/mockupdb/test_cursor_namespace.py index 10788ac0f9..7538540bda 100644 --- a/test/mockupdb/test_cursor_namespace.py +++ b/test/mockupdb/test_cursor_namespace.py @@ -13,23 +13,36 @@ # limitations under the License. """Test list_indexes with more than one batch.""" +from __future__ import annotations import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False -from mockupdb import MockupDB, going from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb -class TestCursorNamespace(unittest.TestCase): +class TestCursorNamespace(PyMongoTestCase): server: MockupDB client: MongoClient @classmethod def setUpClass(cls): - cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) + cls.server = MockupDB(auto_ismaster={"maxWireVersion": 8}) cls.server.run() - cls.client = MongoClient(cls.server.uri) + cls.client = cls.unmanaged_simple_client(cls.server.uri) @classmethod def tearDownClass(cls): @@ -77,15 +90,15 @@ def op(): self._test_cursor_namespace(op, "listIndexes") -class TestKillCursorsNamespace(unittest.TestCase): +class TestKillCursorsNamespace(PyMongoTestCase): server: MockupDB client: MongoClient @classmethod def setUpClass(cls): - cls.server = MockupDB(auto_ismaster={"maxWireVersion": 6}) + cls.server = MockupDB(auto_ismaster={"maxWireVersion": MIN_SUPPORTED_WIRE_VERSION}) cls.server.run() - cls.client = MongoClient(cls.server.uri) + cls.client = cls.unmanaged_simple_client(cls.server.uri) @classmethod def tearDownClass(cls): diff --git a/test/mockupdb/test_getmore_sharded.py b/test/mockupdb/test_getmore_sharded.py index 5f5400ab07..d24c8aa10a 100644 --- a/test/mockupdb/test_getmore_sharded.py +++ b/test/mockupdb/test_getmore_sharded.py @@ -13,15 +13,28 @@ # limitations under the License. 
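The new test_cursor.py above (PYTHON-4007) asserts `request.flags == 0` on the initial find and on every getMore: in the OP_MSG wire format, exhaustAllowed is bit 16 of the flagBits word, and the driver must leave it clear unless the cursor was explicitly opened as an exhaust cursor. A minimal sketch of decoding the bits those assertions inspect; the bit positions come from the OP_MSG wire protocol specification, and the helper name is illustrative:

```python
# Illustrative decoder for the OP_MSG flagBits word checked in test_cursor.py.
CHECKSUM_PRESENT = 1 << 0   # 1: message ends with a CRC-32C checksum
MORE_TO_COME = 1 << 1       # 2: another message follows with no request
EXHAUST_ALLOWED = 1 << 16   # 65536: client permits moreToCome replies

def decode_op_msg_flags(flags: int) -> dict:
    """Return which OP_MSG flag bits are set in an integer flags word."""
    return {
        "checksumPresent": bool(flags & CHECKSUM_PRESENT),
        "moreToCome": bool(flags & MORE_TO_COME),
        "exhaustAllowed": bool(flags & EXHAUST_ALLOWED),
    }

# The assertion in test_getmore_load_balanced is equivalent to:
assert decode_op_msg_flags(0)["exhaustAllowed"] is False
```

The same bit shows up later in test_op_msg.py, where the `find_exhaust_cursor` entry expects its getMore with `flags=1 << 16` and replies carrying flags=2 (moreToCome).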
"""Test PyMongo cursor with a sharded cluster.""" +from __future__ import annotations + import unittest from queue import Queue +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + -from mockupdb import MockupDB, going +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION -from pymongo import MongoClient +pytestmark = pytest.mark.mockupdb -class TestGetmoreSharded(unittest.TestCase): +class TestGetmoreSharded(PyMongoTestCase): def test_getmore_sharded(self): servers = [MockupDB(), MockupDB()] @@ -30,16 +43,19 @@ def test_getmore_sharded(self): for server in servers: server.subscribe(q.put) server.autoresponds( - "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + "ismaster", + ismaster=True, + msg="isdbgrid", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) server.run() self.addCleanup(server.stop) - client = MongoClient( + client = self.simple_client( "mongodb://%s:%d,%s:%d" % (servers[0].host, servers[0].port, servers[1].host, servers[1].port) ) - self.addCleanup(client.close) collection = client.db.collection cursor = collection.find() with going(next, cursor): diff --git a/test/mockupdb/test_handshake.py b/test/mockupdb/test_handshake.py index 39188e8ad0..c2c978c4ad 100644 --- a/test/mockupdb/test_handshake.py +++ b/test/mockupdb/test_handshake.py @@ -11,59 +11,28 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import unittest -from mockupdb import Command, MockupDB, OpMsg, OpMsgReply, OpQuery, OpReply, absent, go +import pytest + +try: + from mockupdb import Command, MockupDB, OpMsg, OpMsgReply, OpQuery, OpReply, absent, go + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + from bson.objectid import ObjectId -from pymongo import MongoClient +from pymongo import MongoClient, has_c from pymongo import version as pymongo_version +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION from pymongo.errors import OperationFailure from pymongo.server_api import ServerApi, ServerApiVersion - -def test_hello_with_option(self, protocol, **kwargs): - hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" - # `db.command("hello"|"ismaster")` commands are the same for primaries and - # secondaries, so we only need one server. - primary = MockupDB() - # Set up a custom handler to save the first request from the driver. - self.handshake_req = None - - def respond(r): - # Only save the very first request from the driver. - if self.handshake_req is None: - self.handshake_req = r - load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} - return r.reply( - OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) - ) - - primary.autoresponds(respond) - primary.run() - self.addCleanup(primary.stop) - - # We need a special dict because MongoClient uses "server_api" and all - # of the commands use "apiVersion". - k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} - client = MongoClient( - "mongodb://" + primary.address_string, - appname="my app", # For _check_handshake_data() - **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]) # type: ignore[arg-type] - ) - - self.addCleanup(client.close) - - # We have an autoresponder luckily, so no need for `go()`. 
- assert client.db.command(hello) - - # We do this checking here rather than in the autoresponder `respond()` - # because it runs in another Python thread so there are some funky things - # with error handling within that thread, and we want to be able to use - # self.assertRaises(). - self.handshake_req.assert_matches(protocol(hello, **kwargs)) - _check_handshake_data(self.handshake_req) +pytestmark = pytest.mark.mockupdb def _check_handshake_data(request): @@ -71,7 +40,11 @@ def _check_handshake_data(request): data = request["client"] assert data["application"] == {"name": "my app"} - assert data["driver"] == {"name": "PyMongo", "version": pymongo_version} + if has_c(): + name = "PyMongo|c" + else: + name = "PyMongo" + assert data["driver"] == {"name": name, "version": pymongo_version} # Keep it simple, just check these fields exist. assert "os" in data @@ -79,6 +52,48 @@ def _check_handshake_data(request): class TestHandshake(unittest.TestCase): + def hello_with_option_helper(self, protocol, **kwargs): + hello = "ismaster" if isinstance(protocol(), OpQuery) else "hello" + # `db.command("hello"|"ismaster")` commands are the same for primaries and + # secondaries, so we only need one server. + primary = MockupDB() + # Set up a custom handler to save the first request from the driver. + self.handshake_req = None + + def respond(r): + # Only save the very first request from the driver. + if self.handshake_req is None: + self.handshake_req = r + load_balanced_kwargs = {"serviceId": ObjectId()} if kwargs.get("loadBalanced") else {} + return r.reply( + OpMsgReply(minWireVersion=0, maxWireVersion=13, **kwargs, **load_balanced_kwargs) + ) + + primary.autoresponds(respond) + primary.run() + self.addCleanup(primary.stop) + + # We need a special dict because MongoClient uses "server_api" and all + # of the commands use "apiVersion". + k_map = {("apiVersion", "1"): ("server_api", ServerApi(ServerApiVersion.V1))} + client = MongoClient( + "mongodb://" + primary.address_string, + appname="my app", # For _check_handshake_data() + **dict([k_map.get((k, v), (k, v)) for k, v in kwargs.items()]), # type: ignore[arg-type] + ) + + self.addCleanup(client.close) + + # We have an autoresponder luckily, so no need for `go()`. + assert client.db.command(hello) + + # We do this checking here rather than in the autoresponder `respond()` + # because it runs in another Python thread so there are some funky things + # with error handling within that thread, and we want to be able to use + # self.assertRaises(). + self.handshake_req.assert_matches(protocol(hello, **kwargs)) + _check_handshake_data(self.handshake_req) + def test_client_handshake_data(self): primary, secondary = MockupDB(), MockupDB() for server in primary, secondary: @@ -87,7 +102,12 @@ def test_client_handshake_data(self): hosts = [server.address_string for server in (primary, secondary)] primary_response = OpReply( - "ismaster", True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + "ismaster", + True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) error_response = OpReply(0, errmsg="Cache Reader No keys found for HMAC ...", code=211) @@ -98,7 +118,7 @@ def test_client_handshake_data(self): hosts=hosts, secondary=True, minWireVersion=2, - maxWireVersion=6, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) client = MongoClient( @@ -107,7 +127,7 @@ def test_client_handshake_data(self): self.addCleanup(client.close) - # New monitoring sockets send data during handshake. 
+ # New monitoring connections send data during handshake. heartbeat = primary.receives("ismaster") _check_handshake_data(heartbeat) heartbeat.ok(primary_response) @@ -165,12 +185,14 @@ def test_client_handshake_saslSupportedMechs(self): server.run() self.addCleanup(server.stop) - primary_response = OpReply("ismaster", True, minWireVersion=2, maxWireVersion=6) + primary_response = OpReply( + "ismaster", True, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION + ) client = MongoClient(server.uri, username="username", password="password") self.addCleanup(client.close) - # New monitoring sockets send data during handshake. + # New monitoring connections send data during handshake. heartbeat = server.receives("ismaster") heartbeat.ok(primary_response) @@ -196,37 +218,44 @@ def test_client_handshake_saslSupportedMechs(self): request.ok( "ismaster", True, - saslSupportedMechs=["SCRAM-SHA-256"], + # Unsupported auth mech should be ignored. + saslSupportedMechs=["SCRAM-SHA-256", "does_not_exist"], speculativeAuthenticate=auth, minWireVersion=2, - maxWireVersion=6, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) # Authentication should immediately fail with: # OperationFailure: Server returned an invalid nonce. - with self.assertRaises(OperationFailure): + with self.assertRaises(OperationFailure) as cm: future() + self.assertEqual(str(cm.exception), "Server returned an invalid nonce.") return def test_handshake_load_balanced(self): - test_hello_with_option(self, OpMsg, loadBalanced=True) + self.hello_with_option_helper(OpMsg, loadBalanced=True) with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, Command, loadBalanced=True) + self.hello_with_option_helper(Command, loadBalanced=True) def test_handshake_versioned_api(self): - test_hello_with_option(self, OpMsg, apiVersion="1") + self.hello_with_option_helper(OpMsg, apiVersion="1") with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, Command, apiVersion="1") + self.hello_with_option_helper(Command, apiVersion="1") def test_handshake_not_either(self): # If we don't specify either option then it should be using # OP_QUERY for the initial step of the handshake. 
- test_hello_with_option(self, Command) + self.hello_with_option_helper(Command) with self.assertRaisesRegex(AssertionError, "does not match"): - test_hello_with_option(self, OpMsg) + self.hello_with_option_helper(OpMsg) def test_handshake_max_wire(self): server = MockupDB() - primary_response = {"hello": 1, "ok": 1, "minWireVersion": 0, "maxWireVersion": 6} + primary_response = { + "hello": 1, + "ok": 1, + "minWireVersion": 0, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, + } self.found_auth_msg = False def responder(request): @@ -237,16 +266,15 @@ def responder(request): request.reply( OpMsgReply( **primary_response, - **{ - "payload": b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" - b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" - b"tXdF9r," - b"s=4dcxugMJq2P4hQaDbGXZR8uR3ei" - b"PHrSmh4uhkg==,i=15000", - "saslSupportedMechs": ["SCRAM-SHA-1"], - } + payload=b"r=wPleNM8S5p8gMaffMDF7Py4ru9bnmmoqb0" + b"1WNPsil6o=pAvr6B1garhlwc6MKNQ93ZfFky" + b"tXdF9r," + b"s=4dcxugMJq2P4hQaDbGXZR8uR3ei" + b"PHrSmh4uhkg==,i=15000", + saslSupportedMechs=["SCRAM-SHA-1"], ) ) + return None else: return request.reply(**primary_response) diff --git a/test/mockupdb/test_initial_ismaster.py b/test/mockupdb/test_initial_ismaster.py index 155ae6152e..a52930c742 100644 --- a/test/mockupdb/test_initial_ismaster.py +++ b/test/mockupdb/test_initial_ismaster.py @@ -11,32 +11,47 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import time import unittest +from test import PyMongoTestCase -from mockupdb import MockupDB, wait_until +import pytest -from pymongo import MongoClient +try: + from mockupdb import MockupDB, wait_until + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False -class TestInitialIsMaster(unittest.TestCase): + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb + + +class TestInitialIsMaster(PyMongoTestCase): def test_initial_ismaster(self): server = MockupDB() server.run() self.addCleanup(server.stop) start = time.time() - client = MongoClient(server.uri) - self.addCleanup(client.close) + client = self.simple_client(server.uri) # A single ismaster is enough for the client to be connected. self.assertFalse(client.nodes) - server.receives("ismaster").ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + server.receives("ismaster").ok( + ismaster=True, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION + ) wait_until(lambda: client.nodes, "update nodes", timeout=1) # At least 10 seconds before next heartbeat. - server.receives("ismaster").ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + server.receives("ismaster").ok( + ismaster=True, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION + ) self.assertGreaterEqual(time.time() - start, 10) diff --git a/test/mockupdb/test_list_indexes.py b/test/mockupdb/test_list_indexes.py index 20764e6e5a..71cad43aa2 100644 --- a/test/mockupdb/test_list_indexes.py +++ b/test/mockupdb/test_list_indexes.py @@ -13,22 +13,33 @@ # limitations under the License. 
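Throughout these files the hard-coded `maxWireVersion=6` (MongoDB 3.6) in mock server responses becomes `MIN_SUPPORTED_WIRE_VERSION` from pymongo.common, so the fake servers always advertise the oldest wire version the driver still accepts and the tests stop breaking each time the support floor moves. The pattern, lifted from the hunks above (a sketch assuming mockupdb is installed):

```python
from mockupdb import MockupDB

from pymongo.common import MIN_SUPPORTED_WIRE_VERSION

server = MockupDB()
# Advertise the driver's current minimum supported wire version rather
# than a literal 6, exactly as the updated tests do.
server.autoresponds(
    "ismaster",
    ismaster=True,
    minWireVersion=2,
    maxWireVersion=MIN_SUPPORTED_WIRE_VERSION,
)
server.run()
server.stop()
```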
"""Test list_indexes with more than one batch.""" +from __future__ import annotations import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False -from mockupdb import MockupDB, going from bson import SON -from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb -class TestListIndexes(unittest.TestCase): +class TestListIndexes(PyMongoTestCase): def test_list_indexes_command(self): - server = MockupDB(auto_ismaster={"maxWireVersion": 6}) + server = MockupDB(auto_ismaster={"maxWireVersion": MIN_SUPPORTED_WIRE_VERSION}) server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri) - self.addCleanup(client.close) + client = self.simple_client(server.uri) with going(client.test.collection.list_indexes) as cursor: request = server.receives(listIndexes="collection", namespace="test") request.reply({"cursor": {"firstBatch": [{"name": "index_0"}], "id": 123}}) diff --git a/test/mockupdb/test_max_staleness.py b/test/mockupdb/test_max_staleness.py index 02efb6a718..7168bd2954 100644 --- a/test/mockupdb/test_max_staleness.py +++ b/test/mockupdb/test_max_staleness.py @@ -11,26 +11,39 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import unittest +from test import PyMongoTestCase -from mockupdb import MockupDB, going +import pytest -from pymongo import MongoClient +try: + from mockupdb import MockupDB, going + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False -class TestMaxStalenessMongos(unittest.TestCase): + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb + + +class TestMaxStalenessMongos(PyMongoTestCase): def test_mongos(self): mongos = MockupDB() - mongos.autoresponds("ismaster", maxWireVersion=6, ismaster=True, msg="isdbgrid") + mongos.autoresponds( + "ismaster", maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ismaster=True, msg="isdbgrid" + ) mongos.run() self.addCleanup(mongos.stop) # No maxStalenessSeconds. uri = "mongodb://localhost:%d/?readPreference=secondary" % mongos.port - client = MongoClient(uri) - self.addCleanup(client.close) + client = self.simple_client(uri) with going(client.db.coll.find_one) as future: request = mongos.receives() self.assertNotIn("maxStalenessSeconds", request.doc["$readPreference"]) @@ -49,8 +62,7 @@ def test_mongos(self): "&maxStalenessSeconds=1" % mongos.port ) - client = MongoClient(uri) - self.addCleanup(client.close) + client = self.simple_client(uri) with going(client.db.coll.find_one) as future: request = mongos.receives() self.assertEqual(1, request.doc["$readPreference"]["maxStalenessSeconds"]) diff --git a/test/mockupdb/test_mixed_version_sharded.py b/test/mockupdb/test_mixed_version_sharded.py index d5fb9913cc..adbe61204b 100644 --- a/test/mockupdb/test_mixed_version_sharded.py +++ b/test/mockupdb/test_mixed_version_sharded.py @@ -13,18 +13,28 @@ # limitations under the License. 
"""Test PyMongo with a mixed-version cluster.""" +from __future__ import annotations import time import unittest from queue import Queue +from test import PyMongoTestCase -from mockupdb import MockupDB, go -from operations import upgrades +import pytest -from pymongo import MongoClient +try: + from mockupdb import MockupDB, go + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False -class TestMixedVersionSharded(unittest.TestCase): +from operations import upgrades # type: ignore[import] + +pytestmark = pytest.mark.mockupdb + + +class TestMixedVersionSharded(PyMongoTestCase): def setup_server(self, upgrade): self.mongos_old, self.mongos_new = MockupDB(), MockupDB() @@ -46,12 +56,12 @@ def setup_server(self, upgrade): "ismaster", ismaster=True, msg="isdbgrid", maxWireVersion=upgrade.wire_version ) - self.mongoses_uri = "mongodb://%s,%s" % ( + self.mongoses_uri = "mongodb://{},{}".format( self.mongos_old.address_string, self.mongos_new.address_string, ) - self.client = MongoClient(self.mongoses_uri) + self.client = self.simple_client(self.mongoses_uri) def tearDown(self): if hasattr(self, "client") and self.client: diff --git a/test/mockupdb/test_mongos_command_read_mode.py b/test/mockupdb/test_mongos_command_read_mode.py index b7f8532e38..61744e184d 100644 --- a/test/mockupdb/test_mongos_command_read_mode.py +++ b/test/mockupdb/test_mongos_command_read_mode.py @@ -11,26 +11,42 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import itertools import unittest -from mockupdb import MockupDB, OpMsg, going -from operations import operations +import pytest + +try: + from mockupdb import MockupDB, OpMsg, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] from pymongo import MongoClient, ReadPreference +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION from pymongo.read_preferences import ( _MONGOS_MODES, make_read_preference, read_pref_mode_from_name, ) +pytestmark = pytest.mark.mockupdb + class TestMongosCommandReadMode(unittest.TestCase): def test_aggregate(self): server = MockupDB() server.autoresponds( - "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + "ismaster", + ismaster=True, + msg="isdbgrid", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) self.addCleanup(server.stop) server.run() @@ -46,7 +62,6 @@ def test_aggregate(self): secondary_collection = collection.with_options(read_preference=ReadPreference.SECONDARY) with going(secondary_collection.aggregate, []): - command = server.receives( OpMsg( { @@ -66,7 +81,11 @@ def test(self): self.addCleanup(server.stop) server.run() server.autoresponds( - "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + "ismaster", + ismaster=True, + msg="isdbgrid", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) @@ -110,7 +129,7 @@ def generate_mongos_read_mode_tests(): # Skip something like command('foo', read_preference=SECONDARY). 
continue test = create_mongos_read_mode_test(mode, operation) - test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestMongosCommandReadMode, test_name, test) diff --git a/test/mockupdb/test_network_disconnect_primary.py b/test/mockupdb/test_network_disconnect_primary.py old mode 100755 new mode 100644 index ea13a3b042..b5ccd5276f --- a/test/mockupdb/test_network_disconnect_primary.py +++ b/test/mockupdb/test_network_disconnect_primary.py @@ -11,29 +11,45 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import unittest -from mockupdb import Future, MockupDB, OpReply, going, wait_until +import pytest + +try: + from mockupdb import Future, MockupDB, OpReply, going, wait_until + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION from pymongo.errors import ConnectionFailure from pymongo.topology_description import TOPOLOGY_TYPE +pytestmark = pytest.mark.mockupdb + class TestNetworkDisconnectPrimary(unittest.TestCase): def test_network_disconnect_primary(self): # Application operation fails against primary. Test that topology # type changes from ReplicaSetWithPrimary to ReplicaSetNoPrimary. # http://bit.ly/1B5ttuL - primary, secondary = servers = [MockupDB() for _ in range(2)] - for server in servers: + primary, secondary = MockupDB(), MockupDB() + for server in primary, secondary: server.run() self.addCleanup(server.stop) - hosts = [server.address_string for server in servers] + hosts = [server.address_string for server in (primary, secondary)] primary_response = OpReply( - ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + ismaster=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) primary.autoresponds("ismaster", primary_response) secondary.autoresponds( @@ -43,7 +59,7 @@ def test_network_disconnect_primary(self): setName="rs", hosts=hosts, minWireVersion=2, - maxWireVersion=6, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) client = MongoClient(primary.uri, replicaSet="rs") diff --git a/test/mockupdb/test_op_msg.py b/test/mockupdb/test_op_msg.py old mode 100755 new mode 100644 index da7ff3d33e..4b85c5a48a --- a/test/mockupdb/test_op_msg.py +++ b/test/mockupdb/test_op_msg.py @@ -11,253 +11,270 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
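test_network_disconnect_primary.py above encodes the SDAM rule that a network error against the primary demotes the topology from ReplicaSetWithPrimary to ReplicaSetNoPrimary rather than discarding the other members. The `TOPOLOGY_TYPE` import suggests an assertion along these lines; note this is only a sketch using private driver state (`client._topology`), which mockupdb-based tests reach into but which is not a public API:

```python
from pymongo.topology_description import TOPOLOGY_TYPE

def assert_primary_demoted(client):
    # Private internals, illustrative only: the client's current view of
    # the deployment after the primary's connection dropped mid-operation.
    description = client._topology.description
    assert description.topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary
```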
+from __future__ import annotations import unittest from collections import namedtuple +from test import PyMongoTestCase -from mockupdb import OP_MSG_FLAGS, MockupDB, OpMsg, OpMsgReply, going +import pytest + +try: + from mockupdb import OP_MSG_FLAGS, MockupDB, OpMsg, OpMsgReply, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False from pymongo import MongoClient, WriteConcern -from pymongo.cursor import CursorType +from pymongo.cursor_shared import CursorType from pymongo.operations import DeleteOne, InsertOne, UpdateOne +pytestmark = pytest.mark.mockupdb + Operation = namedtuple("Operation", ["name", "function", "request", "reply"]) -operations = [ - Operation( - "find_one", - lambda coll: coll.find_one({}), - request=OpMsg({"find": "coll"}, flags=0), - reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, - ), - Operation( - "aggregate", - lambda coll: coll.aggregate([]), - request=OpMsg({"aggregate": "coll"}, flags=0), - reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, - ), - Operation( - "insert_one", - lambda coll: coll.insert_one({}), - request=OpMsg({"insert": "coll"}, flags=0), - reply={"ok": 1, "n": 1}, - ), - Operation( - "insert_one-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_one({}), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - Operation( - "insert_many", - lambda coll: coll.insert_many([{}, {}, {}]), - request=OpMsg({"insert": "coll"}, flags=0), - reply={"ok": 1, "n": 3}, - ), - Operation( - "insert_many-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many([{}, {}, {}]), - request=OpMsg({"insert": "coll"}, flags=0), - reply={"ok": 1, "n": 3}, - ), - Operation( - "insert_many-w0-unordered", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many( - [{}, {}, {}], ordered=False +if _HAVE_MOCKUPDB: + operations = [ + Operation( + "find_one", + lambda coll: coll.find_one({}), + request=OpMsg({"find": "coll"}, flags=0), + reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, ), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - Operation( - "replace_one", - lambda coll: coll.replace_one({"_id": 1}, {"new": 1}), - request=OpMsg({"update": "coll"}, flags=0), - reply={"ok": 1, "n": 1, "nModified": 1}, - ), - Operation( - "replace_one-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).replace_one( - {"_id": 1}, {"new": 1} + Operation( + "aggregate", + lambda coll: coll.aggregate([]), + request=OpMsg({"aggregate": "coll"}, flags=0), + reply={"ok": 1, "cursor": {"firstBatch": [], "id": 0}}, ), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - Operation( - "update_one", - lambda coll: coll.update_one({"_id": 1}, {"$set": {"new": 1}}), - request=OpMsg({"update": "coll"}, flags=0), - reply={"ok": 1, "n": 1, "nModified": 1}, - ), - Operation( - "replace_one-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_one( - {"_id": 1}, {"$set": {"new": 1}} + Operation( + "insert_one", + lambda coll: coll.insert_one({}), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 1}, ), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - Operation( - "update_many", - lambda coll: coll.update_many({"_id": 1}, {"$set": {"new": 1}}), - request=OpMsg({"update": "coll"}, flags=0), - reply={"ok": 1, "n": 1, "nModified": 1}, - ), - Operation( - "update_many-w0", - 
lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_many( - {"_id": 1}, {"$set": {"new": 1}} + Operation( + "insert_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_one({}), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, ), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - Operation( - "delete_one", - lambda coll: coll.delete_one({"a": 1}), - request=OpMsg({"delete": "coll"}, flags=0), - reply={"ok": 1, "n": 1}, - ), - Operation( - "delete_one-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_one({"a": 1}), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - Operation( - "delete_many", - lambda coll: coll.delete_many({"a": 1}), - request=OpMsg({"delete": "coll"}, flags=0), - reply={"ok": 1, "n": 1}, - ), - Operation( - "delete_many-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_many({"a": 1}), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - # Legacy methods - Operation( - "bulk_write_insert", - lambda coll: coll.bulk_write([InsertOne({}), InsertOne({})]), - request=OpMsg({"insert": "coll"}, flags=0), - reply={"ok": 1, "n": 2}, - ), - Operation( - "bulk_write_insert-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [InsertOne({}), InsertOne({})] + Operation( + "insert_many", + lambda coll: coll.insert_many([{}, {}, {}]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 3}, ), - request=OpMsg({"insert": "coll"}, flags=0), - reply={"ok": 1, "n": 2}, - ), - Operation( - "bulk_write_insert-w0-unordered", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [InsertOne({}), InsertOne({})], ordered=False + Operation( + "insert_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many( + [{}, {}, {}] + ), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 3}, ), - request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - Operation( - "bulk_write_update", - lambda coll: coll.bulk_write( - [ - UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}}), - ] + Operation( + "insert_many-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).insert_many( + [{}, {}, {}], ordered=False + ), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, ), - request=OpMsg({"update": "coll"}, flags=0), - reply={"ok": 1, "n": 2, "nModified": 2}, - ), - Operation( - "bulk_write_update-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [ - UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}}), - ] + Operation( + "replace_one", + lambda coll: coll.replace_one({"_id": 1}, {"new": 1}), + request=OpMsg({"update": "coll"}, flags=0), + reply={"ok": 1, "n": 1, "nModified": 1}, ), - request=OpMsg({"update": "coll"}, flags=0), - reply={"ok": 1, "n": 2, "nModified": 2}, - ), - Operation( - "bulk_write_update-w0-unordered", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [ - UpdateOne({"_id": 1}, {"$set": {"new": 1}}), - UpdateOne({"_id": 2}, {"$set": {"new": 1}}), - ], - ordered=False, + Operation( + "replace_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).replace_one( + {"_id": 1}, 
{"new": 1} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "update_one", + lambda coll: coll.update_one({"_id": 1}, {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=0), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), + Operation( + "replace_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_one( + {"_id": 1}, {"$set": {"new": 1}} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "update_many", + lambda coll: coll.update_many({"_id": 1}, {"$set": {"new": 1}}), + request=OpMsg({"update": "coll"}, flags=0), + reply={"ok": 1, "n": 1, "nModified": 1}, + ), + Operation( + "update_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).update_many( + {"_id": 1}, {"$set": {"new": 1}} + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "delete_one", + lambda coll: coll.delete_one({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=0), + reply={"ok": 1, "n": 1}, + ), + Operation( + "delete_one-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_one({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "delete_many", + lambda coll: coll.delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=0), + reply={"ok": 1, "n": 1}, + ), + Operation( + "delete_many-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).delete_many({"a": 1}), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + # Legacy methods + Operation( + "bulk_write_insert", + lambda coll: coll.bulk_write([InsertOne[dict]({}), InsertOne[dict]({})]), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 2}, + ), + Operation( + "bulk_write_insert-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne[dict]({}), InsertOne[dict]({})] + ), + request=OpMsg({"insert": "coll"}, flags=0), + reply={"ok": 1, "n": 2}, ), - request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), - Operation( - "bulk_write_delete", - lambda coll: coll.bulk_write([DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), - request=OpMsg({"delete": "coll"}, flags=0), - reply={"ok": 1, "n": 2}, - ), - Operation( - "bulk_write_delete-w0", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})] + Operation( + "bulk_write_insert-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [InsertOne[dict]({}), InsertOne[dict]({})], ordered=False + ), + request=OpMsg({"insert": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, ), - request=OpMsg({"delete": "coll"}, flags=0), - reply={"ok": 1, "n": 2}, - ), - Operation( - "bulk_write_delete-w0-unordered", - lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( - [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False + Operation( + "bulk_write_update", + lambda coll: coll.bulk_write( + [ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ] + ), + request=OpMsg({"update": "coll"}, flags=0), + reply={"ok": 1, "n": 2, "nModified": 2}, ), - request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), - reply=None, - ), -] + Operation( + "bulk_write_update-w0", + lambda 
coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ] + ), + request=OpMsg({"update": "coll"}, flags=0), + reply={"ok": 1, "n": 2, "nModified": 2}, + ), + Operation( + "bulk_write_update-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [ + UpdateOne({"_id": 1}, {"$set": {"new": 1}}), + UpdateOne({"_id": 2}, {"$set": {"new": 1}}), + ], + ordered=False, + ), + request=OpMsg({"update": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + Operation( + "bulk_write_delete", + lambda coll: coll.bulk_write([DeleteOne({"_id": 1}), DeleteOne({"_id": 2})]), + request=OpMsg({"delete": "coll"}, flags=0), + reply={"ok": 1, "n": 2}, + ), + Operation( + "bulk_write_delete-w0", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})] + ), + request=OpMsg({"delete": "coll"}, flags=0), + reply={"ok": 1, "n": 2}, + ), + Operation( + "bulk_write_delete-w0-unordered", + lambda coll: coll.with_options(write_concern=WriteConcern(w=0)).bulk_write( + [DeleteOne({"_id": 1}), DeleteOne({"_id": 2})], ordered=False + ), + request=OpMsg({"delete": "coll"}, flags=OP_MSG_FLAGS["moreToCome"]), + reply=None, + ), + ] -operations_312 = [ - Operation( - "find_raw_batches", - lambda coll: list(coll.find_raw_batches({})), - request=[ - OpMsg({"find": "coll"}, flags=0), - OpMsg({"getMore": 7}, flags=0), - ], - reply=[ - {"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, - {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, - ], - ), - Operation( - "aggregate_raw_batches", - lambda coll: list(coll.aggregate_raw_batches([])), - request=[ - OpMsg({"aggregate": "coll"}, flags=0), - OpMsg({"getMore": 7}, flags=0), - ], - reply=[ - {"ok": 1, "cursor": {"firstBatch": [], "id": 7}}, - {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, - ], - ), - Operation( - "find_exhaust_cursor", - lambda coll: list(coll.find({}, cursor_type=CursorType.EXHAUST)), - request=[ - OpMsg({"find": "coll"}, flags=0), - OpMsg({"getMore": 7}, flags=1 << 16), - ], - reply=[ - OpMsgReply({"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, flags=0), - OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), - OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), - OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, flags=0), - ], - ), -] + operations_312 = [ + Operation( + "find_raw_batches", + lambda coll: list(coll.find_raw_batches({})), + request=[ + OpMsg({"find": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=0), + ], + reply=[ + {"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, + {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, + ], + ), + Operation( + "aggregate_raw_batches", + lambda coll: list(coll.aggregate_raw_batches([])), + request=[ + OpMsg({"aggregate": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=0), + ], + reply=[ + {"ok": 1, "cursor": {"firstBatch": [], "id": 7}}, + {"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, + ], + ), + Operation( + "find_exhaust_cursor", + lambda coll: list(coll.find({}, cursor_type=CursorType.EXHAUST)), + request=[ + OpMsg({"find": "coll"}, flags=0), + OpMsg({"getMore": 7}, flags=1 << 16), + ], + reply=[ + OpMsgReply({"ok": 1, "cursor": {"firstBatch": [{}], "id": 7}}, flags=0), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), + OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 7}}, flags=2), + 
OpMsgReply({"ok": 1, "cursor": {"nextBatch": [{}], "id": 0}}, flags=0), + ], + ), + ] +else: + operations = [] + operations_312 = [] -class TestOpMsg(unittest.TestCase): +class TestOpMsg(PyMongoTestCase): server: MockupDB client: MongoClient @@ -265,7 +282,7 @@ class TestOpMsg(unittest.TestCase): def setUpClass(cls): cls.server = MockupDB(auto_ismaster=True, max_wire_version=8) cls.server.run() - cls.client = MongoClient(cls.server.uri) + cls.client = cls.unmanaged_simple_client(cls.server.uri) @classmethod def tearDownClass(cls): @@ -304,7 +321,7 @@ def test(self): def create_tests(ops): for op in ops: - test_name = "test_op_msg_%s" % (op.name,) + test_name = f"test_op_msg_{op.name}" setattr(TestOpMsg, test_name, operation_test(op)) diff --git a/test/mockupdb/test_op_msg_read_preference.py b/test/mockupdb/test_op_msg_read_preference.py index b8d1348b97..4cf82c760e 100644 --- a/test/mockupdb/test_op_msg_read_preference.py +++ b/test/mockupdb/test_op_msg_read_preference.py @@ -11,51 +11,63 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import copy import itertools import unittest +from test import PyMongoTestCase from typing import Any -from mockupdb import CommandBase, MockupDB, going -from operations import operations +import pytest -from pymongo import MongoClient, ReadPreference +try: + from mockupdb import CommandBase, MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] + +from pymongo import ReadPreference +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION from pymongo.read_preferences import ( _MONGOS_MODES, make_read_preference, read_pref_mode_from_name, ) +pytestmark = pytest.mark.mockupdb + -class OpMsgReadPrefBase(unittest.TestCase): +class OpMsgReadPrefBase(PyMongoTestCase): single_mongod = False primary: MockupDB secondary: MockupDB @classmethod def setUpClass(cls): - super(OpMsgReadPrefBase, cls).setUpClass() + super().setUpClass() @classmethod def add_test(cls, mode, test_name, test): setattr(cls, test_name, test) def setup_client(self, read_preference): - client = MongoClient(self.primary.uri, read_preference=read_preference) - self.addCleanup(client.close) + client = self.simple_client(self.primary.uri, read_preference=read_preference) return client class TestOpMsgMongos(OpMsgReadPrefBase): @classmethod def setUpClass(cls): - super(TestOpMsgMongos, cls).setUpClass() + super().setUpClass() auto_ismaster = { "ismaster": True, "msg": "isdbgrid", # Mongos. 
"minWireVersion": 2, - "maxWireVersion": 6, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, } cls.primary = MockupDB(auto_ismaster=auto_ismaster) cls.primary.run() @@ -64,13 +76,13 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.primary.stop() - super(TestOpMsgMongos, cls).tearDownClass() + super().tearDownClass() class TestOpMsgReplicaSet(OpMsgReadPrefBase): @classmethod def setUpClass(cls): - super(TestOpMsgReplicaSet, cls).setUpClass() + super().setUpClass() cls.primary, cls.secondary = MockupDB(), MockupDB() for server in cls.primary, cls.secondary: server.run() @@ -82,7 +94,7 @@ def setUpClass(cls): "setName": "rs", "hosts": hosts, "minWireVersion": 2, - "maxWireVersion": 6, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, } cls.primary.autoresponds(CommandBase("ismaster"), primary_ismaster) secondary_ismaster = copy.copy(primary_ismaster) @@ -94,7 +106,7 @@ def setUpClass(cls): def tearDownClass(cls): for server in cls.primary, cls.secondary: server.stop() - super(TestOpMsgReplicaSet, cls).tearDownClass() + super().tearDownClass() @classmethod def add_test(cls, mode, test_name, test): @@ -104,12 +116,13 @@ def add_test(cls, mode, test_name, test): setattr(cls, test_name, test) def setup_client(self, read_preference): - client = MongoClient(self.primary.uri, replicaSet="rs", read_preference=read_preference) + client = self.simple_client( + self.primary.uri, replicaSet="rs", read_preference=read_preference + ) # Run a command on a secondary to discover the topology. This ensures # that secondaryPreferred commands will select the secondary. client.admin.command("ismaster", read_preference=ReadPreference.SECONDARY) - self.addCleanup(client.close) return client @@ -118,11 +131,11 @@ class TestOpMsgSingle(OpMsgReadPrefBase): @classmethod def setUpClass(cls): - super(TestOpMsgSingle, cls).setUpClass() + super().setUpClass() auto_ismaster = { "ismaster": True, "minWireVersion": 2, - "maxWireVersion": 6, + "maxWireVersion": MIN_SUPPORTED_WIRE_VERSION, } cls.primary = MockupDB(auto_ismaster=auto_ismaster) cls.primary.run() @@ -131,7 +144,7 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): cls.primary.stop() - super(TestOpMsgSingle, cls).tearDownClass() + super().tearDownClass() def create_op_msg_read_mode_test(mode, operation): @@ -181,7 +194,7 @@ def generate_op_msg_read_mode_tests(): for entry in matrix: mode, operation = entry test = create_op_msg_read_mode_test(mode, operation) - test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) test.__name__ = test_name for cls in TestOpMsgMongos, TestOpMsgReplicaSet, TestOpMsgSingle: cls.add_test(mode, test_name, test) diff --git a/test/mockupdb/test_query_read_pref_sharded.py b/test/mockupdb/test_query_read_pref_sharded.py index 7ad4f2afc8..2fae46be76 100644 --- a/test/mockupdb/test_query_read_pref_sharded.py +++ b/test/mockupdb/test_query_read_pref_sharded.py @@ -13,13 +13,23 @@ # limitations under the License. 
"""Test PyMongo query and read preference with a sharded cluster.""" +from __future__ import annotations import unittest +from test import PyMongoTestCase + +import pytest + +try: + from mockupdb import MockupDB, OpMsg, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False -from mockupdb import MockupDB, OpMsg, going from bson import SON -from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION from pymongo.read_preferences import ( Nearest, Primary, @@ -28,19 +38,24 @@ SecondaryPreferred, ) +pytestmark = pytest.mark.mockupdb + -class TestQueryAndReadModeSharded(unittest.TestCase): +class TestQueryAndReadModeSharded(PyMongoTestCase): def test_query_and_read_mode_sharded_op_msg(self): """Test OP_MSG sends non-primary $readPreference and never $query.""" server = MockupDB() server.autoresponds( - "ismaster", ismaster=True, msg="isdbgrid", minWireVersion=2, maxWireVersion=6 + "ismaster", + ismaster=True, + msg="isdbgrid", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) server.run() self.addCleanup(server.stop) - client = MongoClient(server.uri) - self.addCleanup(client.close) + client = self.simple_client(server.uri) read_prefs = ( Primary(), diff --git a/test/mockupdb/test_reset_and_request_check.py b/test/mockupdb/test_reset_and_request_check.py old mode 100755 new mode 100644 index 778be3d5ca..b438afe894 --- a/test/mockupdb/test_reset_and_request_check.py +++ b/test/mockupdb/test_reset_and_request_check.py @@ -11,22 +11,36 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import itertools import time import unittest +from test import PyMongoTestCase -from mockupdb import MockupDB, going, wait_until -from operations import operations +import pytest -from pymongo import MongoClient +try: + from mockupdb import MockupDB, going, wait_until + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from operations import operations # type: ignore[import] + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION from pymongo.errors import ConnectionFailure +from pymongo.operations import _Op from pymongo.server_type import SERVER_TYPE +pytestmark = pytest.mark.mockupdb + -class TestResetAndRequestCheck(unittest.TestCase): +class TestResetAndRequestCheck(PyMongoTestCase): def __init__(self, *args, **kwargs): - super(TestResetAndRequestCheck, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.ismaster_time = 0.0 self.client = None self.server = None @@ -36,7 +50,9 @@ def setup_server(self): def responder(request): self.ismaster_time = time.time() - return request.ok(ismaster=True, minWireVersion=2, maxWireVersion=6) + return request.ok( + ismaster=True, minWireVersion=2, maxWireVersion=MIN_SUPPORTED_WIRE_VERSION + ) self.server.autoresponds("ismaster", responder) self.server.run() @@ -45,7 +61,7 @@ def responder(request): kwargs = {"socketTimeoutMS": 100} # Disable retryable reads when pymongo supports it. kwargs["retryReads"] = False - self.client = MongoClient(self.server.uri, **kwargs) # type: ignore + self.client = self.simple_client(self.server.uri, **kwargs) # type: ignore wait_until(lambda: self.client.nodes, "connect to standalone") def tearDown(self): @@ -67,7 +83,7 @@ def _test_disconnect(self, operation): # Server is Unknown. 
topology = self.client._topology with self.assertRaises(ConnectionFailure): - topology.select_server_by_address(self.server.address, 0) + topology.select_server_by_address(self.server.address, _Op.TEST, 0) time.sleep(0.5) after = time.time() @@ -94,7 +110,7 @@ def _test_timeout(self, operation): # Server is *not* Unknown. topology = self.client._topology - server = topology.select_server_by_address(self.server.address, 0) + server = topology.select_server_by_address(self.server.address, _Op.TEST, 0) assert server is not None self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) @@ -116,7 +132,7 @@ def _test_not_master(self, operation): # Server is rediscovered. topology = self.client._topology - server = topology.select_server_by_address(self.server.address, 0) + server = topology.select_server_by_address(self.server.address, _Op.TEST, 0) assert server is not None self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) @@ -143,7 +159,7 @@ def generate_reset_tests(): for entry in matrix: operation, (test_method, name) = entry test = create_reset_test(operation, test_method) - test_name = "%s_%s" % (name, operation.name.replace(" ", "_")) + test_name = "{}_{}".format(name, operation.name.replace(" ", "_")) test.__name__ = test_name setattr(TestResetAndRequestCheck, test_name, test) diff --git a/test/mockupdb/test_rsghost.py b/test/mockupdb/test_rsghost.py index 354399728d..84d52170db 100644 --- a/test/mockupdb/test_rsghost.py +++ b/test/mockupdb/test_rsghost.py @@ -13,15 +13,26 @@ # limitations under the License. """Test connections to RSGhost nodes.""" +from __future__ import annotations import datetime import unittest -from mockupdb import MockupDB, going +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + from pymongo import MongoClient from pymongo.errors import ServerSelectionTimeoutError +pytestmark = pytest.mark.mockupdb + class TestRSGhost(unittest.TestCase): def test_rsghost(self): diff --git a/test/mockupdb/test_slave_okay_rs.py b/test/mockupdb/test_slave_okay_rs.py index 5a162c08e3..728e4e2ce0 100644 --- a/test/mockupdb/test_slave_okay_rs.py +++ b/test/mockupdb/test_slave_okay_rs.py @@ -16,13 +16,25 @@ Just make sure SlaveOkay is *not* set on primary reads. 
""" +from __future__ import annotations import unittest -from mockupdb import MockupDB, going -from operations import operations +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION + +pytestmark = pytest.mark.mockupdb class TestSlaveOkayRS(unittest.TestCase): @@ -34,7 +46,12 @@ def setup_server(self): hosts = [server.address_string for server in (self.primary, self.secondary)] self.primary.autoresponds( - "ismaster", ismaster=True, setName="rs", hosts=hosts, minWireVersion=2, maxWireVersion=6 + "ismaster", + ismaster=True, + setName="rs", + hosts=hosts, + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) self.secondary.autoresponds( "ismaster", @@ -43,14 +60,14 @@ def setup_server(self): setName="rs", hosts=hosts, minWireVersion=2, - maxWireVersion=6, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, ) def create_slave_ok_rs_test(operation): def test(self): self.setup_server() - assert not operation.op_type == "always-use-secondary" + assert operation.op_type != "always-use-secondary" client = MongoClient(self.primary.uri, replicaSet="rs") self.addCleanup(client.close) diff --git a/test/mockupdb/test_slave_okay_sharded.py b/test/mockupdb/test_slave_okay_sharded.py index 52c643b417..6efbff6583 100644 --- a/test/mockupdb/test_slave_okay_sharded.py +++ b/test/mockupdb/test_slave_okay_sharded.py @@ -18,16 +18,29 @@ - A direct connection to a slave. - A direct connection to a mongos. """ +from __future__ import annotations + import itertools import unittest from queue import Queue -from mockupdb import MockupDB, going -from operations import operations +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name +pytestmark = pytest.mark.mockupdb + class TestSlaveOkaySharded(unittest.TestCase): def setup_server(self): @@ -40,13 +53,14 @@ def setup_server(self): server.run() self.addCleanup(server.stop) server.autoresponds( - "ismaster", minWireVersion=2, maxWireVersion=6, ismaster=True, msg="isdbgrid" + "ismaster", + minWireVersion=2, + maxWireVersion=MIN_SUPPORTED_WIRE_VERSION, + ismaster=True, + msg="isdbgrid", ) - self.mongoses_uri = "mongodb://%s,%s" % ( - self.mongos1.address_string, - self.mongos2.address_string, - ) + self.mongoses_uri = f"mongodb://{self.mongos1.address_string},{self.mongos2.address_string}" def create_slave_ok_sharded_test(mode, operation): @@ -59,7 +73,7 @@ def test(self): elif operation.op_type == "must-use-primary": slave_ok = False else: - assert False, "unrecognized op_type %r" % operation.op_type + raise AssertionError("unrecognized op_type %r" % operation.op_type) pref = make_read_preference(read_pref_mode_from_name(mode), tag_sets=None) @@ -84,7 +98,7 @@ def generate_slave_ok_sharded_tests(): for entry in matrix: mode, operation = entry test = create_slave_ok_sharded_test(mode, operation) - test_name = "test_%s_with_mode_%s" % (operation.name.replace(" ", "_"), mode) + test_name = "test_{}_with_mode_{}".format(operation.name.replace(" ", "_"), mode) test.__name__ = test_name setattr(TestSlaveOkaySharded, test_name, test) diff 
--git a/test/mockupdb/test_slave_okay_single.py b/test/mockupdb/test_slave_okay_single.py index 07cd6c7448..88cb1a48a5 100644 --- a/test/mockupdb/test_slave_okay_single.py +++ b/test/mockupdb/test_slave_okay_single.py @@ -18,17 +18,29 @@ - A direct connection to a slave. - A direct connection to a mongos. """ +from __future__ import annotations import itertools import unittest -from mockupdb import MockupDB, going -from operations import operations +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +from operations import operations # type: ignore[import] from pymongo import MongoClient +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION from pymongo.read_preferences import make_read_preference, read_pref_mode_from_name from pymongo.topology_description import TOPOLOGY_TYPE +pytestmark = pytest.mark.mockupdb + def topology_type_name(client): topology_type = client._topology._description.topology_type @@ -46,7 +58,7 @@ def create_slave_ok_single_test(mode, server_type, ismaster, operation): def test(self): ismaster_with_version = ismaster.copy() ismaster_with_version["minWireVersion"] = 2 - ismaster_with_version["maxWireVersion"] = 6 + ismaster_with_version["maxWireVersion"] = MIN_SUPPORTED_WIRE_VERSION self.server.autoresponds("ismaster", **ismaster_with_version) self.assertIn( operation.op_type, ("always-use-secondary", "may-use-secondary", "must-use-primary") @@ -78,7 +90,7 @@ def generate_slave_ok_single_tests(): mode, (server_type, ismaster), operation = entry test = create_slave_ok_single_test(mode, server_type, ismaster, operation) - test_name = "test_%s_%s_with_mode_%s" % ( + test_name = "test_{}_{}_with_mode_{}".format( operation.name.replace(" ", "_"), server_type, mode, diff --git a/test/mockupdb/test_standalone_shard.py b/test/mockupdb/test_standalone_shard.py new file mode 100644 index 0000000000..28a582071c --- /dev/null +++ b/test/mockupdb/test_standalone_shard.py @@ -0,0 +1,67 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test errors that come from a standalone shard.""" +from __future__ import annotations + +import unittest + +import pytest + +try: + from mockupdb import MockupDB, going + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + + +from pymongo import MongoClient +from pymongo.errors import OperationFailure + +pytestmark = pytest.mark.mockupdb + + +class TestStandaloneShard(unittest.TestCase): + # See PYTHON-2048 and SERVER-44591. 
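+ # A standalone mongod cannot accept transaction numbers ("Transaction numbers are only allowed on a replica set member or mongos"), so it replies with an IllegalOperation (code 20) write error; the driver should surface that as the clearer "does not support retryable writes" message asserted below.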
+ def test_bulk_txn_error_message(self): + server = MockupDB(auto_ismaster={"maxWireVersion": 8}) + server.run() + self.addCleanup(server.stop) + client = MongoClient(server.uri) + self.addCleanup(client.close) + + with self.assertRaisesRegex( + OperationFailure, "This MongoDB deployment does not support retryable writes" + ): + with going(client.db.collection.insert_many, [{}, {}]): + request = server.receives() + request.reply( + { + "n": 0, + "ok": 1.0, + "writeErrors": [ + { + "code": 20, + "codeName": "IllegalOperation", + "errmsg": "Transaction numbers are only allowed on a replica set member or mongos", + "index": 0, + } + ], + } + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/mod_wsgi_test/README.rst b/test/mod_wsgi_test/README.rst index 2ea50c9074..e96db9406c 100644 --- a/test/mod_wsgi_test/README.rst +++ b/test/mod_wsgi_test/README.rst @@ -15,7 +15,7 @@ Test Matrix PyMongo should be tested with several versions of mod_wsgi and a selection of Python versions. Each combination of mod_wsgi and Python version should -be tested with a standalone and a replica set. ``mod_wsgi_test.wsgi`` +be tested with a standalone and a replica set. ``mod_wsgi_test.py`` detects if the deployment is a replica set and connects to the whole set. Setup @@ -74,31 +74,37 @@ Run the test Run the included ``test_client.py`` script:: python test/mod_wsgi_test/test_client.py -n 2500 -t 100 parallel \ - http://localhost/${WORKSPACE} + http://localhost/interpreter1${WORKSPACE} http://localhost/interpreter2${WORKSPACE} ...where the "n" argument is the total number of requests to make to Apache, and "t" specifies the number of threads. ``WORKSPACE`` is the location of -the PyMongo checkout. +the PyMongo checkout. Note that multiple URLs are passed; each one corresponds +to a different sub-interpreter. Run this script again with different arguments to make serial requests:: python test/mod_wsgi_test/test_client.py -n 25000 serial \ - http://localhost/${WORKSPACE} + http://localhost/interpreter1${WORKSPACE} http://localhost/interpreter2${WORKSPACE} The ``test_client.py`` script merely makes HTTP requests to Apache. Its exit code is non-zero if any of its requests fails, for example with an HTTP 500. -The core of the test is in the WSGI script, ``mod_wsgi_test.wsgi``. +The core of the test is in the WSGI script, ``mod_wsgi_test.py``. This script inserts some documents into MongoDB at startup, then queries documents for each HTTP request. If PyMongo is leaking connections and "n" is much greater than the ulimit, the test will fail when PyMongo exhausts its file descriptors. +The script also encodes and decodes all BSON types to ensure that +multiple sub-interpreters in the same process are supported. This tests +the workaround added in `PYTHON-569 `_. + Automation ---------- At MongoDB, Inc. we use a continuous integration job that tests each combination in the matrix. The job starts up Apache, starts a single server or replica set, and runs ``test_client.py`` with the proper arguments.
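In essence, each client request reduces to the following (a minimal sketch
based on ``test_client.py``; the real script adds threading, error handling,
and a shared request counter, and the URL paths here are placeholders)::

    import random
    from urllib.request import urlopen

    urls = [
        "http://localhost/interpreter1/path/to/checkout",
        "http://localhost/interpreter2/path/to/checkout",
    ]
    for _ in range(25000):
        # Each GET exercises one of the two sub-interpreter mounts.
        urlopen(random.choice(urls)).read().strip()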
+See `run-mod-wsgi-tests.sh `_ diff --git a/test/mod_wsgi_test/apache22amazon.conf b/test/mod_wsgi_test/apache22amazon.conf index d9d892bbe2..7755336b07 100644 --- a/test/mod_wsgi_test/apache22amazon.conf +++ b/test/mod_wsgi_test/apache22amazon.conf @@ -31,4 +31,4 @@ CustomLog ${PWD}/access_log combined Allow from All -Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/mod_wsgi_test.conf +Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF} diff --git a/test/mod_wsgi_test/apache22ubuntu1204.conf b/test/mod_wsgi_test/apache22ubuntu1204.conf index 281c5862c2..9fa4b2060b 100644 --- a/test/mod_wsgi_test/apache22ubuntu1204.conf +++ b/test/mod_wsgi_test/apache22ubuntu1204.conf @@ -26,4 +26,4 @@ CustomLog ${PWD}/access_log combined Allow from All -Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/mod_wsgi_test.conf +Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF} diff --git a/test/mod_wsgi_test/apache24ubuntu161404.conf b/test/mod_wsgi_test/apache24ubuntu161404.conf index f0a1734375..eb5414f0f7 100644 --- a/test/mod_wsgi_test/apache24ubuntu161404.conf +++ b/test/mod_wsgi_test/apache24ubuntu161404.conf @@ -25,4 +25,4 @@ CustomLog ${PWD}/access_log combined Require all granted -Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/mod_wsgi_test.conf +Include ${PROJECT_DIRECTORY}/test/mod_wsgi_test/${MOD_WSGI_CONF} diff --git a/test/mod_wsgi_test/mod_wsgi_test.conf b/test/mod_wsgi_test/mod_wsgi_test.conf index 9505933e96..a5b09e437f 100644 --- a/test/mod_wsgi_test/mod_wsgi_test.conf +++ b/test/mod_wsgi_test/mod_wsgi_test.conf @@ -1,4 +1,4 @@ -# Copyright 2012-2015 MongoDB, Inc. +# Copyright 2012-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,17 +20,13 @@ LoadModule wsgi_module ${MOD_WSGI_SO} WSGISocketPrefix /tmp/ - ServerName localhost - WSGIDaemonProcess mod_wsgi_test processes=1 threads=15 display-name=mod_wsgi_test - WSGIProcessGroup mod_wsgi_test - - # For the convienience of unittests, rather than hard-code the location of - # mod_wsgi_test.wsgi, include it in the URL, so - # http://localhost/location-of-pymongo-checkout will work: - - WSGIScriptAliasMatch ^/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.wsgi - + # Mount the script twice so that multiple interpreters are used. + # For the convenience of unittests, rather than hard-code the location of + # mod_wsgi_test.py, include it in the URL, so + # http://localhost/interpreter1/location-of-pymongo-checkout will work: + WSGIScriptAliasMatch ^/interpreter1/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py + WSGIScriptAliasMatch ^/interpreter2/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py diff --git a/test/mod_wsgi_test/mod_wsgi_test.py b/test/mod_wsgi_test/mod_wsgi_test.py new file mode 100644 index 0000000000..4ab2df2442 --- /dev/null +++ b/test/mod_wsgi_test/mod_wsgi_test.py @@ -0,0 +1,114 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
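+# (This module replaces the old mod_wsgi_test.wsgi script. The Apache configs
+# mount it twice, under /interpreter1 and /interpreter2, so that two
+# sub-interpreters in the same process exercise it.)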
+ +"""Minimal test of PyMongo in a WSGI application, see bug PYTHON-353 +""" +from __future__ import annotations + +import datetime +import os +import re +import sys +import uuid + +this_path = os.path.dirname(os.path.join(os.getcwd(), __file__)) + +# Location of PyMongo checkout +repository_path = os.path.normpath(os.path.join(this_path, "..", "..")) +sys.path.insert(0, repository_path) + +import bson +import pymongo +from bson.binary import STANDARD, Binary +from bson.code import Code +from bson.codec_options import CodecOptions +from bson.datetime_ms import DatetimeConversion, DatetimeMS +from bson.dbref import DBRef +from bson.objectid import ObjectId +from bson.regex import Regex +from pymongo.synchronous.mongo_client import MongoClient + +try: + from mod_wsgi import version as mod_wsgi_version # type: ignore[import] + + _HAVE_MOD_WSGI = True +except: + mod_wsgi_version = None + _HAVE_MOD_WSGI = False + +if _HAVE_MOD_WSGI: + # Ensure the C extensions are installed. + assert bson.has_c() + assert pymongo.has_c() + + OPTS: CodecOptions[dict] = CodecOptions( + uuid_representation=STANDARD, datetime_conversion=DatetimeConversion.DATETIME_AUTO + ) + client: MongoClient[dict] = MongoClient() + # Use a unique collection name for each process: + coll_name = f"test-{uuid.uuid4()}" + collection = client.test.get_collection(coll_name, codec_options=OPTS) + ndocs = 20 + collection.drop() + doc = { + "int32": 2 << 15, + "int64": 2 << 50, + "null": None, + "bool": True, + "float": 1.5, + "str": "string", + "list": [1, 2, 3], + "dict": {"a": 1, "b": 2, "c": 3}, + "datetime": datetime.datetime.fromtimestamp(1690328577.446), + "datetime_ms_out_of_range": DatetimeMS(-2 << 60), + "regex_native": re.compile("regex*"), + "regex_pymongo": Regex("regex*"), + "binary": Binary(b"bytes", 128), + "oid": ObjectId(), + "dbref": DBRef("test", 1), + "code": Code("function(){ return true; }"), + "code_w_scope": Code("return function(){ return x; }", scope={"x": False}), + "bytes": b"bytes", + "uuid": uuid.uuid4(), + } + collection.insert_many([dict(i=i, **doc) for i in range(ndocs)]) + client.close() # Discard main thread's request socket. + client = MongoClient() + collection = client.test.get_collection(coll_name, codec_options=OPTS) + + def application(environ, start_response): + results = list(collection.find().batch_size(10)) + assert len(results) == ndocs, f"n_actual={len(results)} n_expected={ndocs}" + # Test encoding and decoding works (for sub interpreter support). + decoded = bson.decode(bson.encode(doc, codec_options=OPTS), codec_options=OPTS) + for key, value in doc.items(): + # Native regex objects are decoded as bson Regex. 
+ if isinstance(value, re.Pattern): + value = Regex.from_native(value) + assert decoded[key] == value, f"failed on doc[{key!r}]: {decoded[key]!r} != {value!r}" + assert isinstance( + decoded[key], type(value) + ), f"failed on doc[{key}]: {decoded[key]!r} is not an instance of {type(value)}" + + output = ( + f" python {sys.version}, mod_wsgi {mod_wsgi_version}," + f" pymongo {pymongo.version}," + f' mod_wsgi.process_group = {environ["mod_wsgi.process_group"]!r}' + f' mod_wsgi.application_group = {environ["mod_wsgi.application_group"]!r}' + f' wsgi.multithread = {environ["wsgi.multithread"]!r}' + "\n" + ) + response_headers = [("Content-Length", str(len(output)))] + start_response("200 OK", response_headers) + return [output.encode("ascii")] diff --git a/test/mod_wsgi_test/mod_wsgi_test.wsgi b/test/mod_wsgi_test/mod_wsgi_test.wsgi deleted file mode 100644 index 7c7b24cb70..0000000000 --- a/test/mod_wsgi_test/mod_wsgi_test.wsgi +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright 2012-2015 MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Minimal test of PyMongo in a WSGI application, see bug PYTHON-353 -""" - -import os -import sys - -this_path = os.path.dirname(os.path.join(os.getcwd(), __file__)) - -# Location of PyMongo checkout -repository_path = os.path.normpath(os.path.join(this_path, '..', '..')) -sys.path.insert(0, repository_path) - -import pymongo -from pymongo.hello import HelloCompat # noqa -from pymongo.mongo_client import MongoClient - -client = MongoClient() -collection = client.test.test -ndocs = 20 -collection.drop() -collection.insert_many([{'i': i} for i in range(ndocs)]) -client.close() # Discard main thread's request socket. -client = MongoClient() -collection = client.test.test - -try: - from mod_wsgi import version as mod_wsgi_version -except: - mod_wsgi_version = None - - -def application(environ, start_response): - results = list(collection.find().batch_size(10)) - assert len(results) == ndocs - output = ' python %s, mod_wsgi %s, pymongo %s ' % ( - sys.version, mod_wsgi_version, pymongo.version) - response_headers = [('Content-Length', str(len(output)))] - start_response('200 OK', response_headers) - return [output.encode('ascii')] diff --git a/test/mod_wsgi_test/mod_wsgi_test_embedded.conf b/test/mod_wsgi_test/mod_wsgi_test_embedded.conf new file mode 100644 index 0000000000..306dab4ab6 --- /dev/null +++ b/test/mod_wsgi_test/mod_wsgi_test_embedded.conf @@ -0,0 +1,30 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
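+# (Embedded mode: no WSGIDaemonProcess directive is used, so requests are
+# handled inside the Apache worker processes themselves rather than in a
+# separate mod_wsgi daemon.)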
+ +# Minimal test of PyMongo in an *Embedded mode* WSGI application. + +LoadModule wsgi_module ${MOD_WSGI_SO} + +# Avoid permissions issues +WSGISocketPrefix /tmp/ + + + ServerName localhost + # Mount the script twice so that multiple interpreters are used. + # For the convenience of unittests, rather than hard-code the location of + # mod_wsgi_test.py, include it in the URL, so + # http://localhost/interpreter1/location-of-pymongo-checkout will work: + WSGIScriptAliasMatch ^/interpreter1/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py + WSGIScriptAliasMatch ^/interpreter2/(.+) $1/test/mod_wsgi_test/mod_wsgi_test.py + diff --git a/test/mod_wsgi_test/test_client.py b/test/mod_wsgi_test/test_client.py index bfdae9e824..c122863bfa 100644 --- a/test/mod_wsgi_test/test_client.py +++ b/test/mod_wsgi_test/test_client.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test client for mod_wsgi application, see bug PYTHON-353. -""" +"""Test client for mod_wsgi application, see bug PYTHON-353.""" +from __future__ import annotations import _thread as thread +import random import sys import threading import time @@ -23,9 +24,9 @@ from urllib.request import urlopen -def parse_args(): +def parse_args(args=None): parser = OptionParser( - """usage: %prog [options] mode url + """usage: %prog [options] mode url [...] mode:\tparallel or serial""" ) @@ -38,7 +39,7 @@ def parse_args(): type="int", dest="nrequests", default=50 * 1000, - help="Number of times to GET the URL, in total", + help="Number of times to GET the URLs, in total", ) parser.add_option( @@ -69,8 +70,9 @@ def parse_args(): ) try: - options, (mode, url) = parser.parse_args() - except ValueError: + options, args = parser.parse_args(args or sys.argv[1:]) + mode, urls = args[0], args[1:] + except (ValueError, IndexError): parser.print_usage() sys.exit(1) @@ -78,10 +80,11 @@ def parse_args(): parser.print_usage() sys.exit(1) - return options, mode, url + return options, mode, urls -def get(url): +def get(urls): + url = random.choice(urls) urlopen(url).read().strip() @@ -90,21 +93,21 @@ class URLGetterThread(threading.Thread): counter_lock = threading.Lock() counter = 0 - def __init__(self, options, url, nrequests_per_thread): - super(URLGetterThread, self).__init__() + def __init__(self, options, urls, nrequests_per_thread): + super().__init__() self.options = options - self.url = url + self.urls = urls self.nrequests_per_thread = nrequests_per_thread self.errors = 0 def run(self): - for i in range(self.nrequests_per_thread): + for _i in range(self.nrequests_per_thread): try: - get(url) + get(self.urls) except Exception as e: print(e) - if not options.continue_: + if not self.options.continue_: thread.interrupt_main() thread.exit() @@ -114,13 +117,13 @@ def run(self): URLGetterThread.counter += 1 counter = URLGetterThread.counter - should_print = options.verbose and not counter % 1000 + should_print = self.options.verbose and not counter % 1000 if should_print: print(counter) -def main(options, mode, url): +def main(options, mode, urls): start_time = time.time() errors = 0 if mode == "parallel": @@ -128,17 +131,15 @@ def main(options, mode, url): if options.verbose: print( - "Getting %s %s times total in %s threads, " - "%s times per thread" - % ( - url, + "Getting {} {} times total in {} threads, " "{} times per thread".format( + urls, nrequests_per_thread * options.nthreads, options.nthreads, nrequests_per_thread, ) ) threads = [ - URLGetterThread(options, url, 
nrequests_per_thread) for _ in range(options.nthreads) + URLGetterThread(options, urls, nrequests_per_thread) for _ in range(options.nthreads) ] for t in threads: @@ -154,11 +155,11 @@ def main(options, mode, url): else: assert mode == "serial" if options.verbose: - print("Getting %s %s times in one thread" % (url, options.nrequests)) + print(f"Getting {urls} {options.nrequests} times in one thread") for i in range(1, options.nrequests + 1): try: - get(url) + get(urls) except Exception as e: print(e) if not options.continue_: @@ -181,5 +182,5 @@ def main(options, mode, url): if __name__ == "__main__": - options, mode, url = parse_args() - main(options, mode, url) + options, mode, urls = parse_args() + main(options, mode, urls) diff --git a/test/mypy_fails/insert_many_dict.py b/test/mypy_fails/insert_many_dict.py index 7cbabc28f0..5f9a2d45a9 100644 --- a/test/mypy_fails/insert_many_dict.py +++ b/test/mypy_fails/insert_many_dict.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from pymongo import MongoClient client: MongoClient = MongoClient() diff --git a/test/mypy_fails/insert_one_list.py b/test/mypy_fails/insert_one_list.py index 12079ffc6d..7c27d5cac9 100644 --- a/test/mypy_fails/insert_one_list.py +++ b/test/mypy_fails/insert_one_list.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from pymongo import MongoClient client: MongoClient = MongoClient() diff --git a/test/mypy_fails/raw_bson_document.py b/test/mypy_fails/raw_bson_document.py index 427140dfac..49f3659e90 100644 --- a/test/mypy_fails/raw_bson_document.py +++ b/test/mypy_fails/raw_bson_document.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from bson.raw_bson import RawBSONDocument from pymongo import MongoClient @@ -5,9 +7,9 @@ coll = client.test.test doc = {"my": "doc"} coll.insert_one(doc) -retreived = coll.find_one({"_id": doc["_id"]}) -assert retreived is not None -assert len(retreived.raw) > 0 -retreived[ +retrieved = coll.find_one({"_id": doc["_id"]}) +assert retrieved is not None +assert len(retrieved.raw) > 0 +retrieved[ "foo" ] = "bar" # error: Unsupported target for indexed assignment ("RawBSONDocument") [index] diff --git a/test/mypy_fails/typedict_client.py b/test/mypy_fails/typedict_client.py index 24dd84ee28..37c3f0bfcc 100644 --- a/test/mypy_fails/typedict_client.py +++ b/test/mypy_fails/typedict_client.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import TypedDict from pymongo import MongoClient @@ -10,9 +12,9 @@ class Movie(TypedDict): client: MongoClient[Movie] = MongoClient() coll = client.test.test -retreived = coll.find_one({"_id": "foo"}) -assert retreived is not None -assert retreived["year"] == 1 +retrieved = coll.find_one({"_id": "foo"}) +assert retrieved is not None +assert retrieved["year"] == 1 assert ( - retreived["name"] == 2 + retrieved["name"] == 2 ) # error: Non-overlapping equality check (left operand type: "str", right operand type: "Literal[2]") [comparison-overlap] diff --git a/test/ocsp/test_ocsp.py b/test/ocsp/test_ocsp.py index a0770afefa..b20eaa35d6 100644 --- a/test/ocsp/test_ocsp.py +++ b/test/ocsp/test_ocsp.py @@ -13,17 +13,24 @@ # limitations under the License. 
"""Test OCSP.""" +from __future__ import annotations import logging import os import sys import unittest +from pathlib import Path + +import pytest sys.path[0:0] = [""] import pymongo from pymongo.errors import ServerSelectionTimeoutError +pytestmark = pytest.mark.ocsp + + CA_FILE = os.environ.get("CA_FILE") OCSP_TLS_SHOULD_SUCCEED = os.environ.get("OCSP_TLS_SHOULD_SUCCEED") == "true" @@ -32,22 +39,16 @@ FORMAT = "%(asctime)s %(levelname)s %(module)s %(message)s" logging.basicConfig(format=FORMAT, level=logging.DEBUG) -if sys.platform == "win32": - # The non-stapled OCSP endpoint check is slow on Windows. - TIMEOUT_MS = 5000 -else: - TIMEOUT_MS = 500 - def _connect(options): - uri = ("mongodb://localhost:27017/?serverSelectionTimeoutMS=%s&tlsCAFile=%s&%s") % ( - TIMEOUT_MS, - CA_FILE, - options, - ) + assert CA_FILE is not None + uri = f"mongodb://localhost:27017/?serverSelectionTimeoutMS=10000&tlsCAFile={Path(CA_FILE).as_posix()}&{options}" print(uri) - client = pymongo.MongoClient(uri) - client.admin.command("ping") + try: + client = pymongo.MongoClient(uri) + client.admin.command("ping") + finally: + client.close() class TestOCSP(unittest.TestCase): diff --git a/test/performance/async_perf_test.py b/test/performance/async_perf_test.py new file mode 100644 index 0000000000..6eb31ea4fe --- /dev/null +++ b/test/performance/async_perf_test.py @@ -0,0 +1,496 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Asynchronous Tests for the MongoDB Driver Performance Benchmarking Spec. + +See https://github.com/mongodb/specifications/blob/master/source/benchmarking/benchmarking.md + + +To set up the benchmarks locally:: + + python -m pip install simplejson + git clone --depth 1 https://github.com/mongodb/specifications.git + pushd specifications/source/benchmarking/data + tar xf extended_bson.tgz + tar xf parallel.tgz + tar xf single_and_multi_document.tgz + popd + export TEST_PATH="specifications/source/benchmarking/data" + export OUTPUT_FILE="results.json" + +Then to run all benchmarks quickly:: + + FASTBENCH=1 python test/performance/async_perf_test.py -v + +To run individual benchmarks quickly:: + + FASTBENCH=1 python test/performance/async_perf_test.py -v TestRunCommand TestFindManyAndEmptyCursor +""" +from __future__ import annotations + +import asyncio +import os +import sys +import tempfile +import time +import warnings +from typing import Any, List, Optional, Union + +import pytest + +try: + import simplejson as json +except ImportError: + import json # type: ignore[no-redef] + +sys.path[0:0] = [""] + +from test.asynchronous import AsyncPyMongoTestCase, async_client_context, unittest + +from bson import encode +from gridfs import AsyncGridFSBucket +from pymongo import ( + DeleteOne, + InsertOne, + ReplaceOne, +) + +pytestmark = pytest.mark.perf + +# Spec says to use at least 1 minute cumulative execution time and up to 100 iterations or 5 minutes but that +# makes the benchmarks too slow. 
Instead, we use at least 30 seconds and at most 60 seconds. +NUM_ITERATIONS = 100 +MIN_ITERATION_TIME = 30 +MAX_ITERATION_TIME = 120 +NUM_DOCS = 10000 +# When debugging or prototyping it's often useful to run the benchmarks locally, set FASTBENCH=1 to run quickly. +if bool(os.getenv("FASTBENCH")): + NUM_ITERATIONS = 2 + MIN_ITERATION_TIME = 1 + MAX_ITERATION_TIME = 30 + NUM_DOCS = 1000 + +TEST_PATH = os.environ.get( + "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data")) +) + +OUTPUT_FILE = os.environ.get("OUTPUT_FILE") + +result_data: List = [] + + +def tearDownModule(): + output = json.dumps(result_data, indent=4) + if OUTPUT_FILE: + with open(OUTPUT_FILE, "w") as opf: + opf.write(output) + else: + print(output) + + +class Timer: + def __enter__(self): + self.start = time.monotonic() + return self + + def __exit__(self, *args): + self.end = time.monotonic() + self.interval = self.end - self.start + + +async def concurrent(n_tasks, func): + tasks = [func() for _ in range(n_tasks)] + await asyncio.gather(*tasks) + + +class PerformanceTest: + dataset: str + data_size: int + fail: Any + n_tasks: int = 1 + did_init: bool = False + + async def asyncSetUp(self): + await async_client_context.init() + self.setup_time = time.monotonic() + + async def asyncTearDown(self): + duration = time.monotonic() - self.setup_time + # Remove "Test" so that TestFlatEncoding is reported as "FlatEncoding". + name = self.__class__.__name__[4:] + median = self.percentile(50) + megabytes_per_sec = (self.data_size * self.n_tasks) / median / 1000000 + print( + f"Completed {self.__class__.__name__} {megabytes_per_sec:.3f} MB/s, MEDIAN={self.percentile(50):.3f}s, " + f"total time={duration:.3f}s, iterations={len(self.results)}" + ) + result_data.append( + { + "info": { + "test_name": name, + "args": { + "tasks": self.n_tasks, + }, + }, + "metrics": [ + { + "name": "megabytes_per_sec", + "type": "MEDIAN", + "value": megabytes_per_sec, + "metadata": { + "improvement_direction": "up", + "measurement_unit": "megabytes_per_second", + }, + }, + ], + } + ) + + async def before(self): + pass + + async def do_task(self): + raise NotImplementedError + + async def after(self): + pass + + def percentile(self, percentile): + if hasattr(self, "results"): + sorted_results = sorted(self.results) + percentile_index = int(len(sorted_results) * percentile / 100) - 1 + return sorted_results[percentile_index] + else: + self.fail("Test execution failed") + return None + + async def runTest(self): + results = [] + start = time.monotonic() + i = 0 + while True: + i += 1 + await self.before() + with Timer() as timer: + if self.n_tasks == 1: + await self.do_task() + else: + await concurrent(self.n_tasks, self.do_task) + await self.after() + results.append(timer.interval) + duration = time.monotonic() - start + if duration > MIN_ITERATION_TIME and i >= NUM_ITERATIONS: + break + if i >= NUM_ITERATIONS: + break + if duration > MAX_ITERATION_TIME: + with warnings.catch_warnings(): + warnings.simplefilter("default") + warnings.warn( + f"{self.__class__.__name__} timed out after {MAX_ITERATION_TIME}s, completed {i}/{NUM_ITERATIONS} iterations." 
+ ) + + break + + self.results = results + + +# SINGLE-DOC BENCHMARKS +class TestRunCommand(PerformanceTest, AsyncPyMongoTestCase): + data_size = len(encode({"hello": True})) * NUM_DOCS + + async def asyncSetUp(self): + await super().asyncSetUp() + self.client = async_client_context.client + await self.client.drop_database("perftest") + + async def do_task(self): + command = self.client.perftest.command + for _ in range(NUM_DOCS): + await command("hello", True) + + +class TestRunCommand8Tasks(TestRunCommand): + n_tasks = 8 + + +class TestRunCommand80Tasks(TestRunCommand): + n_tasks = 80 + + +class TestRunCommandUnlimitedTasks(TestRunCommand): + async def do_task(self): + command = self.client.perftest.command + await asyncio.gather(*[command("hello", True) for _ in range(NUM_DOCS)]) + + +class TestDocument(PerformanceTest): + async def asyncSetUp(self): + await super().asyncSetUp() + # Location of test data. + with open( # noqa: ASYNC101 + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) + ) as data: + self.document = json.loads(data.read()) + + self.client = async_client_context.client + await self.client.drop_database("perftest") + + async def asyncTearDown(self): + await super().asyncTearDown() + await self.client.drop_database("perftest") + + async def before(self): + self.corpus = await self.client.perftest.create_collection("corpus") + + async def after(self): + await self.client.perftest.drop_collection("corpus") + + +class FindTest(TestDocument): + dataset = "tweet.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + documents = [self.document.copy() for _ in range(NUM_DOCS)] + self.corpus = self.client.perftest.corpus + result = await self.corpus.insert_many(documents) + self.inserted_ids = result.inserted_ids + + async def before(self): + pass + + async def after(self): + pass + + +class TestFindOneByID(FindTest, AsyncPyMongoTestCase): + async def do_task(self): + find_one = self.corpus.find_one + for _id in self.inserted_ids: + await find_one({"_id": _id}) + + +class TestFindOneByID8Tasks(TestFindOneByID): + n_tasks = 8 + + +class TestFindOneByID80Tasks(TestFindOneByID): + n_tasks = 80 + + +class TestFindOneByIDUnlimitedTasks(TestFindOneByID): + async def do_task(self): + find_one = self.corpus.find_one + await asyncio.gather(*[find_one({"_id": _id}) for _id in self.inserted_ids]) + + +class SmallDocInsertTest(TestDocument): + dataset = "small_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + +class SmallDocMixedTest(TestDocument): + dataset = "small_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + self.data_size = len(encode(self.document)) * NUM_DOCS * 2 + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + + +class TestSmallDocInsertOne(SmallDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + for doc in self.documents: + await insert_one(doc) + + +class TestSmallDocInsertOneUnlimitedTasks(SmallDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + await asyncio.gather(*[insert_one(doc) for doc in self.documents]) + + +class LargeDocInsertTest(TestDocument): + dataset = "large_doc.json" + + async def asyncSetUp(self): + await super().asyncSetUp() + n_docs = 10 + self.data_size = 
len(encode(self.document)) * n_docs + self.documents = [self.document.copy() for _ in range(n_docs)] + + +class TestLargeDocInsertOne(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + for doc in self.documents: + await insert_one(doc) + + +class TestLargeDocInsertOneUnlimitedTasks(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + insert_one = self.corpus.insert_one + await asyncio.gather(*[insert_one(doc) for doc in self.documents]) + + +# MULTI-DOC BENCHMARKS +class TestFindManyAndEmptyCursor(FindTest, AsyncPyMongoTestCase): + async def do_task(self): + await self.corpus.find().to_list() + + +class TestFindManyAndEmptyCursor8Tasks(TestFindManyAndEmptyCursor): + n_tasks = 8 + + +class TestFindManyAndEmptyCursor80Tasks(TestFindManyAndEmptyCursor): + n_tasks = 80 + + +class TestSmallDocBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + await self.corpus.insert_many(self.documents, ordered=True) + + +class TestSmallDocCollectionBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + async def do_task(self): + await self.corpus.bulk_write(self.models, ordered=True) + + +class TestSmallDocClientBulkInsert(SmallDocInsertTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class TestSmallDocBulkMixedOps(SmallDocMixedTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(document=doc)) + self.models.append(ReplaceOne(filter={}, replacement=doc.copy(), upsert=True)) + self.models.append(DeleteOne(filter={})) + + async def do_task(self): + await self.corpus.bulk_write(self.models, ordered=True) + + +class TestSmallDocClientBulkMixedOps(SmallDocMixedTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + self.models.append( + ReplaceOne( + namespace="perftest.corpus", filter={}, replacement=doc.copy(), upsert=True + ) + ) + self.models.append(DeleteOne(namespace="perftest.corpus", filter={})) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class TestLargeDocBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + async def do_task(self): + await self.corpus.insert_many(self.documents, ordered=True) + + +class TestLargeDocCollectionBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + async def do_task(self): + await self.corpus.bulk_write(self.models, 
ordered=True) + + +class TestLargeDocClientBulkInsert(LargeDocInsertTest, AsyncPyMongoTestCase): + @async_client_context.require_version_min(8, 0, 0, -24) + async def asyncSetUp(self): + await super().asyncSetUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @async_client_context.require_version_min(8, 0, 0, -24) + async def do_task(self): + await self.client.bulk_write(self.models, ordered=True) + + +class GridFsTest(PerformanceTest): + async def asyncSetUp(self): + await super().asyncSetUp() + self.client = async_client_context.client + await self.client.drop_database("perftest") + + gridfs_path = os.path.join( + TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") + ) + with open(gridfs_path, "rb") as data: # noqa: ASYNC101 + self.document = data.read() + self.data_size = len(self.document) + self.bucket = AsyncGridFSBucket(self.client.perftest) + + async def asyncTearDown(self): + await super().asyncTearDown() + await self.client.drop_database("perftest") + + +class TestGridFsUpload(GridFsTest, AsyncPyMongoTestCase): + async def before(self): + # Create the bucket. + await self.bucket.upload_from_stream("init", b"x") + + async def do_task(self): + await self.bucket.upload_from_stream("gridfstest", self.document) + + +class TestGridFsDownload(GridFsTest, AsyncPyMongoTestCase): + async def asyncSetUp(self): + await super().asyncSetUp() + self.uploaded_id = await self.bucket.upload_from_stream("gridfstest", self.document) + + async def do_task(self): + await (await self.bucket.open_download_stream(self.uploaded_id)).read() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/performance/perf_test.py b/test/performance/perf_test.py index 3cb4b5d5d1..5688d28d2d 100644 --- a/test/performance/perf_test.py +++ b/test/performance/perf_test.py @@ -1,4 +1,4 @@ -# Copyright 2015 MongoDB, Inc. +# Copyright 2015-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,15 +12,43 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the MongoDB Driver Performance Benchmarking Spec.""" +"""Tests for the MongoDB Driver Performance Benchmarking Spec. 
+
+See https://github.com/mongodb/specifications/blob/master/source/benchmarking/benchmarking.md
+
+
+To set up the benchmarks locally::
+
+    python -m pip install simplejson
+    git clone --depth 1 https://github.com/mongodb/specifications.git
+    pushd specifications/source/benchmarking/data
+    tar xf extended_bson.tgz
+    tar xf parallel.tgz
+    tar xf single_and_multi_document.tgz
+    popd
+    export TEST_PATH="specifications/source/benchmarking/data"
+    export OUTPUT_FILE="results.json"
+
+Then to run all benchmarks quickly::
+
+    FASTBENCH=1 python test/performance/perf_test.py -v
+
+To run individual benchmarks quickly::
+
+    FASTBENCH=1 python test/performance/perf_test.py -v TestRunCommand TestFindManyAndEmptyCursor
+"""
+from __future__ import annotations

 import multiprocessing as mp
 import os
 import sys
 import tempfile
+import threading
 import time
 import warnings
-from typing import Any, List
+from typing import Any, List, Optional, Union
+
+import pytest

 try:
     import simplejson as json
@@ -29,16 +57,31 @@

 sys.path[0:0] = [""]

-from test import client_context, host, port, unittest
+from test import client_context, unittest

-from bson import decode, encode
-from bson.json_util import loads
+from bson import decode, encode, json_util
 from gridfs import GridFSBucket
-from pymongo import MongoClient
+from pymongo import (
+    DeleteOne,
+    InsertOne,
+    MongoClient,
+    ReplaceOne,
+)

+pytestmark = pytest.mark.perf
+
+# The spec says to use at least 1 minute of cumulative execution time and up to 100 iterations or 5 minutes,
+# but that makes the benchmarks too slow. Instead, we use at least 30 seconds and at most 60 seconds.
 NUM_ITERATIONS = 100
-MAX_ITERATION_TIME = 300
+MIN_ITERATION_TIME = 30
+MAX_ITERATION_TIME = 60
 NUM_DOCS = 10000
+# When debugging or prototyping, it's often useful to run the benchmarks locally; set FASTBENCH=1 to run quickly.
+if bool(os.getenv("FASTBENCH")):
+    NUM_ITERATIONS = 2
+    MIN_ITERATION_TIME = 0.1
+    MAX_ITERATION_TIME = 0.5
+    NUM_DOCS = 1000

 TEST_PATH = os.environ.get(
     "TEST_PATH", os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.join("data"))
@@ -58,7 +101,7 @@ def tearDownModule():
     print(output)


-class Timer(object):
+class Timer:
     def __enter__(self):
         self.start = time.monotonic()
         return self
@@ -68,34 +111,55 @@ def __exit__(self, *args):
         self.interval = self.end - self.start


-class PerformanceTest(object):
-    dataset: Any
-    data_size: Any
-    do_task: Any
+def threaded(n_threads, func):
+    threads = [threading.Thread(target=func) for _ in range(n_threads)]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()
+
+
+class PerformanceTest:
+    dataset: str
+    data_size: int
     fail: Any
+    n_threads: int = 1

     @classmethod
     def setUpClass(cls):
         client_context.init()

     def setUp(self):
-        pass
+        self.setup_time = time.monotonic()

     def tearDown(self):
-        name = self.__class__.__name__
+        duration = time.monotonic() - self.setup_time
+        # Remove "Test" so that TestFlatEncoding is reported as "FlatEncoding".
+        name = self.__class__.__name__[4:]
         median = self.percentile(50)
-        bytes_per_sec = self.data_size / median
-        print("Running %s.
MEDIAN=%s" % (self.__class__.__name__, self.percentile(50))) + megabytes_per_sec = (self.data_size * self.n_threads) / median / 1000000 + print( + f"Completed {self.__class__.__name__} {megabytes_per_sec:.3f} MB/s, MEDIAN={self.percentile(50):.3f}s, " + f"total time={duration:.3f}s, iterations={len(self.results)}" + ) result_data.append( { "info": { "test_name": name, "args": { - "threads": 1, + "threads": self.n_threads, }, }, "metrics": [ - {"name": "bytes_per_sec", "value": bytes_per_sec}, + { + "name": "megabytes_per_sec", + "type": "MEDIAN", + "value": megabytes_per_sec, + "metadata": { + "improvement_direction": "up", + "measurement_unit": "megabytes_per_second", + }, + }, ], } ) @@ -103,6 +167,9 @@ def tearDown(self): def before(self): pass + def do_task(self): + raise NotImplementedError + def after(self): pass @@ -113,41 +180,69 @@ def percentile(self, percentile): return sorted_results[percentile_index] else: self.fail("Test execution failed") + return None def runTest(self): results = [] start = time.monotonic() - self.max_iterations = NUM_ITERATIONS - for i in range(NUM_ITERATIONS): - if time.monotonic() - start > MAX_ITERATION_TIME: - warnings.warn("Test timed out, completed %s iterations." % i) - break + i = 0 + while True: + i += 1 self.before() with Timer() as timer: - self.do_task() + if self.n_threads == 1: + self.do_task() + else: + threaded(self.n_threads, self.do_task) self.after() results.append(timer.interval) + duration = time.monotonic() - start + if duration > MIN_ITERATION_TIME and i >= NUM_ITERATIONS: + break + if duration > MAX_ITERATION_TIME: + with warnings.catch_warnings(): + warnings.simplefilter("default") + warnings.warn( + f"{self.__class__.__name__} timed out after {MAX_ITERATION_TIME}s, completed {i}/{NUM_ITERATIONS} iterations." + ) + + break self.results = results + def mp_map(self, map_func, files): + with mp.Pool(initializer=proc_init, initargs=(client_context.client_options,)) as pool: + pool.map(map_func, files) + # BSON MICRO-BENCHMARKS -class BsonEncodingTest(PerformanceTest): + + +class MicroTest(PerformanceTest): def setUp(self): + super().setUp() # Location of test data. with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: - self.document = loads(data.read()) + self.file_data = data.read() + + +class BsonEncodingTest(MicroTest): + def setUp(self): + super().setUp() + # Location of test data. + self.document = json_util.loads(self.file_data) + self.data_size = len(encode(self.document)) * NUM_DOCS def do_task(self): for _ in range(NUM_DOCS): encode(self.document) -class BsonDecodingTest(PerformanceTest): +class BsonDecodingTest(MicroTest): def setUp(self): - # Location of test data. 
- with open(os.path.join(TEST_PATH, os.path.join("extended_bson", self.dataset))) as data: - self.document = encode(json.loads(data.read())) + super().setUp() + self.document = encode(json_util.loads(self.file_data)) + self.data_size = len(self.document) * NUM_DOCS def do_task(self): for _ in range(NUM_DOCS): @@ -156,53 +251,103 @@ def do_task(self): class TestFlatEncoding(BsonEncodingTest, unittest.TestCase): dataset = "flat_bson.json" - data_size = 75310000 class TestFlatDecoding(BsonDecodingTest, unittest.TestCase): dataset = "flat_bson.json" - data_size = 75310000 class TestDeepEncoding(BsonEncodingTest, unittest.TestCase): dataset = "deep_bson.json" - data_size = 19640000 class TestDeepDecoding(BsonDecodingTest, unittest.TestCase): dataset = "deep_bson.json" - data_size = 19640000 class TestFullEncoding(BsonEncodingTest, unittest.TestCase): dataset = "full_bson.json" - data_size = 57340000 class TestFullDecoding(BsonDecodingTest, unittest.TestCase): dataset = "full_bson.json" - data_size = 57340000 + + +# JSON MICRO-BENCHMARKS +class JsonEncodingTest(MicroTest): + def setUp(self): + super().setUp() + # Location of test data. + self.document = json_util.loads(self.file_data) + # Note: use the BSON size as the data size so we can compare BSON vs JSON performance. + self.data_size = len(encode(self.document)) * NUM_DOCS + + def do_task(self): + for _ in range(NUM_DOCS): + json_util.dumps(self.document) + + +class JsonDecodingTest(MicroTest): + def setUp(self): + super().setUp() + self.document = self.file_data + # Note: use the BSON size as the data size so we can compare BSON vs JSON performance. + self.data_size = len(encode(json_util.loads(self.file_data))) * NUM_DOCS + + def do_task(self): + for _ in range(NUM_DOCS): + json_util.loads(self.document) + + +class TestJsonFlatEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "flat_bson.json" + + +class TestJsonFlatDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "flat_bson.json" + + +class TestJsonDeepEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "deep_bson.json" + + +class TestJsonDeepDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "deep_bson.json" + + +class TestJsonFullEncoding(JsonEncodingTest, unittest.TestCase): + dataset = "full_bson.json" + + +class TestJsonFullDecoding(JsonDecodingTest, unittest.TestCase): + dataset = "full_bson.json" # SINGLE-DOC BENCHMARKS class TestRunCommand(PerformanceTest, unittest.TestCase): - data_size = 160000 + data_size = len(encode({"hello": True})) * NUM_DOCS def setUp(self): + super().setUp() self.client = client_context.client self.client.drop_database("perftest") def do_task(self): command = self.client.perftest.command for _ in range(NUM_DOCS): - command("ping") + command("hello", True) + + +class TestRunCommand8Threads(TestRunCommand): + n_threads = 8 class TestDocument(PerformanceTest): def setUp(self): + super().setUp() # Location of test data. 
with open( - os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)), "r" + os.path.join(TEST_PATH, os.path.join("single_and_multi_document", self.dataset)) ) as data: self.document = json.loads(data.read()) @@ -210,7 +355,7 @@ def setUp(self): self.client.drop_database("perftest") def tearDown(self): - super(TestDocument, self).tearDown() + super().tearDown() self.client.drop_database("perftest") def before(self): @@ -220,54 +365,71 @@ def after(self): self.client.perftest.drop_collection("corpus") -class TestFindOneByID(TestDocument, unittest.TestCase): - data_size = 16220000 +class FindTest(TestDocument): + dataset = "tweet.json" def setUp(self): - self.dataset = "tweet.json" - super(TestFindOneByID, self).setUp() - + super().setUp() + self.data_size = len(encode(self.document)) * NUM_DOCS documents = [self.document.copy() for _ in range(NUM_DOCS)] self.corpus = self.client.perftest.corpus result = self.corpus.insert_many(documents) self.inserted_ids = result.inserted_ids + def before(self): + pass + + def after(self): + pass + + +class TestFindOneByID(FindTest, unittest.TestCase): def do_task(self): find_one = self.corpus.find_one for _id in self.inserted_ids: find_one({"_id": _id}) - def before(self): - pass - def after(self): - pass +class TestFindOneByID8Threads(TestFindOneByID): + n_threads = 8 -class TestSmallDocInsertOne(TestDocument, unittest.TestCase): - data_size = 2750000 +class SmallDocInsertTest(TestDocument): + dataset = "small_doc.json" def setUp(self): - self.dataset = "small_doc.json" - super(TestSmallDocInsertOne, self).setUp() + super().setUp() + self.data_size = len(encode(self.document)) * NUM_DOCS + self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + +class SmallDocMixedTest(TestDocument): + dataset = "small_doc.json" + + def setUp(self): + super().setUp() + self.data_size = len(encode(self.document)) * NUM_DOCS * 2 self.documents = [self.document.copy() for _ in range(NUM_DOCS)] + +class TestSmallDocInsertOne(SmallDocInsertTest, unittest.TestCase): def do_task(self): insert_one = self.corpus.insert_one for doc in self.documents: insert_one(doc) -class TestLargeDocInsertOne(TestDocument, unittest.TestCase): - data_size = 27310890 +class LargeDocInsertTest(TestDocument): + dataset = "large_doc.json" def setUp(self): - self.dataset = "large_doc.json" - super(TestLargeDocInsertOne, self).setUp() + super().setUp() + n_docs = 10 + self.data_size = len(encode(self.document)) * n_docs + self.documents = [self.document.copy() for _ in range(n_docs)] - self.documents = [self.document.copy() for _ in range(10)] +class TestLargeDocInsertOne(LargeDocInsertTest, unittest.TestCase): def do_task(self): insert_one = self.corpus.insert_one for doc in self.documents: @@ -275,61 +437,108 @@ def do_task(self): # MULTI-DOC BENCHMARKS -class TestFindManyAndEmptyCursor(TestDocument, unittest.TestCase): - data_size = 16220000 +class TestFindManyAndEmptyCursor(FindTest, unittest.TestCase): + def do_task(self): + list(self.corpus.find()) - def setUp(self): - self.dataset = "tweet.json" - super(TestFindManyAndEmptyCursor, self).setUp() - for _ in range(10): - self.client.perftest.command("insert", "corpus", documents=[self.document] * 1000) - self.corpus = self.client.perftest.corpus +class TestFindManyAndEmptyCursor8Threads(TestFindManyAndEmptyCursor): + n_threads = 8 + +class TestSmallDocBulkInsert(SmallDocInsertTest, unittest.TestCase): def do_task(self): - list(self.corpus.find()) + self.corpus.insert_many(self.documents, ordered=True) - def 
before(self): - pass - def after(self): - pass +class TestSmallDocCollectionBulkInsert(SmallDocInsertTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) -class TestSmallDocBulkInsert(TestDocument, unittest.TestCase): - data_size = 2750000 +class TestSmallDocClientBulkInsert(SmallDocInsertTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) def setUp(self): - self.dataset = "small_doc.json" - super(TestSmallDocBulkInsert, self).setUp() - self.documents = [self.document.copy() for _ in range(NUM_DOCS)] - - def before(self): - self.corpus = self.client.perftest.create_collection("corpus") + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + @client_context.require_version_min(8, 0, 0, -24) def do_task(self): - self.corpus.insert_many(self.documents, ordered=True) + self.client.bulk_write(self.models, ordered=True) + +class TestSmallDocBulkMixedOps(SmallDocMixedTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(document=doc)) + self.models.append(ReplaceOne(filter={}, replacement=doc.copy(), upsert=True)) + self.models.append(DeleteOne(filter={})) -class TestLargeDocBulkInsert(TestDocument, unittest.TestCase): - data_size = 27310890 + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) + +class TestSmallDocClientBulkMixedOps(SmallDocMixedTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) def setUp(self): - self.dataset = "large_doc.json" - super(TestLargeDocBulkInsert, self).setUp() - self.documents = [self.document.copy() for _ in range(10)] + super().setUp() + self.models: list[Union[InsertOne, ReplaceOne, DeleteOne]] = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + self.models.append( + ReplaceOne( + namespace="perftest.corpus", filter={}, replacement=doc.copy(), upsert=True + ) + ) + self.models.append(DeleteOne(namespace="perftest.corpus", filter={})) + + @client_context.require_version_min(8, 0, 0, -24) + def do_task(self): + self.client.bulk_write(self.models, ordered=True) - def before(self): - self.corpus = self.client.perftest.create_collection("corpus") +class TestLargeDocBulkInsert(LargeDocInsertTest, unittest.TestCase): def do_task(self): self.corpus.insert_many(self.documents, ordered=True) -class TestGridFsUpload(PerformanceTest, unittest.TestCase): - data_size = 52428800 +class TestLargeDocCollectionBulkInsert(LargeDocInsertTest, unittest.TestCase): + def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + def do_task(self): + self.corpus.bulk_write(self.models, ordered=True) + + +class TestLargeDocClientBulkInsert(LargeDocInsertTest, unittest.TestCase): + @client_context.require_version_min(8, 0, 0, -24) def setUp(self): + super().setUp() + self.models = [] + for doc in self.documents: + self.models.append(InsertOne(namespace="perftest.corpus", document=doc)) + + @client_context.require_version_min(8, 0, 0, -24) + def do_task(self): + self.client.bulk_write(self.models, ordered=True) + + +class 
GridFsTest(PerformanceTest): + def setUp(self): + super().setUp() self.client = client_context.client self.client.drop_database("perftest") @@ -338,68 +547,53 @@ def setUp(self): ) with open(gridfs_path, "rb") as data: self.document = data.read() - + self.data_size = len(self.document) self.bucket = GridFSBucket(self.client.perftest) def tearDown(self): - super(TestGridFsUpload, self).tearDown() + super().tearDown() self.client.drop_database("perftest") + +class TestGridFsUpload(GridFsTest, unittest.TestCase): def before(self): + # Create the bucket. self.bucket.upload_from_stream("init", b"x") def do_task(self): self.bucket.upload_from_stream("gridfstest", self.document) -class TestGridFsDownload(PerformanceTest, unittest.TestCase): - data_size = 52428800 - +class TestGridFsDownload(GridFsTest, unittest.TestCase): def setUp(self): - self.client = client_context.client - self.client.drop_database("perftest") - - gridfs_path = os.path.join( - TEST_PATH, os.path.join("single_and_multi_document", "gridfs_large.bin") - ) - - self.bucket = GridFSBucket(self.client.perftest) - with open(gridfs_path, "rb") as gfile: - self.uploaded_id = self.bucket.upload_from_stream("gridfstest", gfile) - - def tearDown(self): - super(TestGridFsDownload, self).tearDown() - self.client.drop_database("perftest") + super().setUp() + self.uploaded_id = self.bucket.upload_from_stream("gridfstest", self.document) def do_task(self): self.bucket.open_download_stream(self.uploaded_id).read() -proc_client = None +proc_client: Optional[MongoClient] = None -def proc_init(*dummy): +def proc_init(client_kwargs): global proc_client - proc_client = MongoClient(host, port) + proc_client = MongoClient(**client_kwargs) # PARALLEL BENCHMARKS -def mp_map(map_func, files): - pool = mp.Pool(initializer=proc_init) - pool.map(map_func, files) - pool.close() def insert_json_file(filename): assert proc_client is not None - with open(filename, "r") as data: + with open(filename) as data: coll = proc_client.perftest.corpus coll.insert_many([json.loads(line) for line in data]) def insert_json_file_with_file_id(filename): documents = [] - with open(filename, "r") as data: + with open(filename) as data: for line in data: doc = json.loads(line) doc["file"] = filename @@ -412,13 +606,10 @@ def insert_json_file_with_file_id(filename): def read_json_file(filename): assert proc_client is not None coll = proc_client.perftest.corpus - temp = tempfile.TemporaryFile(mode="w") - try: - temp.writelines( - [json.dumps(doc) + "\n" for doc in coll.find({"file": filename}, {"_id": False})] - ) - finally: - temp.close() + with tempfile.TemporaryFile(mode="w") as temp: + for doc in coll.find({"file": filename}, {"_id": False}): + temp.write(json.dumps(doc)) + temp.write("\n") def insert_gridfs_file(filename): @@ -441,57 +632,58 @@ def read_gridfs_file(filename): class TestJsonMultiImport(PerformanceTest, unittest.TestCase): - data_size = 565000000 - def setUp(self): + super().setUp() self.client = client_context.client self.client.drop_database("perftest") + ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) + self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)] + self.data_size = sum(os.path.getsize(fname) for fname in self.files) + self.corpus = self.client.perftest.corpus def before(self): self.client.perftest.command({"create": "corpus"}) - self.corpus = self.client.perftest.corpus - - ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi")) - self.files = 
[os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)]

     def do_task(self):
-        mp_map(insert_json_file, self.files)
+        self.mp_map(insert_json_file, self.files)

     def after(self):
-        self.client.perftest.drop_collection("corpus")
+        self.corpus.drop()

     def tearDown(self):
-        super(TestJsonMultiImport, self).tearDown()
+        super().tearDown()
         self.client.drop_database("perftest")


 class TestJsonMultiExport(PerformanceTest, unittest.TestCase):
-    data_size = 565000000
-
     def setUp(self):
+        super().setUp()
         self.client = client_context.client
         self.client.drop_database("perftest")
         self.client.perftest.corpus.create_index("file")

         ldjson_path = os.path.join(TEST_PATH, os.path.join("parallel", "ldjson_multi"))
         self.files = [os.path.join(ldjson_path, s) for s in os.listdir(ldjson_path)]
+        self.data_size = sum(os.path.getsize(fname) for fname in self.files)

-        mp_map(insert_json_file_with_file_id, self.files)
+        self.mp_map(insert_json_file_with_file_id, self.files)

     def do_task(self):
-        mp_map(read_json_file, self.files)
+        self.mp_map(read_json_file, self.files)

     def tearDown(self):
-        super(TestJsonMultiExport, self).tearDown()
+        super().tearDown()
         self.client.drop_database("perftest")


 class TestGridFsMultiFileUpload(PerformanceTest, unittest.TestCase):
-    data_size = 262144000
-
     def setUp(self):
+        super().setUp()
         self.client = client_context.client
         self.client.drop_database("perftest")
+        gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi"))
+        self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)]
+        self.data_size = sum(os.path.getsize(fname) for fname in self.files)

     def before(self):
         self.client.perftest.drop_collection("fs.files")
@@ -502,17 +694,16 @@ def before(self):
         self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)]

     def do_task(self):
-        mp_map(insert_gridfs_file, self.files)
+        self.mp_map(insert_gridfs_file, self.files)

     def tearDown(self):
-        super(TestGridFsMultiFileUpload, self).tearDown()
+        super().tearDown()
         self.client.drop_database("perftest")


 class TestGridFsMultiFileDownload(PerformanceTest, unittest.TestCase):
-    data_size = 262144000
-
     def setUp(self):
+        super().setUp()
         self.client = client_context.client
         self.client.drop_database("perftest")
@@ -520,16 +711,16 @@ def setUp(self):

         gridfs_path = os.path.join(TEST_PATH, os.path.join("parallel", "gridfs_multi"))
         self.files = [os.path.join(gridfs_path, s) for s in os.listdir(gridfs_path)]
-
+        self.data_size = sum(os.path.getsize(fname) for fname in self.files)
         for fname in self.files:
             with open(fname, "rb") as gfile:
                 bucket.upload_from_stream(fname, gfile)

     def do_task(self):
-        mp_map(read_gridfs_file, self.files)
+        self.mp_map(read_gridfs_file, self.files)

     def tearDown(self):
-        super(TestGridFsMultiFileDownload, self).tearDown()
+        super().tearDown()
         self.client.drop_database("perftest")

diff --git a/test/pymongo_mocks.py b/test/pymongo_mocks.py
index 580c5da993..7662dc9682 100644
--- a/test/pymongo_mocks.py
+++ b/test/pymongo_mocks.py
@@ -13,6 +13,7 @@
 # limitations under the License.
"""Tools for mocking parts of PyMongo to test other parts.""" +from __future__ import annotations import contextlib import weakref @@ -22,9 +23,11 @@ from pymongo import MongoClient, common from pymongo.errors import AutoReconnect, NetworkTimeout from pymongo.hello import Hello, HelloCompat -from pymongo.monitor import Monitor -from pymongo.pool import Pool from pymongo.server_description import ServerDescription +from pymongo.synchronous.monitor import Monitor +from pymongo.synchronous.pool import Pool + +_IS_SYNC = True class MockPool(Pool): @@ -38,23 +41,23 @@ def __init__(self, client, pair, *args, **kwargs): Pool.__init__(self, (client_context.host, client_context.port), *args, **kwargs) @contextlib.contextmanager - def get_socket(self, handler=None): + def checkout(self, handler=None): client = self.client - host_and_port = "%s:%s" % (self.mock_host, self.mock_port) + host_and_port = f"{self.mock_host}:{self.mock_port}" if host_and_port in client.mock_down_hosts: raise AutoReconnect("mock error") assert host_and_port in ( client.mock_standalones + client.mock_members + client.mock_mongoses - ), ("bad host: %s" % host_and_port) + ), "bad host: %s" % host_and_port - with Pool.get_socket(self, handler) as sock_info: - sock_info.mock_host = self.mock_host - sock_info.mock_port = self.mock_port - yield sock_info + with Pool.checkout(self, handler) as conn: + conn.mock_host = self.mock_host + conn.mock_port = self.mock_port + yield conn -class DummyMonitor(object): +class DummyMonitor: def __init__(self, server_description, topology, pool, topology_settings): self._server_description = server_description self.opened = False @@ -75,7 +78,7 @@ def close(self): self.opened = False -class MockMonitor(Monitor): +class SyncMockMonitor(Monitor): def __init__(self, client, server_description, topology, pool, topology_settings): # MockMonitor gets a 'client' arg, regular monitors don't. Weakref it # to avoid cycles. @@ -85,7 +88,7 @@ def __init__(self, client, server_description, topology, pool, topology_settings def _check_once(self): client = self.client address = self._server_description.address - response, rtt = client.mock_hello("%s:%d" % address) + response, rtt = client.mock_hello("%s:%d" % address) # type: ignore[str-format] return ServerDescription(address, Hello(response), rtt) @@ -99,7 +102,7 @@ def __init__( arbiters=None, down_hosts=None, *args, - **kwargs + **kwargs, ): """A MongoClient connected to the default server, with a mock topology. 
@@ -139,12 +142,31 @@ def __init__(
         self.mock_rtts = {}

         kwargs["_pool_class"] = partial(MockPool, self)
-        kwargs["_monitor_class"] = partial(MockMonitor, self)
+        kwargs["_monitor_class"] = partial(SyncMockMonitor, self)

         client_options = client_context.default_client_options.copy()
         client_options.update(kwargs)

-        super(MockClient, self).__init__(*args, **client_options)
+        super().__init__(*args, **client_options)
+
+    @classmethod
+    def get_mock_client(
+        cls,
+        standalones,
+        members,
+        mongoses,
+        hello_hosts=None,
+        arbiters=None,
+        down_hosts=None,
+        *args,
+        **kwargs,
+    ):
+        c = MockClient(
+            standalones, members, mongoses, hello_hosts, arbiters, down_hosts, *args, **kwargs
+        )
+
+        c._connect()
+        return c

     def kill_host(self, host):
         """Host is like 'a:1'."""
diff --git a/test/pytest_conf.py b/test/pytest_conf.py
new file mode 100644
index 0000000000..a6e24cd9b1
--- /dev/null
+++ b/test/pytest_conf.py
@@ -0,0 +1,15 @@
+from __future__ import annotations
+
+
+def pytest_collection_modifyitems(items, config):
+    # Markers that should overlap with the default markers.
+    overlap_markers = ["async"]
+
+    for item in items:
+        if "asynchronous" in item.fspath.dirname:
+            default_marker = "default_async"
+        else:
+            default_marker = "default"
+        markers = [m for m in item.iter_markers() if m.name not in overlap_markers]
+        if not markers:
+            item.add_marker(default_marker)
diff --git a/test/qcheck.py b/test/qcheck.py
index 4cce7b5bc8..842580cbff 100644
--- a/test/qcheck.py
+++ b/test/qcheck.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from __future__ import annotations

 import datetime
 import random
@@ -24,6 +25,8 @@
 from bson.objectid import ObjectId
 from bson.son import SON

+_IS_SYNC = True
+
 gen_target = 100
 reduction_attempts = 10
 examples = 5
@@ -116,7 +119,8 @@ def gen_regexp(gen_length):
     # TODO our patterns only consist of one letter.
     # this is because of a bug in CPython's regex equality testing,
     # which I haven't quite tracked down, so I'm just ignoring it...
- pattern = lambda: "".join(gen_list(choose_lifted("a"), gen_length)()) + def pattern(): + return "".join(gen_list(choose_lifted("a"), gen_length)()) def gen_flags(): flags = 0 @@ -142,7 +146,6 @@ def gen_dbref(): def gen_mongo_value(depth, ref): - choices = [ gen_unicode(gen_range(0, 50)), gen_printable_string(gen_range(0, 50)), @@ -220,7 +223,10 @@ def reduce(case, predicate, reductions=0): def isnt(predicate): - return lambda x: not predicate(x) + def is_not(x): + return not predicate(x) + + return is_not def check(predicate, generator): @@ -230,9 +236,9 @@ def check(predicate, generator): try: if not predicate(case): reduction = reduce(case, predicate) - counter_examples.append("after %s reductions: %r" % reduction) + counter_examples.append("after {} reductions: {!r}".format(*reduction)) except: - counter_examples.append("%r : %s" % (case, traceback.format_exc())) + counter_examples.append(f"{case!r} : {traceback.format_exc()}") return counter_examples diff --git a/test/read_write_concern/operation/default-write-concern-2.6.json b/test/read_write_concern/operation/default-write-concern-2.6.json index c623298cd7..0d8f9c98a1 100644 --- a/test/read_write_concern/operation/default-write-concern-2.6.json +++ b/test/read_write_concern/operation/default-write-concern-2.6.json @@ -1,19 +1,55 @@ { - "data": [ + "description": "default-write-concern-2.6", + "schemaVersion": "1.0", + "runOnRequirements": [ { - "_id": 1, - "x": 11 + "minServerVersion": "2.6" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default-write-concern-tests", + "databaseOptions": { + "writeConcern": {} + } + } }, { - "_id": 2, - "x": 22 + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll", + "collectionOptions": { + "writeConcern": {} + } + } } ], - "collection_name": "default_write_concern_coll", - "database_name": "default_write_concern_db", - "runOn": [ + "initialData": [ { - "minServerVersion": "2.6" + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] } ], "tests": [ @@ -22,32 +58,36 @@ "operations": [ { "name": "deleteOne", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "filter": {} }, - "result": { + "expectResult": { "deletedCount": 1 } } ], - "expectations": [ + "expectEvents": [ { - "command_started_event": { - "command": { - "delete": "default_write_concern_coll", - "deletes": [ - { - "q": {}, - "limit": 1 + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null + } } - } + ] } ] }, @@ -56,32 +96,36 @@ "operations": [ { "name": "deleteMany", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "filter": {} }, - "result": { + "expectResult": { "deletedCount": 2 } } ], - "expectations": [ + "expectEvents": [ { - "command_started_event": { - "command": { - "delete": "default_write_concern_coll", - "deletes": [ - { - "q": {}, - "limit": 0 + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + 
"writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null + } } - } + ] } ] }, @@ -90,30 +134,24 @@ "operations": [ { "name": "bulkWrite", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "ordered": true, "requests": [ { - "name": "deleteMany", - "arguments": { + "deleteMany": { "filter": {} } }, { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 1 } } }, { - "name": "updateOne", - "arguments": { + "updateOne": { "filter": { "_id": 1 }, @@ -125,16 +163,14 @@ } }, { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 2 } } }, { - "name": "replaceOne", - "arguments": { + "replaceOne": { "filter": { "_id": 1 }, @@ -144,16 +180,14 @@ } }, { - "name": "insertOne", - "arguments": { + "insertOne": { "document": { "_id": 3 } } }, { - "name": "updateMany", - "arguments": { + "updateMany": { "filter": { "_id": 1 }, @@ -165,8 +199,7 @@ } }, { - "name": "deleteOne", - "arguments": { + "deleteOne": { "filter": { "_id": 3 } @@ -176,148 +209,185 @@ } } ], - "outcome": { - "collection": { - "name": "default_write_concern_coll", - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [ { - "_id": 1, - "x": 3 + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": {}, + "limit": 0 + } + ], + "writeConcern": { + "$$exists": false + } + } + } }, { - "_id": 2 - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "default_write_concern_coll", - "deletes": [ - { - "q": {}, - "limit": 0 + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1 + } + ], + "writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "insert": "default_write_concern_coll", - "documents": [ - { - "_id": 1 + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "update": "default_write_concern_coll", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$set": { - "x": 1 + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 2 } + ], + "writeConcern": { + "$$exists": false } } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "insert": "default_write_concern_coll", - "documents": [ - { - "_id": 2 + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "x": 2 + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "update": "default_write_concern_coll", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "x": 2 + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": { + "$$exists": false } } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "insert": "default_write_concern_coll", - "documents": [ - { - 
"_id": 3 + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 3 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "update": "default_write_concern_coll", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$set": { - "x": 3 + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 } - }, - "multi": true + ], + "writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null + } } - } - }, + ] + } + ], + "outcome": [ { - "command_started_event": { - "command": { - "delete": "default_write_concern_coll", - "deletes": [ - { - "q": { - "_id": 3 - }, - "limit": 1 - } - ], - "writeConcern": null + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 3 + }, + { + "_id": 2 } - } + ] } ] }, @@ -326,10 +396,7 @@ "operations": [ { "name": "insertOne", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "document": { "_id": 3 @@ -338,10 +405,7 @@ }, { "name": "insertMany", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "documents": [ { @@ -354,10 +418,51 @@ } } ], - "outcome": { - "collection": { - "name": "default_write_concern_coll", - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ { "_id": 1, "x": 11 @@ -377,37 +482,6 @@ } ] } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "default_write_concern_coll", - "documents": [ - { - "_id": 3 - } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "insert": "default_write_concern_coll", - "documents": [ - { - "_id": 4 - }, - { - "_id": 5 - } - ], - "writeConcern": null - } - } - } ] }, { @@ -415,10 +489,7 @@ "operations": [ { "name": "updateOne", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "filter": { "_id": 1 @@ -432,10 +503,7 @@ }, { "name": "updateMany", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "filter": { "_id": 2 @@ -449,10 +517,7 @@ }, { "name": "replaceOne", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "filter": { "_id": 2 @@ -463,80 +528,107 @@ } } ], - "outcome": { - "collection": { - "name": "default_write_concern_coll", - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [ { - "_id": 1, - "x": 1 + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + 
"$$unsetOrMatches": false + } + } + ], + "writeConcern": { + "$$exists": false + } + } + } }, { - "_id": 2, - "x": 3 - } - ] - } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "update": "default_write_concern_coll", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$set": { - "x": 1 + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": 2 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } } + ], + "writeConcern": { + "$$exists": false } } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "update": "default_write_concern_coll", - "updates": [ - { - "q": { - "_id": 2 - }, - "u": { - "$set": { - "x": 2 + } + }, + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": 2 + }, + "u": { + "x": 3 + }, + "upsert": { + "$$unsetOrMatches": false + }, + "multi": { + "$$unsetOrMatches": false + } } - }, - "multi": true + ], + "writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null + } } - } - }, + ] + } + ], + "outcome": [ { - "command_started_event": { - "command": { - "update": "default_write_concern_coll", - "updates": [ - { - "q": { - "_id": 2 - }, - "u": { - "x": 3 - } - } - ], - "writeConcern": null + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 3 } - } + ] } ] } diff --git a/test/read_write_concern/operation/default-write-concern-3.2.json b/test/read_write_concern/operation/default-write-concern-3.2.json index 04dd231f04..166a184916 100644 --- a/test/read_write_concern/operation/default-write-concern-3.2.json +++ b/test/read_write_concern/operation/default-write-concern-3.2.json @@ -1,19 +1,55 @@ { - "data": [ + "description": "default-write-concern-3.2", + "schemaVersion": "1.0", + "runOnRequirements": [ { - "_id": 1, - "x": 11 + "minServerVersion": "3.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default-write-concern-tests", + "databaseOptions": { + "writeConcern": {} + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll", + "collectionOptions": { + "writeConcern": {} + } + } } ], - "collection_name": "default_write_concern_coll", - "database_name": "default_write_concern_db", - "runOn": [ + "initialData": [ { - "minServerVersion": "3.2" + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] } ], "tests": [ @@ -22,10 +58,7 @@ "operations": [ { "name": "findOneAndUpdate", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "filter": { "_id": 1 @@ -39,10 +72,7 @@ }, { "name": "findOneAndReplace", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "filter": { "_id": 2 @@ -54,10 +84,7 @@ }, { "name": "findOneAndDelete", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "filter": { "_id": 2 @@ -65,59 +92,71 @@ } } ], - "outcome": { - "collection": { - "name": "default_write_concern_coll", - "data": [ + "expectEvents": [ + { + 
"client": "client0", + "events": [ { - "_id": 1, - "x": 1 + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + }, + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": 2 + }, + "update": { + "x": 2 + }, + "writeConcern": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": 2 + }, + "remove": true, + "writeConcern": { + "$$exists": false + } + } + } } ] } - }, - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "default_write_concern_coll", - "query": { - "_id": 1 - }, - "update": { - "$set": { - "x": 1 - } - }, - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "default_write_concern_coll", - "query": { - "_id": 2 - }, - "update": { - "x": 2 - }, - "writeConcern": null - } - } - }, + ], + "outcome": [ { - "command_started_event": { - "command": { - "findAndModify": "default_write_concern_coll", - "query": { - "_id": 2 - }, - "remove": true, - "writeConcern": null + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 1 } - } + ] } ] } diff --git a/test/read_write_concern/operation/default-write-concern-3.4.json b/test/read_write_concern/operation/default-write-concern-3.4.json index 6519f6f089..e18cdfc0c4 100644 --- a/test/read_write_concern/operation/default-write-concern-3.4.json +++ b/test/read_write_concern/operation/default-write-concern-3.4.json @@ -1,30 +1,68 @@ { - "data": [ + "description": "default-write-concern-3.4", + "schemaVersion": "1.4", + "runOnRequirements": [ { - "_id": 1, - "x": 11 + "minServerVersion": "3.4" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default-write-concern-tests", + "databaseOptions": { + "writeConcern": {} + } + } }, { - "_id": 2, - "x": 22 + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll", + "collectionOptions": { + "writeConcern": {} + } + } } ], - "collection_name": "default_write_concern_coll", - "database_name": "default_write_concern_db", - "runOn": [ + "initialData": [ { - "minServerVersion": "3.4" + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] } ], "tests": [ { "description": "Aggregate with $out omits default write concern", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "name": "aggregate", "arguments": { "pipeline": [ @@ -42,37 +80,45 @@ } } ], - "outcome": { - "collection": { - "name": "other_collection_name", - "data": [ + "expectEvents": [ + { + "client": "client0", + "events": [ { - "_id": 2, - "x": 22 + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_collection_name" + } + ], + "writeConcern": { + "$$exists": false + } + } + } } ] } - }, - "expectations": [ + ], + "outcome": [ { - "command_started_event": { - "command": { - "aggregate": "default_write_concern_coll", - 
"pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$out": "other_collection_name" - } - ], - "writeConcern": null + "collectionName": "other_collection_name", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 2, + "x": 22 } - } + ] } ] }, @@ -80,39 +126,43 @@ "description": "RunCommand with a write command omits default write concern (runCommand should never inherit write concern)", "operations": [ { - "object": "database", - "databaseOptions": { - "writeConcern": {} - }, + "object": "database0", "name": "runCommand", - "command_name": "delete", "arguments": { "command": { - "delete": "default_write_concern_coll", + "delete": "coll", "deletes": [ { "q": {}, "limit": 1 } ] - } + }, + "commandName": "delete" } } ], - "expectations": [ + "expectEvents": [ { - "command_started_event": { - "command": { - "delete": "default_write_concern_coll", - "deletes": [ - { - "q": {}, - "limit": 1 + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "coll", + "deletes": [ + { + "q": {}, + "limit": 1 + } + ], + "writeConcern": { + "$$exists": false + } } - ], - "writeConcern": null + } } - } + ] } ] }, @@ -120,10 +170,7 @@ "description": "CreateIndex and dropIndex omits default write concern", "operations": [ { - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "name": "createIndex", "arguments": { "keys": { @@ -132,53 +179,61 @@ } }, { - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "name": "dropIndex", "arguments": { "name": "x_1" } } ], - "expectations": [ + "expectEvents": [ { - "command_started_event": { - "command": { - "createIndexes": "default_write_concern_coll", - "indexes": [ - { - "name": "x_1", - "key": { - "x": 1 + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "createIndexes": "coll", + "indexes": [ + { + "name": "x_1", + "key": { + "x": 1 + } + } + ], + "writeConcern": { + "$$exists": false } } - ], - "writeConcern": null - } - } - }, - { - "command_started_event": { - "command": { - "dropIndexes": "default_write_concern_coll", - "index": "x_1", - "writeConcern": null + } + }, + { + "commandStartedEvent": { + "command": { + "dropIndexes": "coll", + "index": "x_1", + "writeConcern": { + "$$exists": false + } + } + } } - } + ] } ] }, { "description": "MapReduce omits default write concern", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "mapReduce", - "object": "collection", - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "arguments": { "map": { "$code": "function inc() { return emit(0, this.x + 1) }" @@ -192,23 +247,30 @@ } } ], - "expectations": [ + "expectEvents": [ { - "command_started_event": { - "command": { - "mapReduce": "default_write_concern_coll", - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - }, - "writeConcern": null + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + }, + "writeConcern": { + "$$exists": false + } + } + } } - } + ] } ] } diff --git 
a/test/read_write_concern/operation/default-write-concern-4.2.json b/test/read_write_concern/operation/default-write-concern-4.2.json index fef192d1a3..e8bb78d91d 100644 --- a/test/read_write_concern/operation/default-write-concern-4.2.json +++ b/test/read_write_concern/operation/default-write-concern-4.2.json @@ -1,19 +1,55 @@ { - "data": [ + "description": "default-write-concern-4.2", + "schemaVersion": "1.0", + "runOnRequirements": [ { - "_id": 1, - "x": 11 + "minServerVersion": "4.2" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } }, { - "_id": 2, - "x": 22 + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default-write-concern-tests", + "databaseOptions": { + "writeConcern": {} + } + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll", + "collectionOptions": { + "writeConcern": {} + } + } } ], - "collection_name": "default_write_concern_coll", - "database_name": "default_write_concern_db", - "runOn": [ + "initialData": [ { - "minServerVersion": "4.2" + "collectionName": "coll", + "databaseName": "default-write-concern-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] } ], "tests": [ @@ -21,13 +57,7 @@ "description": "Aggregate with $merge omits default write concern", "operations": [ { - "object": "collection", - "databaseOptions": { - "writeConcern": {} - }, - "collectionOptions": { - "writeConcern": {} - }, + "object": "collection0", "name": "aggregate", "arguments": { "pipeline": [ @@ -47,41 +77,49 @@ } } ], - "expectations": [ + "expectEvents": [ { - "command_started_event": { - "command": { - "aggregate": "default_write_concern_coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "other_collection_name" + } } - } - }, - { - "$merge": { - "into": "other_collection_name" + ], + "writeConcern": { + "$$exists": false } } - ], - "writeConcern": null + } } - } + ] } ], - "outcome": { - "collection": { - "name": "other_collection_name", - "data": [ + "outcome": [ + { + "collectionName": "other_collection_name", + "databaseName": "default-write-concern-tests", + "documents": [ { "_id": 2, "x": 22 } ] } - } + ] } ] } diff --git a/test/retryable_reads/legacy/aggregate-merge.json b/test/retryable_reads/legacy/aggregate-merge.json deleted file mode 100644 index b401d741ba..0000000000 --- a/test/retryable_reads/legacy/aggregate-merge.json +++ /dev/null @@ -1,98 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.11" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Aggregate with $merge does not retry", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "object": "collection", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - }, - { - "$merge": { - "into": "output-collection" - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { 
- "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - }, - { - "$merge": { - "into": "output-collection" - } - } - ] - }, - "command_name": "aggregate", - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/aggregate-serverErrors.json b/test/retryable_reads/legacy/aggregate-serverErrors.json deleted file mode 100644 index 1155f808dc..0000000000 --- a/test/retryable_reads/legacy/aggregate-serverErrors.json +++ /dev/null @@ -1,1208 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Aggregate succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": 
[ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - 
"description": "Aggregate succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/aggregate.json b/test/retryable_reads/legacy/aggregate.json deleted file mode 100644 index f23d5c6793..0000000000 --- a/test/retryable_reads/legacy/aggregate.json +++ /dev/null @@ -1,406 +0,0 @@ -{ - "runOn": [ - { - 
"minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Aggregate succeeds on first attempt", - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "result": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - 
"pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Aggregate with $out does not retry", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - }, - { - "$out": "output-collection" - } - ] - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": { - "_id": { - "$gt": 1 - } - } - }, - { - "$sort": { - "x": 1 - } - }, - { - "$out": "output-collection" - } - ] - }, - "command_name": "aggregate", - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json deleted file mode 100644 index 73dbfee916..0000000000 --- a/test/retryable_reads/legacy/changeStreams-client.watch-serverErrors.json +++ /dev/null @@ -1,740 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "client.watch succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - 
} - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after HostNotFound", - "failPoint": { - 
"configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { 
- "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/changeStreams-client.watch.json b/test/retryable_reads/legacy/changeStreams-client.watch.json deleted file mode 100644 index 30a53037ad..0000000000 --- a/test/retryable_reads/legacy/changeStreams-client.watch.json +++ /dev/null @@ -1,209 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "client.watch succeeds on first attempt", - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - }, - { - "description": "client.watch fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - 
"$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": { - "allChangesForCluster": true - } - } - ] - }, - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json deleted file mode 100644 index 77b3af04f4..0000000000 --- a/test/retryable_reads/legacy/changeStreams-db.coll.watch-serverErrors.json +++ /dev/null @@ -1,690 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "db.coll.watch succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "watch", - 
"object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - 
"aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/changeStreams-db.coll.watch.json b/test/retryable_reads/legacy/changeStreams-db.coll.watch.json deleted file mode 100644 index 
27f6105a4b..0000000000 --- a/test/retryable_reads/legacy/changeStreams-db.coll.watch.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "db.coll.watch succeeds on first attempt", - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.coll.watch fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json b/test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json deleted file mode 100644 index 7a87534508..0000000000 --- a/test/retryable_reads/legacy/changeStreams-db.watch-serverErrors.json +++ /dev/null @@ -1,690 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - 
"x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "db.watch succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { 
- "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": 
{} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "watch", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/changeStreams-db.watch.json b/test/retryable_reads/legacy/changeStreams-db.watch.json deleted file mode 100644 index e6b0b9b781..0000000000 --- a/test/retryable_reads/legacy/changeStreams-db.watch.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "db.watch succeeds on first attempt", - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": 
{} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "db.watch fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "watch", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": 1, - "cursor": {}, - "pipeline": [ - { - "$changeStream": {} - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/count-serverErrors.json b/test/retryable_reads/legacy/count-serverErrors.json deleted file mode 100644 index 36a0c17cab..0000000000 --- a/test/retryable_reads/legacy/count-serverErrors.json +++ /dev/null @@ -1,586 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "Count succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after NotWritablePrimary", - "failPoint": { 
- "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - 
"command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/count.json b/test/retryable_reads/legacy/count.json deleted file mode 100644 index 139a545131..0000000000 --- a/test/retryable_reads/legacy/count.json +++ /dev/null @@ -1,179 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - 
"minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "Count succeeds on first attempt", - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Count fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "count", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/countDocuments-serverErrors.json b/test/retryable_reads/legacy/countDocuments-serverErrors.json deleted file mode 100644 index 782ea5e4f1..0000000000 --- a/test/retryable_reads/legacy/countDocuments-serverErrors.json +++ /dev/null @@ -1,911 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "CountDocuments succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - 
{ - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { 
- "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } 
- }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": 
"retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/countDocuments.json b/test/retryable_reads/legacy/countDocuments.json deleted file mode 100644 index 57a64e45b7..0000000000 --- a/test/retryable_reads/legacy/countDocuments.json +++ /dev/null @@ -1,257 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "CountDocuments succeeds on first attempt", - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "CountDocuments fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": {} - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "coll", - "pipeline": [ - { - "$match": {} - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ] - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} 
diff --git a/test/retryable_reads/legacy/distinct-serverErrors.json b/test/retryable_reads/legacy/distinct-serverErrors.json deleted file mode 100644 index d7c6018a62..0000000000 --- a/test/retryable_reads/legacy/distinct-serverErrors.json +++ /dev/null @@ -1,838 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Distinct succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": 
[ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - 
"command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": 
false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/distinct.json b/test/retryable_reads/legacy/distinct.json deleted file mode 100644 index 1fd415da81..0000000000 --- a/test/retryable_reads/legacy/distinct.json +++ /dev/null @@ -1,245 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "tests": [ - { - "description": "Distinct succeeds on first attempt", - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "result": [ - 22, - 33 - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Distinct fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "distinct" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "x", - "filter": { - "_id": { - "$gt": 1 - } - } - }, - "error": true 
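For the distinct fixtures the seeded data is {_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, so distinct over "x" filtered on _id > 1 yields [22, 33]. The operation under test, sketched in PyMongo terms against that seed data:

```python
from pymongo import MongoClient

coll = MongoClient()["retryable-reads-tests"]["coll"]

# Mirrors the fixtures' "distinct" operation and its asserted result.
values = coll.distinct("x", {"_id": {"$gt": 1}})
assert values == [22, 33]
```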
- } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "coll", - "key": "x", - "query": { - "_id": { - "$gt": 1 - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json b/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json deleted file mode 100644 index 6bb128f5f3..0000000000 --- a/test/retryable_reads/legacy/estimatedDocumentCount-serverErrors.json +++ /dev/null @@ -1,546 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "EstimatedDocumentCount succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": 
"retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - 
"command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/estimatedDocumentCount.json b/test/retryable_reads/legacy/estimatedDocumentCount.json deleted file mode 100644 index 8dfa15a2cd..0000000000 --- a/test/retryable_reads/legacy/estimatedDocumentCount.json +++ /dev/null @@ -1,166 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "EstimatedDocumentCount succeeds on first attempt", - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "result": 2 - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": 
"coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "EstimatedDocumentCount fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "count" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "estimatedDocumentCount", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "count": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/find-serverErrors.json b/test/retryable_reads/legacy/find-serverErrors.json deleted file mode 100644 index f6b96c6dcb..0000000000 --- a/test/retryable_reads/legacy/find-serverErrors.json +++ /dev/null @@ -1,962 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "Find succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - 
"database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": 
{ - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/find.json b/test/retryable_reads/legacy/find.json deleted file mode 100644 index 00d419c0da..0000000000 --- a/test/retryable_reads/legacy/find.json +++ /dev/null @@ -1,348 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "Find succeeds on first attempt", - "operations": [ - { - "name": "find", - "object": "collection", 
- "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds on second attempt with explicit clientOptions", - "clientOptions": { - "retryReads": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "result": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Find fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "find", - "object": "collection", - "arguments": { - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } 
- }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": {}, - "sort": { - "_id": 1 - }, - "limit": 4 - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/findOne-serverErrors.json b/test/retryable_reads/legacy/findOne-serverErrors.json deleted file mode 100644 index d039ef247e..0000000000 --- a/test/retryable_reads/legacy/findOne-serverErrors.json +++ /dev/null @@ -1,732 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "FindOne succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - 
"result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/findOne.json b/test/retryable_reads/legacy/findOne.json deleted file mode 100644 index b9deb73d2a..0000000000 --- a/test/retryable_reads/legacy/findOne.json +++ /dev/null @@ -1,223 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - 
"sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - }, - { - "_id": 5, - "x": 55 - } - ], - "tests": [ - { - "description": "FindOne succeeds on first attempt", - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": { - "_id": 1, - "x": 11 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "FindOne fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "findOne", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "coll", - "filter": { - "_id": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/gridfs-download-serverErrors.json b/test/retryable_reads/legacy/gridfs-download-serverErrors.json deleted file mode 100644 index cec3a5016a..0000000000 --- a/test/retryable_reads/legacy/gridfs-download-serverErrors.json +++ /dev/null @@ -1,925 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "bucket_name": "fs", - "data": { - "fs.files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "filename": "abc", - "metadata": {} - } - ], - "fs.chunks": [ - { - "_id": { - 
"$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - }, - "tests": [ - { - "description": "Download succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13435 - } - 
}, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { 
- "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": 
{ - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/gridfs-download.json b/test/retryable_reads/legacy/gridfs-download.json deleted file mode 100644 index 4d0d5a17e4..0000000000 --- a/test/retryable_reads/legacy/gridfs-download.json +++ /dev/null @@ -1,270 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "bucket_name": "fs", - "data": { - "fs.files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "filename": "abc", - "metadata": {} - } - ], - "fs.chunks": [ - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - }, - "tests": [ - { - "description": "Download succeeds on first attempt", - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - 
"arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "Download fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download", - "object": "gridfsbucket", - "arguments": { - "id": { - "$oid": "000000000000000000000001" - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "_id": { - "$oid": "000000000000000000000001" - } - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json b/test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json deleted file mode 100644 index a64230d38a..0000000000 --- a/test/retryable_reads/legacy/gridfs-downloadByName-serverErrors.json +++ /dev/null @@ -1,849 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } 
- ], - "database_name": "retryable-reads-tests", - "bucket_name": "fs", - "data": { - "fs.files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "filename": "abc", - "metadata": {} - } - ], - "fs.chunks": [ - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - }, - "tests": [ - { - "description": "DownloadByName succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": 
{ - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": 
"000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - 
"find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/gridfs-downloadByName.json b/test/retryable_reads/legacy/gridfs-downloadByName.json deleted file mode 100644 index 48f2168cfc..0000000000 --- a/test/retryable_reads/legacy/gridfs-downloadByName.json +++ /dev/null @@ -1,250 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "bucket_name": "fs", - "data": { - "fs.files": [ - { - "_id": { - "$oid": "000000000000000000000001" - }, - "length": 1, - "chunkSize": 4, - "uploadDate": { - "$date": "1970-01-01T00:00:00.000Z" - }, - "filename": "abc", - "metadata": {} - } - ], - "fs.chunks": [ - { - "_id": { - "$oid": "000000000000000000000002" - }, - "files_id": { - "$oid": "000000000000000000000001" - }, - "n": 0, - "data": { - "$binary": { - "base64": "EQ==", - "subType": "00" - } - } - } - ] - }, - "tests": [ - { - "description": "DownloadByName succeeds on first attempt", - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ 
- "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.chunks", - "filter": { - "files_id": { - "$oid": "000000000000000000000001" - } - }, - "sort": { - "n": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "DownloadByName fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "download_by_name", - "object": "gridfsbucket", - "arguments": { - "filename": "abc" - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "fs.files", - "filter": { - "filename": "abc" - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listCollectionNames-serverErrors.json b/test/retryable_reads/legacy/listCollectionNames-serverErrors.json deleted file mode 100644 index bbdce625ad..0000000000 --- a/test/retryable_reads/legacy/listCollectionNames-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollectionNames succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - 
"name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after HostUnreachable", - "failPoint": { - 
"configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listCollectionNames.json b/test/retryable_reads/legacy/listCollectionNames.json deleted file mode 100644 index 73d96a3cf7..0000000000 --- a/test/retryable_reads/legacy/listCollectionNames.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollectionNames succeeds on first attempt", - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - 
"mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionNames fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionNames", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listCollectionObjects-serverErrors.json b/test/retryable_reads/legacy/listCollectionObjects-serverErrors.json deleted file mode 100644 index ab469dfe30..0000000000 --- a/test/retryable_reads/legacy/listCollectionObjects-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollectionObjects succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - 
"data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listCollectionObjects.json b/test/retryable_reads/legacy/listCollectionObjects.json deleted file mode 100644 index 1fb0f18437..0000000000 --- a/test/retryable_reads/legacy/listCollectionObjects.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollectionObjects succeeds on first attempt", - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": 
"failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollectionObjects fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollectionObjects", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listCollections-serverErrors.json b/test/retryable_reads/legacy/listCollections-serverErrors.json deleted file mode 100644 index def9ac4595..0000000000 --- a/test/retryable_reads/legacy/listCollections-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollections succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - 
"command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listCollections.json b/test/retryable_reads/legacy/listCollections.json deleted file mode 100644 index 2427883621..0000000000 --- a/test/retryable_reads/legacy/listCollections.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListCollections succeeds on first attempt", - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - }, - { - "description": "ListCollections fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listCollections" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listCollections", - "object": "database", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listCollections": 1 - } - 
} - }, - { - "command_started_event": { - "command": { - "listCollections": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listDatabaseNames-serverErrors.json b/test/retryable_reads/legacy/listDatabaseNames-serverErrors.json deleted file mode 100644 index 1dd8e4415a..0000000000 --- a/test/retryable_reads/legacy/listDatabaseNames-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabaseNames succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - 
"errorCode": 189 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - 
"retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listDatabaseNames.json b/test/retryable_reads/legacy/listDatabaseNames.json deleted file mode 100644 index b431f57016..0000000000 --- a/test/retryable_reads/legacy/listDatabaseNames.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabaseNames succeeds on first attempt", - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseNames fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseNames", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json b/test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json deleted file mode 100644 index bc497bb088..0000000000 --- a/test/retryable_reads/legacy/listDatabaseObjects-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabaseObjects succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - 
}, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - 
"description": "ListDatabaseObjects succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listDatabaseObjects.json b/test/retryable_reads/legacy/listDatabaseObjects.json deleted file mode 100644 index 267fe921ca..0000000000 --- a/test/retryable_reads/legacy/listDatabaseObjects.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": 
"4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabaseObjects succeeds on first attempt", - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabaseObjects fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabaseObjects", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listDatabases-serverErrors.json b/test/retryable_reads/legacy/listDatabases-serverErrors.json deleted file mode 100644 index ed7bcbc398..0000000000 --- a/test/retryable_reads/legacy/listDatabases-serverErrors.json +++ /dev/null @@ -1,502 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabases succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - 
"command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - 
"listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listDatabases.json b/test/retryable_reads/legacy/listDatabases.json deleted file mode 100644 index 69ef9788f8..0000000000 --- a/test/retryable_reads/legacy/listDatabases.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListDatabases succeeds on first attempt", - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases fails 
on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - }, - { - "description": "ListDatabases fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listDatabases" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listDatabases", - "object": "client", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - }, - { - "command_started_event": { - "command": { - "listDatabases": 1 - } - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listIndexNames-serverErrors.json b/test/retryable_reads/legacy/listIndexNames-serverErrors.json deleted file mode 100644 index 2d3265ec85..0000000000 --- a/test/retryable_reads/legacy/listIndexNames-serverErrors.json +++ /dev/null @@ -1,527 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListIndexNames succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - 
"mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": 
"ListIndexNames succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listIndexNames.json b/test/retryable_reads/legacy/listIndexNames.json deleted file mode 100644 index fbdb420f8a..0000000000 --- a/test/retryable_reads/legacy/listIndexNames.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListIndexNames succeeds on first attempt", - "operations": [ - { - "name": "listIndexNames", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexNames", - 
"object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexNames fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexNames", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listIndexes-serverErrors.json b/test/retryable_reads/legacy/listIndexes-serverErrors.json deleted file mode 100644 index 25c5b0e448..0000000000 --- a/test/retryable_reads/legacy/listIndexes-serverErrors.json +++ /dev/null @@ -1,527 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListIndexes succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 11600 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 11602 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - 
{ - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 13435 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 13436 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 189 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 91 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 7 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ 
- "listIndexes" - ], - "errorCode": 6 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 89 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 9001 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes fails after two NotWritablePrimary errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes fails after NotWritablePrimary when retryReads is false", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/listIndexes.json b/test/retryable_reads/legacy/listIndexes.json deleted file mode 100644 index 5cb620ae45..0000000000 --- a/test/retryable_reads/legacy/listIndexes.json +++ /dev/null @@ -1,156 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [], - "tests": [ - { - "description": "ListIndexes succeeds on first attempt", - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - 
"command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes succeeds on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes fails on first attempt", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "ListIndexes fails on second attempt", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "listIndexes" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "listIndexes", - "object": "collection", - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - }, - { - "command_started_event": { - "command": { - "listIndexes": "coll" - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/legacy/mapReduce.json b/test/retryable_reads/legacy/mapReduce.json deleted file mode 100644 index 9327a23052..0000000000 --- a/test/retryable_reads/legacy/mapReduce.json +++ /dev/null @@ -1,189 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "single", - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ], - "serverless": "forbid" - } - ], - "database_name": "retryable-reads-tests", - "collection_name": "coll", - "data": [ - { - "_id": 1, - "x": 0 - }, - { - "_id": 2, - "x": 1 - }, - { - "_id": 3, - "x": 2 - } - ], - "tests": [ - { - "description": "MapReduce succeeds with retry on", - "operations": [ - { - "name": "mapReduce", - "object": "collection", - "arguments": { - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "result": [ - { - "_id": 0, - "value": 6 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "mapReduce": "coll", - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "MapReduce fails with retry on", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "mapReduce" - ], - "closeConnection": true - } - }, - "operations": [ - { - 
"name": "mapReduce", - "object": "collection", - "arguments": { - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "mapReduce": "coll", - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - }, - { - "description": "MapReduce fails with retry off", - "clientOptions": { - "retryReads": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "mapReduce" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "mapReduce", - "object": "collection", - "arguments": { - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "error": true - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "mapReduce": "coll", - "map": { - "$code": "function inc() { return emit(0, this.x + 1) }" - }, - "reduce": { - "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" - }, - "out": { - "inline": 1 - } - }, - "database_name": "retryable-reads-tests" - } - } - ] - } - ] -} diff --git a/test/retryable_reads/unified/aggregate-merge.json b/test/retryable_reads/unified/aggregate-merge.json new file mode 100644 index 0000000000..96bbd0fc38 --- /dev/null +++ b/test/retryable_reads/unified/aggregate-merge.json @@ -0,0 +1,143 @@ +{ + "description": "aggregate-merge", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $merge does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": "output-collection" + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$merge": { + "into": 
"output-collection" + } + } + ] + }, + "commandName": "aggregate", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/aggregate-serverErrors.json b/test/retryable_reads/unified/aggregate-serverErrors.json new file mode 100644 index 0000000000..d39835a5d3 --- /dev/null +++ b/test/retryable_reads/unified/aggregate-serverErrors.json @@ -0,0 +1,1430 @@ +{ + "description": "aggregate-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { 
+ "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } 
+ ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + 
} + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate 
fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/aggregate.json b/test/retryable_reads/unified/aggregate.json new file mode 100644 index 0000000000..2b504c8d49 --- /dev/null +++ b/test/retryable_reads/unified/aggregate.json @@ -0,0 +1,527 @@ +{ + "description": "aggregate", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + 
"_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + 
}, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Aggregate with $out does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "output-collection" + } + ] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$sort": { + "x": 1 + } + }, + { + "$out": "output-collection" + } + ] + }, + "commandName": "aggregate", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-client.watch-serverErrors.json b/test/retryable_reads/unified/changeStreams-client.watch-serverErrors.json new file mode 100644 index 0000000000..47375974d2 --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-client.watch-serverErrors.json @@ -0,0 +1,959 @@ +{ + "description": "changeStreams-client.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "tests": [ + { + "description": "client.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + 
"pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + 
"aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": 
"client.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-client.watch.json b/test/retryable_reads/unified/changeStreams-client.watch.json new file mode 100644 index 0000000000..95ddaf921d --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-client.watch.json @@ -0,0 +1,294 @@ +{ + "description": "changeStreams-client.watch", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "tests": [ + { + "description": "client.watch succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", 
+ "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "client.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": { + "allChangesForCluster": true + } + } + ] + }, + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-db.coll.watch-serverErrors.json b/test/retryable_reads/unified/changeStreams-db.coll.watch-serverErrors.json new file mode 100644 index 0000000000..589d0a3c37 --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-db.coll.watch-serverErrors.json @@ -0,0 +1,944 @@ +{ + "description": "changeStreams-db.coll.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.coll.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + 
"pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + 
}, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + 
"databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-db.coll.watch.json b/test/retryable_reads/unified/changeStreams-db.coll.watch.json new file mode 100644 index 0000000000..bbea2ffe4f --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-db.coll.watch.json @@ -0,0 +1,314 @@ +{ + "description": "changeStreams-db.coll.watch", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.coll.watch succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": 
"client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.coll.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-db.watch-serverErrors.json b/test/retryable_reads/unified/changeStreams-db.watch-serverErrors.json new file mode 100644 index 0000000000..6c12d7ddd8 --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-db.watch-serverErrors.json @@ -0,0 +1,930 @@ +{ + "description": "changeStreams-db.watch-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", 
+ "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.watch succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + 
"pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, 
+ "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/changeStreams-db.watch.json b/test/retryable_reads/unified/changeStreams-db.watch.json new file mode 100644 index 0000000000..1b6d911c76 --- /dev/null +++ b/test/retryable_reads/unified/changeStreams-db.watch.json @@ -0,0 +1,303 @@ +{ + "description": "changeStreams-db.watch", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "db.watch succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + 
} + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "db.watch fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "createChangeStream", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": 1, + "cursor": {}, + "pipeline": [ + { + "$changeStream": {} + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/count-serverErrors.json b/test/retryable_reads/unified/count-serverErrors.json new file mode 100644 index 0000000000..c52edfdb98 --- /dev/null +++ b/test/retryable_reads/unified/count-serverErrors.json @@ -0,0 +1,808 @@ +{ + "description": "count-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + 
"collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Count succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": 
"testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + 
"isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/count.json b/test/retryable_reads/unified/count.json new file mode 100644 index 0000000000..d5c9a343a9 --- /dev/null +++ b/test/retryable_reads/unified/count.json @@ -0,0 +1,286 @@ +{ + "description": "count", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "Count succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": 
"coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Count fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/countDocuments-serverErrors.json b/test/retryable_reads/unified/countDocuments-serverErrors.json new file mode 100644 index 0000000000..fd028b114c --- /dev/null +++ b/test/retryable_reads/unified/countDocuments-serverErrors.json @@ -0,0 +1,1133 @@ +{ + "description": "countDocuments-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "CountDocuments succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], 
+ "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + 
}, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + 
"arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} 
+ }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/countDocuments.json b/test/retryable_reads/unified/countDocuments.json new file mode 100644 index 0000000000..e06e89c1ad --- /dev/null +++ b/test/retryable_reads/unified/countDocuments.json @@ -0,0 +1,364 @@ +{ + "description": "countDocuments", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "CountDocuments succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + 
"expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "CountDocuments fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": {} + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$match": {} + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ] + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/distinct-serverErrors.json b/test/retryable_reads/unified/distinct-serverErrors.json new file mode 100644 index 0000000000..79d2d5fc31 --- /dev/null +++ b/test/retryable_reads/unified/distinct-serverErrors.json @@ -0,0 +1,1060 @@ +{ + "description": "distinct-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + 
"useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Distinct succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotPrimaryNoSecondaryOk", 
+ "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { 
+ "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + 
"name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/distinct.json b/test/retryable_reads/unified/distinct.json new file mode 100644 index 0000000000..81f1f66e91 --- /dev/null +++ b/test/retryable_reads/unified/distinct.json @@ -0,0 +1,352 @@ +{ + "description": "distinct", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { 
+ "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Distinct succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectResult": [ + 22, + 33 + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Distinct fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + 
"closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "x", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "coll", + "key": "x", + "query": { + "_id": { + "$gt": 1 + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/estimatedDocumentCount-serverErrors.json b/test/retryable_reads/unified/estimatedDocumentCount-serverErrors.json new file mode 100644 index 0000000000..ba983c6cdf --- /dev/null +++ b/test/retryable_reads/unified/estimatedDocumentCount-serverErrors.json @@ -0,0 +1,768 @@ +{ + "description": "estimatedDocumentCount-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "EstimatedDocumentCount succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": 
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + 
}, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + 
"data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "estimatedDocumentCount", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/estimatedDocumentCount.json b/test/retryable_reads/unified/estimatedDocumentCount.json new file mode 100644 index 0000000000..2ee29f6799 --- /dev/null +++ b/test/retryable_reads/unified/estimatedDocumentCount.json @@ -0,0 +1,273 @@ +{ + "description": "estimatedDocumentCount", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "EstimatedDocumentCount succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": 
true + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectResult": 2 + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "estimatedDocumentCount", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "EstimatedDocumentCount fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "estimatedDocumentCount", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "count": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/exceededTimeLimit.json b/test/retryable_reads/unified/exceededTimeLimit.json new file mode 100644 index 0000000000..8d090bbe3f --- /dev/null +++ b/test/retryable_reads/unified/exceededTimeLimit.json @@ -0,0 +1,147 @@ +{ + "description": "ExceededTimeLimit is a retryable read", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "exceededtimelimit-test" + } + } + ], + "initialData": [ + { + "collectionName": "exceededtimelimit-test", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, 
+ { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on second attempt after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 262 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "exceededtimelimit-test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "exceededtimelimit-test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/find-serverErrors.json b/test/retryable_reads/unified/find-serverErrors.json new file mode 100644 index 0000000000..ab3dbe45f4 --- /dev/null +++ b/test/retryable_reads/unified/find-serverErrors.json @@ -0,0 +1,1184 @@ +{ + "description": "find-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + 
"client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, 
+ { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + 
"find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { 
+ "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/find.json b/test/retryable_reads/unified/find.json new file mode 100644 index 0000000000..30c4c5e478 --- /dev/null +++ b/test/retryable_reads/unified/find.json @@ -0,0 +1,498 @@ +{ + "description": "find", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds on second attempt with explicit clientOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": true + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Find fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4 + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/findOne-serverErrors.json b/test/retryable_reads/unified/findOne-serverErrors.json new file mode 100644 index 0000000000..7adda1e32b --- /dev/null +++ b/test/retryable_reads/unified/findOne-serverErrors.json @@ -0,0 +1,954 @@ +{ + "description": "findOne-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "FindOne succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + 
"_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } 
+ }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + 
] + } + ] + }, + { + "description": "FindOne succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": 
"findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/findOne.json b/test/retryable_reads/unified/findOne.json new file mode 100644 index 0000000000..4314a19e46 --- /dev/null +++ b/test/retryable_reads/unified/findOne.json @@ -0,0 +1,330 @@ +{ + "description": "findOne", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "FindOne succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "FindOne fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": { + "_id": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/gridfs-download-serverErrors.json b/test/retryable_reads/unified/gridfs-download-serverErrors.json new file mode 100644 index 0000000000..5bb7eee0b2 --- /dev/null +++ b/test/retryable_reads/unified/gridfs-download-serverErrors.json @@ -0,0 +1,1092 @@ +{ + "description": "gridfs-download-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Download succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": 
"retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": 
"retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds after 
SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket1", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/gridfs-download.json b/test/retryable_reads/unified/gridfs-download.json new file mode 100644 index 0000000000..69fe8ff7c8 --- /dev/null +++ 
b/test/retryable_reads/unified/gridfs-download.json @@ -0,0 +1,367 @@ +{ + "description": "gridfs-download", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "Download succeeds on first attempt", + "operations": [ + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { 
+ "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket1", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "Download fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "download", + "arguments": { + "id": { + "$oid": "000000000000000000000001" + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "_id": { + "$oid": "000000000000000000000001" + } + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/gridfs-downloadByName-serverErrors.json b/test/retryable_reads/unified/gridfs-downloadByName-serverErrors.json new file mode 100644 index 0000000000..35f7e1e563 --- /dev/null +++ b/test/retryable_reads/unified/gridfs-downloadByName-serverErrors.json @@ -0,0 +1,1016 @@ +{ + "description": "gridfs-downloadByName-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "DownloadByName succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + 
"description": "DownloadByName succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + 
"$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "bucket1", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/gridfs-downloadByName.json b/test/retryable_reads/unified/gridfs-downloadByName.json new file mode 100644 index 0000000000..c3fa873396 --- /dev/null +++ b/test/retryable_reads/unified/gridfs-downloadByName.json @@ -0,0 +1,347 @@ +{ + "description": "gridfs-downloadByName", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": 
"bucket0", + "database": "database0" + } + } + ], + "initialData": [ + { + "collectionName": "fs.files", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000001" + }, + "length": 1, + "chunkSize": 4, + "uploadDate": { + "$date": "1970-01-01T00:00:00.000Z" + }, + "filename": "abc", + "metadata": {} + } + ] + }, + { + "collectionName": "fs.chunks", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": { + "$oid": "000000000000000000000002" + }, + "files_id": { + "$oid": "000000000000000000000001" + }, + "n": 0, + "data": { + "$binary": { + "base64": "EQ==", + "subType": "00" + } + } + } + ] + } + ], + "tests": [ + { + "description": "DownloadByName succeeds on first attempt", + "operations": [ + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectResult": { + "$$matchesHexBytes": "11" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.chunks" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "DownloadByName fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "bucket": { + "id": "bucket1", + "database": "database1" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket1", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": 
"DownloadByName fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "object": "bucket0", + "name": "downloadByName", + "arguments": { + "filename": "abc" + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "fs.files", + "filter": { + "filename": "abc" + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/handshakeError.json b/test/retryable_reads/unified/handshakeError.json index 2cf1d173f8..2921d8a954 100644 --- a/test/retryable_reads/unified/handshakeError.json +++ b/test/retryable_reads/unified/handshakeError.json @@ -1,6 +1,6 @@ { "description": "retryable reads handshake failures", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.2", @@ -15,25 +15,27 @@ "createEntities": [ { "client": { - "id": "client0", + "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "connectionCheckOutStartedEvent", "commandStartedEvent", - "connectionCheckOutStartedEvent" + "commandSucceededEvent", + "commandFailedEvent" ] } }, { "database": { - "id": "database0", - "client": "client0", - "databaseName": "retryable-handshake-tests" + "id": "database", + "client": "client", + "databaseName": "retryable-reads-handshake-tests" } }, { "collection": { - "id": "collection0", - "database": "database0", + "id": "collection", + "database": "database", "collectionName": "coll" } } @@ -41,7 +43,7 @@ "initialData": [ { "collectionName": "coll", - "databaseName": "retryable-handshake-tests", + "databaseName": "retryable-reads-handshake-tests", "documents": [ { "_id": 1, @@ -60,13 +62,13 @@ ], "tests": [ { - "description": "find succeeds after retryable handshake network error", + "description": "client.listDatabases succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -74,8 +76,8 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], "closeConnection": true } @@ -84,7 +86,2184 @@ }, { "name": "runCommand", - "object": "database0", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": 
"listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabases succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabaseNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.listDatabaseNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + 
"connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases" + } + }, + { + "commandSucceededEvent": { + "commandName": "listDatabases" + } + } + ] + } + ] + }, + { + "description": "client.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "client.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.aggregate 
succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.listCollections succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + 
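
database.aggregate is the one read in this suite that does not target a collection; the fixtures pipe $listLocalSessions through $limit purely to have a first stage that a database-level aggregation accepts on any deployment. The PyMongo spelling, for reference (names illustrative):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client["retryable-reads-handshake-tests"]

# $listLocalSessions must be the first stage of a database-level aggregation;
# it reports sessions cached in memory on the connected server.
cursor = db.aggregate([{"$listLocalSessions": {}}, {"$limit": 1}])
print(list(cursor))
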
"filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollections succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollectionNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.listCollectionNames succeeds after retryable handshake server error 
(ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections" + } + }, + { + "commandSucceededEvent": { + "commandName": "listCollections" + } + } + ] + } + ] + }, + { + "description": "database.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "database.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } + ], + 
"expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.aggregate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.aggregate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.countDocuments succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.countDocuments succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "collection.estimatedDocumentCount succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { 
+ "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "collection.estimatedDocumentCount succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "count" + } + }, + { + "commandSucceededEvent": { + "commandName": "count" + } + } + ] + } + ] + }, + { + "description": "collection.distinct succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "collection.distinct succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + 
"isError": true + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "distinct" + } + }, + { + "commandSucceededEvent": { + "commandName": "distinct" + } + } + ] + } + ] + }, + { + "description": "collection.find succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.find succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -97,23 +2276,471 @@ }, { "name": "find", - "object": "collection0", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.findOne succeeds after retryable handshake network error", + "operations": [ + { + 
"name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.findOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "find" + } + }, + { + "commandSucceededEvent": { + "commandName": "find" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexes succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": 
{ + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" + } + } + ] + } + ] + }, + { + "description": "collection.listIndexes succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", "arguments": { - "filter": { - "_id": 2 + "commandName": "ping", + "command": { + "ping": 1 } }, - "expectResult": [ + "expectError": { + "isError": true + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, { - "_id": 2, - "x": 22 + "commandSucceededEvent": { + "commandName": "listIndexes" + } } ] } + ] + }, + { + "description": "collection.listIndexNames succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -131,25 +2758,119 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" } + } + ] + } + ] + }, + { + "description": "collection.listIndexNames succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "listIndexNames", + "object": "collection" + } 
+ ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ { "commandStartedEvent": { "command": { - "find": "coll", - "filter": { - "_id": 2 - } + "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes" + } + }, + { + "commandSucceededEvent": { + "commandName": "listIndexes" } } ] @@ -157,13 +2878,18 @@ ] }, { - "description": "find succeeds after retryable handshake network error (ShutdownInProgress)", + "description": "collection.createChangeStream succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -171,17 +2897,17 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], - "errorCode": 91 + "closeConnection": true } } } }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -193,24 +2919,116 @@ } }, { - "name": "find", - "object": "collection0", + "name": "createChangeStream", + "object": "collection", "arguments": { - "filter": { - "_id": 2 - } + "pipeline": [] }, - "expectResult": [ + "saveResultAsEntity": "changeStream" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-reads-handshake-tests" + } + }, { - "_id": 2, - "x": 22 + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" + } } ] } + ] + }, + { + "description": "collection.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "saveResultAsEntity": "changeStream" + } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -228,25 +3046,29 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + 
"databaseName": "retryable-reads-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" } }, { "commandStartedEvent": { - "command": { - "find": "coll", - "filter": { - "_id": 2 - } - }, - "databaseName": "retryable-handshake-tests" + "commandName": "aggregate" + } + }, + { + "commandSucceededEvent": { + "commandName": "aggregate" } } ] diff --git a/test/retryable_reads/unified/listCollectionNames-serverErrors.json b/test/retryable_reads/unified/listCollectionNames-serverErrors.json new file mode 100644 index 0000000000..162dd4cee0 --- /dev/null +++ b/test/retryable_reads/unified/listCollectionNames-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollectionNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + 
"name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git 
a/test/retryable_reads/unified/listCollectionNames.json b/test/retryable_reads/unified/listCollectionNames.json new file mode 100644 index 0000000000..0fe575f7a6 --- /dev/null +++ b/test/retryable_reads/unified/listCollectionNames.json @@ -0,0 +1,243 @@ +{ + "description": "listCollectionNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionNames succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollectionObjects-serverErrors.json b/test/retryable_reads/unified/listCollectionObjects-serverErrors.json new file mode 100644 index 0000000000..8b9d582c10 --- /dev/null +++ b/test/retryable_reads/unified/listCollectionObjects-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollectionObjects-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionObjects succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + 
"errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": 
"client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollectionObjects.json b/test/retryable_reads/unified/listCollectionObjects.json new file mode 100644 index 0000000000..9cdbb69276 --- /dev/null 
+++ b/test/retryable_reads/unified/listCollectionObjects.json @@ -0,0 +1,243 @@ +{ + "description": "listCollectionObjects", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollectionObjects succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollectionObjects fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollectionObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git 
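(Editor's illustration, not part of the diff: the "fails on first attempt" cases above disable retries via the retryReads=false URI option, so a single injected failure surfaces to the caller and only one commandStartedEvent is expected. A minimal sketch under the same assumptions as the previous example -- local test server with test commands enabled:)

from pymongo import MongoClient
from pymongo.errors import AutoReconnect

# retryReads=false disables retryable reads entirely.
client = MongoClient("mongodb://localhost:27017/?retryReads=false")

# closeConnection: true makes the server drop the connection instead of
# replying, which PyMongo surfaces as a network error (AutoReconnect).
client.admin.command({
    "configureFailPoint": "failCommand",
    "mode": {"times": 1},
    "data": {"failCommands": ["listCollections"], "closeConnection": True},
})

try:
    client["retryable-reads-tests"].list_collection_names()
except AutoReconnect:
    print("first attempt failed; no retry because retryReads=false")
finally:
    client.admin.command({"configureFailPoint": "failCommand", "mode": "off"})
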
a/test/retryable_reads/unified/listCollections-serverErrors.json b/test/retryable_reads/unified/listCollections-serverErrors.json new file mode 100644 index 0000000000..171fe7457f --- /dev/null +++ b/test/retryable_reads/unified/listCollections-serverErrors.json @@ -0,0 +1,710 @@ +{ + "description": "listCollections-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollections succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after NetworkTimeout", + 
"operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database0", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "database1", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listCollections.json b/test/retryable_reads/unified/listCollections.json new file mode 100644 index 0000000000..b6152f9ce5 --- /dev/null +++ b/test/retryable_reads/unified/listCollections.json @@ -0,0 +1,243 @@ +{ + "description": "listCollections", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] 
+ } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListCollections succeeds on first attempt", + "operations": [ + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollections" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database1", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListCollections fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "closeConnection": true + } + } + } + }, + { + "object": "database0", + "name": "listCollections", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabaseNames-serverErrors.json b/test/retryable_reads/unified/listDatabaseNames-serverErrors.json new file mode 100644 index 0000000000..489ff0ad51 --- /dev/null +++ b/test/retryable_reads/unified/listDatabaseNames-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabaseNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": 
"4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": 
"listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + 
} + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabaseNames.json b/test/retryable_reads/unified/listDatabaseNames.json new file mode 100644 index 0000000000..5590f39a51 --- /dev/null +++ b/test/retryable_reads/unified/listDatabaseNames.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabaseNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseNames succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames succeeds on second attempt", + 
"operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabaseObjects-serverErrors.json b/test/retryable_reads/unified/listDatabaseObjects-serverErrors.json new file mode 100644 index 0000000000..56f9f36236 --- /dev/null +++ b/test/retryable_reads/unified/listDatabaseObjects-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabaseObjects-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseObjects succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + 
"client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + 
"listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": 
{ + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabaseObjects.json b/test/retryable_reads/unified/listDatabaseObjects.json new file mode 100644 index 0000000000..46b1511d46 --- /dev/null +++ b/test/retryable_reads/unified/listDatabaseObjects.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabaseObjects", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabaseObjects succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + 
"uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabaseObjects fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabaseObjects", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabases-serverErrors.json b/test/retryable_reads/unified/listDatabases-serverErrors.json new file mode 100644 index 0000000000..09b935a59f --- /dev/null +++ b/test/retryable_reads/unified/listDatabases-serverErrors.json @@ -0,0 +1,696 @@ +{ + "description": "listDatabases-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabases succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + 
] + } + ] + }, + { + "description": "ListDatabases succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client0", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + 
"arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "client1", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listDatabases.json b/test/retryable_reads/unified/listDatabases.json new file mode 100644 index 0000000000..4cf5eccc7b --- /dev/null +++ b/test/retryable_reads/unified/listDatabases.json @@ -0,0 +1,229 @@ +{ + "description": "listDatabases", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListDatabases succeeds on first attempt", + "operations": [ + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabases" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client1", + "name": "listDatabases", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "ListDatabases fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "listDatabases", + 
"expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listIndexNames-serverErrors.json b/test/retryable_reads/unified/listIndexNames-serverErrors.json new file mode 100644 index 0000000000..7b98111480 --- /dev/null +++ b/test/retryable_reads/unified/listIndexNames-serverErrors.json @@ -0,0 +1,749 @@ +{ + "description": "listIndexNames-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexNames succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + 
"command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + 
], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": 
"createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listIndexNames.json b/test/retryable_reads/unified/listIndexNames.json new file mode 100644 index 0000000000..c5fe967ff5 --- /dev/null +++ b/test/retryable_reads/unified/listIndexNames.json @@ -0,0 +1,263 @@ +{ + "description": "listIndexNames", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexNames succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + 
"id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexNames fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexNames", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listIndexes-serverErrors.json b/test/retryable_reads/unified/listIndexes-serverErrors.json new file mode 100644 index 0000000000..0110a0acd0 --- /dev/null +++ b/test/retryable_reads/unified/listIndexes-serverErrors.json @@ -0,0 +1,749 @@ +{ + "description": "listIndexes-serverErrors", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexes succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11600 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": 
"testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 11602 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13435 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 13436 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 189 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + 
"command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 91 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 6 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 89 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 9001 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails after two NotWritablePrimary errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails after NotWritablePrimary when retryReads is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "collection1", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/listIndexes.json b/test/retryable_reads/unified/listIndexes.json new file mode 100644 index 0000000000..2560e4961c --- /dev/null +++ b/test/retryable_reads/unified/listIndexes.json @@ -0,0 +1,263 @@ +{ + "description": "listIndexes", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "ListIndexes succeeds on first attempt", + "operations": [ + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + 
"listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes succeeds on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails on first attempt", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "ListIndexes fails on second attempt", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "listIndexes", + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "listIndexes": "coll" + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/mapReduce.json b/test/retryable_reads/unified/mapReduce.json new file mode 100644 index 0000000000..745c0ef001 --- /dev/null +++ b/test/retryable_reads/unified/mapReduce.json @@ -0,0 +1,284 @@ +{ + "description": "mapReduce", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + 
"databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 0 + }, + { + "_id": 2, + "x": 1 + }, + { + "_id": 3, + "x": 2 + } + ] + } + ], + "tests": [ + { + "description": "MapReduce succeeds with retry on", + "operations": [ + { + "object": "collection0", + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectResult": [ + { + "_id": 0, + "value": 6 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "MapReduce fails with retry on", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "mapReduce" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + }, + { + "description": "MapReduce fails with retry off", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryReads": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "mapReduce" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "mapReduce", + "arguments": { + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ 
+ { + "commandStartedEvent": { + "command": { + "mapReduce": "coll", + "map": { + "$code": "function inc() { return emit(0, this.x + 1) }" + }, + "reduce": { + "$code": "function sum(key, values) { return values.reduce((acc, x) => acc + x); }" + }, + "out": { + "inline": 1 + } + }, + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json b/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json new file mode 100644 index 0000000000..8aa6a6b5e5 --- /dev/null +++ b/test/retryable_reads/unified/readConcernMajorityNotAvailableYet.json @@ -0,0 +1,147 @@ +{ + "description": "ReadConcernMajorityNotAvailableYet is a retryable read", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-reads-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "readconcernmajoritynotavailableyet_test" + } + } + ], + "initialData": [ + { + "collectionName": "readconcernmajoritynotavailableyet_test", + "databaseName": "retryable-reads-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Find succeeds on second attempt after ReadConcernMajorityNotAvailableYet", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 134 + } + } + } + }, + { + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "object": "collection0", + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "readconcernmajoritynotavailableyet_test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "readconcernmajoritynotavailableyet_test", + "filter": { + "_id": { + "$gt": 1 + } + } + }, + "commandName": "find", + "databaseName": "retryable-reads-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/legacy/bulkWrite-errorLabels.json b/test/retryable_writes/legacy/bulkWrite-errorLabels.json deleted file mode 100644 index 66c3ecb336..0000000000 --- a/test/retryable_writes/legacy/bulkWrite-errorLabels.json +++ /dev/null @@ -1,183 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "BulkWrite succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - 
"operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 1, - "insertedIds": { - "1": 3 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "BulkWrite fails if server does not return RetryableWriteError", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/bulkWrite-serverErrors.json b/test/retryable_writes/legacy/bulkWrite-serverErrors.json deleted file mode 100644 index 1e6cc74c05..0000000000 --- a/test/retryable_writes/legacy/bulkWrite-serverErrors.json +++ /dev/null @@ -1,273 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "BulkWrite succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 1, - "insertedIds": { - "1": 3 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "BulkWrite succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - 
} - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 1, - "insertedIds": { - "1": 3 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "BulkWrite fails with a RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "update" - ], - "closeConnection": true - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/bulkWrite.json b/test/retryable_writes/legacy/bulkWrite.json deleted file mode 100644 index 72a8d01893..0000000000 --- a/test/retryable_writes/legacy/bulkWrite.json +++ /dev/null @@ -1,806 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "First command is retried", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 1, - "insertedIds": { - "0": 2 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 23 - } - ] - } - } - }, - { - "description": "All commands are retried", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 7 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 4, - "x": 44 - }, - "update": { - "$inc": 
{ - "x": 1 - } - }, - "upsert": true - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 5, - "x": 55 - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 3 - }, - "replacement": { - "_id": 3, - "x": 333 - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 3, - "insertedIds": { - "0": 2, - "2": 3, - "4": 5 - }, - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 1, - "upsertedIds": { - "3": 4 - } - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 23 - }, - { - "_id": 3, - "x": 333 - }, - { - "_id": 4, - "x": 45 - }, - { - "_id": 5, - "x": 55 - } - ] - } - } - }, - { - "description": "Both commands are retried after their first statement fails", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "0": 2 - }, - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 23 - } - ] - } - } - }, - { - "description": "Second command is retried after its second statement fails", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "skip": 2 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "0": 2 - }, - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 23 - } - ] - } - } - }, - { - "description": "BulkWrite with unordered execution", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - } - ], - "options": { - "ordered": false - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 2, - "insertedIds": { - "0": 2, - "1": 3 - }, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - 
"description": "First insertOne is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "error": true, - "result": { - "deletedCount": 0, - "insertedCount": 0, - "insertedIds": {}, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - }, - { - "description": "Second updateOne is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "skip": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "error": true, - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "0": 2 - }, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Third updateOne is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "skip": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "error": true, - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "1": 2 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Single-document write following deleteMany is retried", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "deleteMany", - "arguments": { - "filter": { - "x": 11 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 1, - "insertedCount": 1, - "insertedIds": { - "1": 2 - }, - "matchedCount": 0, - 
"modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "Single-document write following updateMany is retried", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "bulkWrite", - "arguments": { - "requests": [ - { - "name": "updateMany", - "arguments": { - "filter": { - "x": 11 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 2, - "x": 22 - } - } - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "1": 2 - }, - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0, - "upsertedIds": {} - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/deleteMany.json b/test/retryable_writes/legacy/deleteMany.json deleted file mode 100644 index faa21c44f1..0000000000 --- a/test/retryable_writes/legacy/deleteMany.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "DeleteMany ignores retryWrites", - "useMultipleMongoses": true, - "operation": { - "name": "deleteMany", - "arguments": { - "filter": {} - } - }, - "outcome": { - "result": { - "deletedCount": 2 - }, - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/deleteOne-errorLabels.json b/test/retryable_writes/legacy/deleteOne-errorLabels.json deleted file mode 100644 index c14692fd1a..0000000000 --- a/test/retryable_writes/legacy/deleteOne-errorLabels.json +++ /dev/null @@ -1,107 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "DeleteOne succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "DeleteOne fails if server does not return RetryableWriteError", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/deleteOne-serverErrors.json b/test/retryable_writes/legacy/deleteOne-serverErrors.json deleted file mode 100644 index a1a27838de..0000000000 --- 
a/test/retryable_writes/legacy/deleteOne-serverErrors.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "DeleteOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "DeleteOne succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "DeleteOne fails with RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "delete" - ], - "closeConnection": true - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/deleteOne.json b/test/retryable_writes/legacy/deleteOne.json deleted file mode 100644 index 592937aced..0000000000 --- a/test/retryable_writes/legacy/deleteOne.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "DeleteOne is committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "DeleteOne is not committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "result": { - "deletedCount": 1 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "DeleteOne is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - 
"name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json b/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json deleted file mode 100644 index 60e6e0a7bc..0000000000 --- a/test/retryable_writes/legacy/findOneAndDelete-errorLabels.json +++ /dev/null @@ -1,118 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndDelete succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete fails if server does not return RetryableWriteError", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json b/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json deleted file mode 100644 index c18b63f456..0000000000 --- a/test/retryable_writes/legacy/findOneAndDelete-serverErrors.json +++ /dev/null @@ -1,170 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndDelete succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - 
"filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete fails with a RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndDelete.json b/test/retryable_writes/legacy/findOneAndDelete.json deleted file mode 100644 index 0cbe18108b..0000000000 --- a/test/retryable_writes/legacy/findOneAndDelete.json +++ /dev/null @@ -1,137 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndDelete is committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete is not committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndDelete is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "findOneAndDelete", - "arguments": { - "filter": { - "x": { - "$gte": 11 - } - }, - "sort": { - "x": 1 - } - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json b/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json deleted file mode 100644 index afa2f47af4..0000000000 --- a/test/retryable_writes/legacy/findOneAndReplace-errorLabels.json +++ /dev/null @@ -1,122 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndReplace succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 112, - 
"errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace fails if server does not return RetryableWriteError", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json b/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json deleted file mode 100644 index 944a3af848..0000000000 --- a/test/retryable_writes/legacy/findOneAndReplace-serverErrors.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndReplace succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace fails with a RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "error": true, - "result": { - 
"errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndReplace.json b/test/retryable_writes/legacy/findOneAndReplace.json deleted file mode 100644 index e1f9ab7f8c..0000000000 --- a/test/retryable_writes/legacy/findOneAndReplace.json +++ /dev/null @@ -1,145 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndReplace is committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace is not committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndReplace is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "findOneAndReplace", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - }, - "returnDocument": "Before" - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json b/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json deleted file mode 100644 index 19b3a9e771..0000000000 --- a/test/retryable_writes/legacy/findOneAndUpdate-errorLabels.json +++ /dev/null @@ -1,124 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndUpdate succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate fails if server does not return RetryableWriteError", - "failPoint": { - 
"configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json b/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json deleted file mode 100644 index e83a610615..0000000000 --- a/test/retryable_writes/legacy/findOneAndUpdate-serverErrors.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndUpdate succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate fails with a RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/findOneAndUpdate.json b/test/retryable_writes/legacy/findOneAndUpdate.json deleted file mode 100644 index 9ae2d87d82..0000000000 --- a/test/retryable_writes/legacy/findOneAndUpdate.json +++ /dev/null @@ -1,147 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - 
"x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "FindOneAndUpdate is committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate is not committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - } - }, - "outcome": { - "result": { - "_id": 1, - "x": 11 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "FindOneAndUpdate is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "findOneAndUpdate", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/insertMany-errorLabels.json b/test/retryable_writes/legacy/insertMany-errorLabels.json deleted file mode 100644 index 65fd377fa6..0000000000 --- a/test/retryable_writes/legacy/insertMany-errorLabels.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "InsertMany succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertMany fails if server does not return RetryableWriteError", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/insertMany-serverErrors.json 
b/test/retryable_writes/legacy/insertMany-serverErrors.json deleted file mode 100644 index fe8dbf4a62..0000000000 --- a/test/retryable_writes/legacy/insertMany-serverErrors.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "InsertMany succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertMany succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertMany fails with a RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/insertMany.json b/test/retryable_writes/legacy/insertMany.json deleted file mode 100644 index 0ad326e2dc..0000000000 --- a/test/retryable_writes/legacy/insertMany.json +++ /dev/null @@ -1,163 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - } - ], - "tests": [ - { - "description": "InsertMany succeeds after one network error", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertMany 
with unordered execution", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ], - "options": { - "ordered": false - } - } - }, - "outcome": { - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertMany fails after multiple network errors", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": "alwaysOn", - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "insertMany", - "arguments": { - "documents": [ - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - }, - { - "_id": 4, - "x": 44 - } - ], - "options": { - "ordered": true - } - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/insertOne-errorLabels.json b/test/retryable_writes/legacy/insertOne-errorLabels.json deleted file mode 100644 index d90ac5dfbd..0000000000 --- a/test/retryable_writes/legacy/insertOne-errorLabels.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [], - "tests": [ - { - "description": "InsertOne succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "x": 11 - } - } - }, - "outcome": { - "result": { - "insertedId": 1 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - } - ] - } - } - }, - { - "description": "InsertOne fails if server does not return RetryableWriteError", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1, - "x": 11 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/insertOne-serverErrors.json b/test/retryable_writes/legacy/insertOne-serverErrors.json deleted file mode 100644 index 5179a6ab75..0000000000 --- a/test/retryable_writes/legacy/insertOne-serverErrors.json +++ /dev/null @@ -1,1162 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "InsertOne succeeds after connection failure", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - 
"data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne fails after connection failure when retryWrites option is false", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 10107, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 13436, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 13435, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11602, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11600, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - 
"arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 91, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 7, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 6, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 9001, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 89, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": 
false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after ExceededTimeLimit", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 262, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne fails after Interrupted", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11601, - "closeConnection": false - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after WriteConcernError InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11600, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after WriteConcernError InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11602, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after WriteConcernError PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 189, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne succeeds after 
WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne fails after multiple retryable writeConcernErrors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne fails after WriteConcernError Interrupted", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "writeConcernError": { - "code": 11601, - "errmsg": "operation was interrupted" - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne fails after WriteConcernError WriteConcernFailed", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "waiting for replication timed out", - "errInfo": { - "wtimeout": true - } - } - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne fails with a RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/insertOne.json b/test/retryable_writes/legacy/insertOne.json deleted file mode 100644 index 04dee6dd68..0000000000 --- a/test/retryable_writes/legacy/insertOne.json +++ /dev/null @@ 
-1,139 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "InsertOne is committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne is not committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "result": { - "insertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 33 - } - ] - } - } - }, - { - "description": "InsertOne is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3, - "x": 33 - } - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/replaceOne-errorLabels.json b/test/retryable_writes/legacy/replaceOne-errorLabels.json deleted file mode 100644 index 6029b875dc..0000000000 --- a/test/retryable_writes/legacy/replaceOne-errorLabels.json +++ /dev/null @@ -1,121 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "ReplaceOne succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "ReplaceOne fails if server does not return RetryableWriteError", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/replaceOne-serverErrors.json 
b/test/retryable_writes/legacy/replaceOne-serverErrors.json deleted file mode 100644 index 6b35722e12..0000000000 --- a/test/retryable_writes/legacy/replaceOne-serverErrors.json +++ /dev/null @@ -1,177 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "ReplaceOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "ReplaceOne succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "ReplaceOne fails with a RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "update" - ], - "closeConnection": true - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/replaceOne.json b/test/retryable_writes/legacy/replaceOne.json deleted file mode 100644 index e5b8cf8eab..0000000000 --- a/test/retryable_writes/legacy/replaceOne.json +++ /dev/null @@ -1,144 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "ReplaceOne is committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "ReplaceOne is not committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - 
"mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 111 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "ReplaceOne is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "_id": 1, - "x": 111 - } - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/updateMany.json b/test/retryable_writes/legacy/updateMany.json deleted file mode 100644 index 46fef73e74..0000000000 --- a/test/retryable_writes/legacy/updateMany.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "UpdateMany ignores retryWrites", - "useMultipleMongoses": true, - "operation": { - "name": "updateMany", - "arguments": { - "filter": {}, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 23 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/updateOne-errorLabels.json b/test/retryable_writes/legacy/updateOne-errorLabels.json deleted file mode 100644 index 5bd00cde90..0000000000 --- a/test/retryable_writes/legacy/updateOne-errorLabels.json +++ /dev/null @@ -1,123 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "UpdateOne succeeds with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne fails if server does not return RetryableWriteError", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] 
- } - } - } - ] -} diff --git a/test/retryable_writes/legacy/updateOne-serverErrors.json b/test/retryable_writes/legacy/updateOne-serverErrors.json deleted file mode 100644 index cf274f57e0..0000000000 --- a/test/retryable_writes/legacy/updateOne-serverErrors.json +++ /dev/null @@ -1,180 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topology": [ - "sharded", - "load-balanced" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "UpdateOne succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne fails with a RetryableWriteError label after two connection failures", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "update" - ], - "closeConnection": true - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "error": true, - "result": { - "errorLabelsContain": [ - "RetryableWriteError" - ] - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/legacy/updateOne.json b/test/retryable_writes/legacy/updateOne.json deleted file mode 100644 index 0f806dc3d8..0000000000 --- a/test/retryable_writes/legacy/updateOne.json +++ /dev/null @@ -1,288 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "3.6", - "topology": [ - "replicaset" - ] - } - ], - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ], - "tests": [ - { - "description": "UpdateOne is committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne is not committed on first attempt", - 
"failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 12 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - }, - { - "description": "UpdateOne with upsert is committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3, - "x": 33 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 34 - } - ] - } - } - }, - { - "description": "UpdateOne with upsert is not committed on first attempt", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 1 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3, - "x": 33 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "outcome": { - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 3 - }, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - }, - { - "_id": 3, - "x": 34 - } - ] - } - } - }, - { - "description": "UpdateOne with upsert is never committed", - "failPoint": { - "configureFailPoint": "onPrimaryTransactionalWrite", - "mode": { - "times": 2 - }, - "data": { - "failBeforeCommitExceptionCode": 1 - } - }, - "operation": { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 3, - "x": 33 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - }, - "outcome": { - "error": true, - "collection": { - "data": [ - { - "_id": 1, - "x": 11 - }, - { - "_id": 2, - "x": 22 - } - ] - } - } - } - ] -} diff --git a/test/retryable_writes/unified/aggregate-out-merge.json b/test/retryable_writes/unified/aggregate-out-merge.json new file mode 100644 index 0000000000..fd25c345ac --- /dev/null +++ b/test/retryable_writes/unified/aggregate-out-merge.json @@ -0,0 +1,149 @@ +{ + "description": "aggregate with $out/$merge does not set txnNumber", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + 
"collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "mergeCollection", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "aggregate with $out does not set txnNumber", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "outCollection" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "aggregate with $merge does not set txnNumber", + "runOnRequirements": [ + { + "minServerVersion": "4.1.11" + } + ], + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$merge": { + "into": "mergeCollection" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/bulkWrite-errorLabels.json b/test/retryable_writes/unified/bulkWrite-errorLabels.json new file mode 100644 index 0000000000..13ba9bae75 --- /dev/null +++ b/test/retryable_writes/unified/bulkWrite-errorLabels.json @@ -0,0 +1,416 @@ +{ + "description": "bulkWrite-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "BulkWrite succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 3 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + 
"_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 3 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "BulkWrite succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 3 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git 
a/test/retryable_writes/unified/bulkWrite-serverErrors.json b/test/retryable_writes/unified/bulkWrite-serverErrors.json index 23cf2869a6..0a063ab4d9 100644 --- a/test/retryable_writes/unified/bulkWrite-serverErrors.json +++ b/test/retryable_writes/unified/bulkWrite-serverErrors.json @@ -1,12 +1,19 @@ { "description": "retryable-writes bulkWrite serverErrors", - "schemaVersion": "1.0", + "schemaVersion": "1.3", "runOnRequirements": [ { - "minServerVersion": "3.6", + "minServerVersion": "4.0", "topologies": [ "replicaset" ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] } ], "createEntities": [ @@ -55,16 +62,7 @@ "description": "BulkWrite succeeds after retryable writeConcernError in first batch", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topologies": [ - "sharded-replicaset" - ] + "minServerVersion": "4.3.1" } ], "operations": [ @@ -200,6 +198,88 @@ ] } ] + }, + { + "description": "BulkWrite fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] } ] } diff --git a/test/retryable_writes/unified/bulkWrite.json b/test/retryable_writes/unified/bulkWrite.json new file mode 100644 index 0000000000..f2bd9e0eb8 --- /dev/null +++ b/test/retryable_writes/unified/bulkWrite.json @@ -0,0 +1,1083 @@ +{ + "description": "bulkWrite", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "First command is retried", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + 
"filter": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "All commands are retried", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 7 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 4, + "x": 44 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + } + }, + { + "insertOne": { + "document": { + "_id": 5, + "x": 55 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 3 + }, + "replacement": { + "_id": 3, + "x": 333 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 3, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "2": 3, + "4": 5 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 1, + "upsertedIds": { + "3": 4 + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 23 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 45 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ] + }, + { + "description": "Both commands are retried after their first statement fails", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": 
[ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "Second command is retried after its second statement fails", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "skip": 2 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2 + } + }, + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ] + }, + { + "description": "BulkWrite with unordered execution", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "ordered": false + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 2, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "First insertOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 0, + "insertedIds": { + "$$unsetOrMatches": {} + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { 
+ "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "Second updateOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "skip": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 2 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Third updateOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "skip": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 2 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Single-document write following deleteMany is retried", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": { + "x": 11 + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 1, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 2 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "Single-document write following updateMany is retried", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + 
"configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": { + "x": 11 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "1": 2 + } + }, + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0, + "upsertedIds": {} + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "collection bulkWrite with updateMany does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "updateMany": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "collection bulkWrite with deleteMany does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "deleteMany": { + "filter": {} + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/client-bulkWrite-clientErrors.json b/test/retryable_writes/unified/client-bulkWrite-clientErrors.json new file mode 100644 index 0000000000..d16e0c9c8d --- /dev/null +++ b/test/retryable_writes/unified/client-bulkWrite-clientErrors.json @@ -0,0 +1,351 @@ +{ + "description": "client bulkWrite retryable writes with client errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with one network error succeeds after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": 
"retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/client-bulkWrite-serverErrors.json b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json new file mode 100644 index 0000000000..a1f7c8152a --- /dev/null +++ 
b/test/retryable_writes/unified/client-bulkWrite-serverErrors.json @@ -0,0 +1,882 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "clientRetryWritesFalse", + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + 
"commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + 
"upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with retryWrites: false does not retry", + "operations": [ + 
{ + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "clientRetryWritesFalse", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "clientRetryWritesFalse", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "clientRetryWritesFalse", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/deleteMany.json b/test/retryable_writes/unified/deleteMany.json new file mode 100644 index 0000000000..381f377954 --- /dev/null +++ b/test/retryable_writes/unified/deleteMany.json @@ -0,0 +1,96 @@ +{ + "description": "deleteMany", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteMany ignores retryWrites", + "operations": [ + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "filter": {} + }, + "expectResult": { + "deletedCount": 2 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/deleteOne-errorLabels.json b/test/retryable_writes/unified/deleteOne-errorLabels.json new file mode 100644 index 0000000000..88920862ec --- /dev/null +++ b/test/retryable_writes/unified/deleteOne-errorLabels.json @@ -0,0 +1,266 @@ +{ + "description": "deleteOne-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + 
"databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/deleteOne-serverErrors.json b/test/retryable_writes/unified/deleteOne-serverErrors.json new file mode 100644 index 0000000000..0808b7921d --- /dev/null +++ b/test/retryable_writes/unified/deleteOne-serverErrors.json @@ -0,0 +1,114 @@ +{ + "description": "deleteOne-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + 
"minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne fails with RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/deleteOne.json b/test/retryable_writes/unified/deleteOne.json new file mode 100644 index 0000000000..9e37ff8bcf --- /dev/null +++ b/test/retryable_writes/unified/deleteOne.json @@ -0,0 +1,218 @@ +{ + "description": "deleteOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "DeleteOne is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "DeleteOne is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "DeleteOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndDelete-errorLabels.json b/test/retryable_writes/unified/findOneAndDelete-errorLabels.json new file mode 100644 index 0000000000..8639873fca --- /dev/null +++ b/test/retryable_writes/unified/findOneAndDelete-errorLabels.json @@ -0,0 +1,289 @@ +{ + "description": "findOneAndDelete-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + 
"expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndDelete-serverErrors.json b/test/retryable_writes/unified/findOneAndDelete-serverErrors.json new file mode 100644 index 0000000000..f6d8e9d69c --- /dev/null +++ b/test/retryable_writes/unified/findOneAndDelete-serverErrors.json @@ -0,0 +1,119 @@ +{ + "description": "findOneAndDelete-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, 
+ "sort": { + "x": 1 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndDelete.json b/test/retryable_writes/unified/findOneAndDelete.json new file mode 100644 index 0000000000..ebfb8ce665 --- /dev/null +++ b/test/retryable_writes/unified/findOneAndDelete.json @@ -0,0 +1,235 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndDelete is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndDelete is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndDelete is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "filter": { + "x": { + "$gte": 11 + } + }, + "sort": { + "x": 1 + } + }, + 
"expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndReplace-errorLabels.json b/test/retryable_writes/unified/findOneAndReplace-errorLabels.json new file mode 100644 index 0000000000..78db52e75d --- /dev/null +++ b/test/retryable_writes/unified/findOneAndReplace-errorLabels.json @@ -0,0 +1,301 @@ +{ + "description": "findOneAndReplace-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + 
"replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndReplace-serverErrors.json b/test/retryable_writes/unified/findOneAndReplace-serverErrors.json new file mode 100644 index 0000000000..1c355c3ebf --- /dev/null +++ b/test/retryable_writes/unified/findOneAndReplace-serverErrors.json @@ -0,0 +1,119 @@ +{ + "description": "findOneAndReplace-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndReplace.json b/test/retryable_writes/unified/findOneAndReplace.json new file mode 100644 index 0000000000..638d15a41d --- /dev/null +++ b/test/retryable_writes/unified/findOneAndReplace.json @@ -0,0 +1,243 @@ +{ + "description": 
"findOneAndReplace", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndReplace is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndReplace is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndReplace is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndUpdate-errorLabels.json b/test/retryable_writes/unified/findOneAndUpdate-errorLabels.json new file mode 100644 index 0000000000..38b3f7ba44 --- 
/dev/null +++ b/test/retryable_writes/unified/findOneAndUpdate-errorLabels.json @@ -0,0 +1,305 @@ +{ + "description": "findOneAndUpdate-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndUpdate-serverErrors.json b/test/retryable_writes/unified/findOneAndUpdate-serverErrors.json new file mode 100644 index 0000000000..150012ac72 --- /dev/null +++ b/test/retryable_writes/unified/findOneAndUpdate-serverErrors.json @@ -0,0 +1,120 @@ +{ + "description": "findOneAndUpdate-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/findOneAndUpdate.json b/test/retryable_writes/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..eefe98ae11 --- /dev/null +++ b/test/retryable_writes/unified/findOneAndUpdate.json @@ -0,0 +1,245 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + 
"collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "FindOneAndUpdate is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 1, + "x": 11 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/handshakeError.json b/test/retryable_writes/unified/handshakeError.json index 6d6b4ac491..93cb2e849e 100644 --- a/test/retryable_writes/unified/handshakeError.json +++ b/test/retryable_writes/unified/handshakeError.json @@ -1,6 +1,6 @@ { "description": "retryable writes handshake failures", - "schemaVersion": "1.3", + "schemaVersion": "1.4", "runOnRequirements": [ { "minServerVersion": "4.2", @@ -15,25 +15,27 @@ "createEntities": [ { "client": { - "id": "client0", + "id": "client", "useMultipleMongoses": false, "observeEvents": [ + "connectionCheckOutStartedEvent", "commandStartedEvent", - "connectionCheckOutStartedEvent" + "commandSucceededEvent", + "commandFailedEvent" ] } }, { 
"database": { - "id": "database0", - "client": "client0", - "databaseName": "retryable-handshake-tests" + "id": "database", + "client": "client", + "databaseName": "retryable-writes-handshake-tests" } }, { "collection": { - "id": "collection0", - "database": "database0", + "id": "collection", + "database": "database", "collectionName": "coll" } } @@ -41,7 +43,7 @@ "initialData": [ { "collectionName": "coll", - "databaseName": "retryable-handshake-tests", + "databaseName": "retryable-writes-handshake-tests", "documents": [ { "_id": 1, @@ -52,13 +54,19 @@ ], "tests": [ { - "description": "InsertOne succeeds after retryable handshake error", + "description": "client.clientBulkWrite succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -66,8 +74,8 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], "closeConnection": true } @@ -76,7 +84,7 @@ }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -88,19 +96,26 @@ } }, { - "name": "insertOne", - "object": "collection0", + "name": "clientBulkWrite", + "object": "client", "arguments": { - "document": { - "_id": 2, - "x": 22 - } + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] } } ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -118,59 +133,152 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" } }, { "commandStartedEvent": { - "command": { - "insert": "coll", - "documents": [ - { - "_id": 2, - "x": 22 - } - ] - }, - "commandName": "insert", - "databaseName": "retryable-handshake-tests" + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" } } ] } + ] + }, + { + "description": "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } ], - "outcome": [ + "expectEvents": [ { - "collectionName": "coll", - "databaseName": "retryable-handshake-tests", - "documents": [ + "client": "client", + "eventType": "cmap", + "events": [ { - "_id": 1, - "x": 11 + "connectionCheckOutStartedEvent": {} }, { - "_id": 2, - "x": 22 + 
"connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } } ] } ] }, { - "description": "InsertOne succeeds after retryable handshake error ShutdownInProgress", + "description": "collection.insertOne succeeds after retryable handshake network error", "operations": [ { "name": "failPoint", "object": "testRunner", "arguments": { - "client": "client0", + "client": "client", "failPoint": { "configureFailPoint": "failCommand", "mode": { @@ -178,17 +286,17 @@ }, "data": { "failCommands": [ - "saslContinue", - "ping" + "ping", + "saslContinue" ], - "errorCode": 91 + "closeConnection": true } } } }, { "name": "runCommand", - "object": "database0", + "object": "database", "arguments": { "commandName": "ping", "command": { @@ -201,7 +309,7 @@ }, { "name": "insertOne", - "object": "collection0", + "object": "collection", "arguments": { "document": { "_id": 2, @@ -212,7 +320,7 @@ ], "expectEvents": [ { - "client": "client0", + "client": "client", "eventType": "cmap", "events": [ { @@ -230,46 +338,1674 @@ ] }, { - "client": "client0", + "client": "client", "events": [ { "commandStartedEvent": { "command": { "ping": 1 }, - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" } }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.insertOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ { "commandStartedEvent": { "command": { - "insert": "coll", - "documents": [ - { - "_id": 2, - "x": 22 - } - ] + "ping": 1 }, - "commandName": "insert", - "databaseName": "retryable-handshake-tests" + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" } } ] } + ] + }, + { + "description": "collection.insertMany succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } ], - "outcome": [ + "expectEvents": [ { - "collectionName": "coll", - "databaseName": "retryable-handshake-tests", - "documents": [ + "client": "client", + "eventType": "cmap", + "events": [ { - "_id": 1, - "x": 11 + "connectionCheckOutStartedEvent": {} }, { - "_id": 2, - "x": 22 + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.insertMany succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.deleteOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { 
+ "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "collection.deleteOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "delete" + } + }, + { + "commandSucceededEvent": { + "commandName": "delete" + } + } + ] + } + ] + }, + { + "description": "collection.replaceOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.replaceOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + 
}, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.updateOne succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.updateOne succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "update" + } + }, + { + "commandSucceededEvent": { + "commandName": "update" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndDelete succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndDelete succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndReplace succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + 
"command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndReplace succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 22 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndUpdate succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + 
{ + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.findOneAndUpdate succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 22 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify" + } + }, + { + "commandSucceededEvent": { + "commandName": "findAndModify" + } + } + ] + } + ] + }, + { + "description": "collection.bulkWrite succeeds after retryable handshake network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "collection.bulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + 
"expectError": { + "isError": true + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 2, + "x": 22 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "insert" + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } } ] } diff --git a/test/retryable_writes/unified/insertMany-errorLabels.json b/test/retryable_writes/unified/insertMany-errorLabels.json new file mode 100644 index 0000000000..5254ba7cb2 --- /dev/null +++ b/test/retryable_writes/unified/insertMany-errorLabels.json @@ -0,0 +1,335 @@ +{ + "description": "insertMany-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertMany succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertMany fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + 
"documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertMany succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertMany succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertMany-serverErrors.json b/test/retryable_writes/unified/insertMany-serverErrors.json new file mode 100644 index 0000000000..f5f513603c --- /dev/null +++ b/test/retryable_writes/unified/insertMany-serverErrors.json @@ -0,0 +1,114 @@ +{ + "description": "insertMany-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertMany fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 
+ }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertMany.json b/test/retryable_writes/unified/insertMany.json new file mode 100644 index 0000000000..35a18c46c6 --- /dev/null +++ b/test/retryable_writes/unified/insertMany.json @@ -0,0 +1,290 @@ +{ + "description": "insertMany", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "tests": [ + { + "description": "InsertMany succeeds after one network error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": true + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "InsertMany with unordered execution", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ], + "ordered": false + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] 
+ }, + { + "description": "InsertMany fails after multiple network errors", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": "alwaysOn", + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ], + "ordered": true + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-errorLabels.json b/test/retryable_writes/unified/insertOne-errorLabels.json new file mode 100644 index 0000000000..39f31a8aa6 --- /dev/null +++ b/test/retryable_writes/unified/insertOne-errorLabels.json @@ -0,0 +1,1127 @@ +{ + "description": "insertOne-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "InsertOne succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ] + }, + { + "description": "InsertOne succeeds after NotWritablePrimary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + 
"errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after HostNotFound", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after HostUnreachable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after SocketException", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": 
{ + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after NetworkTimeout", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after ExceededTimeLimit", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 262, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after WriteConcernError InterruptedAtShutdown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after WriteConcernError InterruptedDueToReplStateChange", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 
11602, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after WriteConcernError PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "InsertOne fails after multiple retryable writeConcernErrors", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-noWritesPerformedError.json b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json new file mode 100644 index 0000000000..3194e91c5c --- /dev/null +++ b/test/retryable_writes/unified/insertOne-noWritesPerformedError.json @@ -0,0 +1,90 @@ +{ + "description": "retryable-writes insertOne noWritesPerformedErrors", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "6.0", + "topologies": [ + "replicaset" + ] + } + 
], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "no-writes-performed-collection" + } + } + ], + "tests": [ + { + "description": "InsertOne fails after NoWritesPerformed error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 64, + "errorLabels": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "errorCode": 64, + "errorLabelsContain": [ + "NoWritesPerformed", + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "no-writes-performed-collection", + "databaseName": "retryable-writes-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/insertOne-serverErrors.json b/test/retryable_writes/unified/insertOne-serverErrors.json index 77245a8197..8edafb7029 100644 --- a/test/retryable_writes/unified/insertOne-serverErrors.json +++ b/test/retryable_writes/unified/insertOne-serverErrors.json @@ -1,12 +1,19 @@ { "description": "retryable-writes insertOne serverErrors", - "schemaVersion": "1.0", + "schemaVersion": "1.9", "runOnRequirements": [ { - "minServerVersion": "3.6", + "minServerVersion": "4.0", "topologies": [ "replicaset" ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] } ], "createEntities": [ @@ -55,16 +62,7 @@ "description": "InsertOne succeeds after retryable writeConcernError", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", - "topologies": [ - "sharded-replicaset" - ] + "minServerVersion": "4.3.1" } ], "operations": [ @@ -168,6 +166,699 @@ ] } ] + }, + { + "description": "RetryableWriteError label is added based on top-level code in pre-4.4 server response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 189 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + 
"documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "RetryableWriteError label is added based on writeConcernError in pre-4.4 mongod response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "replicaset" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "RetryableWriteError label is not added based on writeConcernError in pre-4.4 mongos response", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "maxServerVersion": "4.2.99", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 3, + "x": 33 + } + ] + }, + "commandName": "insert", + "databaseName": "retryable-writes-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne succeeds after connection failure", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + 
"documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails after connection failure when retryWrites option is false", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + } + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after Interrupted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "InsertOne fails after WriteConcernError Interrupted", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 11601, + "errmsg": "operation was interrupted" + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails after WriteConcernError WriteConcernTimeout", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 64, + "errmsg": "waiting for replication timed out", + "errInfo": { + "wtimeout": true + } + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + 
"document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] } ] } diff --git a/test/retryable_writes/unified/insertOne.json b/test/retryable_writes/unified/insertOne.json new file mode 100644 index 0000000000..a6afdbf224 --- /dev/null +++ b/test/retryable_writes/unified/insertOne.json @@ -0,0 +1,245 @@ +{ + "description": "insertOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "InsertOne is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "InsertOne is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + 
"object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "InsertOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3, + "x": 33 + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/replaceOne-errorLabels.json b/test/retryable_writes/unified/replaceOne-errorLabels.json new file mode 100644 index 0000000000..22c4561ae7 --- /dev/null +++ b/test/retryable_writes/unified/replaceOne-errorLabels.json @@ -0,0 +1,300 @@ +{ + "description": "replaceOne-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectError": { + 
"isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/replaceOne-serverErrors.json b/test/retryable_writes/unified/replaceOne-serverErrors.json new file mode 100644 index 0000000000..c957db7244 --- /dev/null +++ b/test/retryable_writes/unified/replaceOne-serverErrors.json @@ -0,0 +1,118 @@ +{ + "description": "replaceOne-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + 
"arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/replaceOne.json b/test/retryable_writes/unified/replaceOne.json new file mode 100644 index 0000000000..ee6e37d3bb --- /dev/null +++ b/test/retryable_writes/unified/replaceOne.json @@ -0,0 +1,242 @@ +{ + "description": "replaceOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "ReplaceOne is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "ReplaceOne is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 111 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "ReplaceOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + 
"object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": 1 + }, + "replacement": { + "_id": 1, + "x": 111 + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/unacknowledged-write-concern.json b/test/retryable_writes/unified/unacknowledged-write-concern.json new file mode 100644 index 0000000000..eaa114acfd --- /dev/null +++ b/test/retryable_writes/unified/unacknowledged-write-concern.json @@ -0,0 +1,77 @@ +{ + "description": "unacknowledged write does not set txnNumber", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + } + ], + "tests": [ + { + "description": "unacknowledged write does not set txnNumber", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateMany.json b/test/retryable_writes/unified/updateMany.json new file mode 100644 index 0000000000..12c5204ee9 --- /dev/null +++ b/test/retryable_writes/unified/updateMany.json @@ -0,0 +1,112 @@ +{ + "description": "updateMany", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateMany ignores retryWrites", + "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": {}, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 23 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateOne-errorLabels.json 
b/test/retryable_writes/unified/updateOne-errorLabels.json new file mode 100644 index 0000000000..e44cef45f6 --- /dev/null +++ b/test/retryable_writes/unified/updateOne-errorLabels.json @@ -0,0 +1,304 @@ +{ + "description": "updateOne-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne succeeds with RetryableWriteError from server", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne fails if server does not return RetryableWriteError", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "errorLabelsOmit": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne succeeds after PrimarySteppedDown", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateOne-serverErrors.json b/test/retryable_writes/unified/updateOne-serverErrors.json new file mode 100644 index 0000000000..648834ada4 --- /dev/null +++ b/test/retryable_writes/unified/updateOne-serverErrors.json @@ -0,0 +1,119 @@ +{ + "description": "updateOne-serverErrors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne fails with a RetryableWriteError label after two connection failures", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/retryable_writes/unified/updateOne.json b/test/retryable_writes/unified/updateOne.json new file mode 100644 index 0000000000..99ffba8e21 --- /dev/null +++ b/test/retryable_writes/unified/updateOne.json @@ -0,0 +1,424 @@ +{ + "description": "updateOne", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + 
"databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "tests": [ + { + "description": "UpdateOne is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "command": { + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "UpdateOne is not committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "UpdateOne with upsert is committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3, + "x": 33 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 3 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "UpdateOne with upsert is not 
committed on first attempt", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 1 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3, + "x": 33 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 3 + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 34 + } + ] + } + ] + }, + { + "description": "UpdateOne with upsert is never committed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "onPrimaryTransactionalWrite", + "mode": { + "times": 2 + }, + "data": { + "failBeforeCommitExceptionCode": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": 3, + "x": 33 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectError": { + "isError": true + } + } + ], + "outcome": [ + { + "collectionName": "coll", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ] + } + ] +} diff --git a/test/run_command/unified/runCommand.json b/test/run_command/unified/runCommand.json new file mode 100644 index 0000000000..fde9de92e6 --- /dev/null +++ b/test/run_command/unified/runCommand.json @@ -0,0 +1,634 @@ +{ + "description": "runCommand", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + }, + { + "database": { + "id": "dbWithRC", + "client": "client", + "databaseName": "dbWithRC", + "databaseOptions": { + "readConcern": { + "level": "local" + } + } + } + }, + { + "database": { + "id": "dbWithWC", + "client": "client", + "databaseName": "dbWithWC", + "databaseOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "client": { + "id": "clientWithStableApi", + "observeEvents": [ + "commandStartedEvent" + ], + "serverApi": { + "version": "1", + "strict": true + } + } + }, + { + "database": { + "id": "dbWithStableApi", + "client": "clientWithStableApi", + "databaseName": "dbWithStableApi" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [] + } + ], + "tests": [ + { + "description": "always attaches $db and implicit lsid to given command and omits default readPreference", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "db", + "lsid": { + "$$exists": true + }, + "$readPreference": { + "$$exists": false + } + }, + 
"commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "always gossips the $clusterTime on the sent command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$clusterTime": { + "$$exists": true + } + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided session lsid to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "session": "session" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "attaches the provided $readPreference to given command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "mode": "nearest" + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach $readPreference to given command on standalone", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "nearest" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not attach primary $readPreference to given command", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + }, + "readPreference": { + "mode": "primary" + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$readPreference": { + "$$exists": false + }, + "$db": "db" + }, + "commandName": "ping" + } + } + ] + } + ] + }, + { + "description": "does not inherit readConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithRC", + "arguments": { + "commandName": "aggregate", + "command": { + "aggregate": "collection", + "pipeline": [], + "cursor": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "aggregate": "collection", + "readConcern": { + "$$exists": false + }, + "$db": "dbWithRC" + }, + "commandName": "aggregate" + } + } + ] + } + ] + }, + { + "description": "does not inherit writeConcern specified at the db level", + "operations": [ + { + "name": "runCommand", + "object": "dbWithWC", + "arguments": { + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "bar" + } + ], + "ordered": true + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "writeConcern": { + "$$exists": false + }, + "$db": "dbWithWC" + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "does not retry retryable errors on given command", + "runOnRequirements": [ + { + "minServerVersion": "4.2" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "attaches transaction fields to given command", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "session": "session", + "commandName": "insert", + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "collection", + "documents": [ + { + "foo": "transaction" + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "db" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session" + }, + "txnNumber": 1, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "attaches apiVersion fields to given command when stableApi is configured on the client", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "runCommand", + "object": "dbWithStableApi", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "clientWithStableApi", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1, + "$db": "dbWithStableApi", + "apiVersion": "1", + "apiStrict": true, + "apiDeprecationErrors": { + 
"$$unsetOrMatches": false + } + }, + "commandName": "ping" + } + } + ] + } + ] + } + ] +} diff --git a/test/run_command/unified/runCursorCommand.json b/test/run_command/unified/runCursorCommand.json new file mode 100644 index 0000000000..4f1ec8a01a --- /dev/null +++ b/test/run_command/unified/runCursorCommand.json @@ -0,0 +1,877 @@ +{ + "description": "runCursorCommand", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "connectionReadyEvent", + "connectionCheckedOutEvent", + "connectionCheckedInEvent" + ] + } + }, + { + "session": { + "id": "session", + "client": "client" + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "successfully executes checkMetadataConsistency cursor creating command", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "checkMetadataConsistency", + "command": { + "checkMetadataConsistency": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "checkMetadataConsistency": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "checkMetadataConsistency" + } + } + ] + } + ] + }, + { + "description": "errors if the command response is not a cursor", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "creates an implicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "accepts an explicit session that is reused across getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + 
"_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + }, + { + "name": "assertSameLsidOnLastTwoCommands", + "object": "testRunner", + "arguments": { + "client": "client" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is exhausted", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "session": "session", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "cursor", + "expectResult": { + "_id": 5, + "x": 55 + } + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 2, + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$sessionLsid": "session" + } + }, + "commandName": "getMore" + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionReadyEvent": {} + }, + { + "connectionCheckedOutEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + }, + { + "description": "returns pinned connections to the pool when the cursor is closed", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "command": { + "find": "collection", + "batchSize": 2 + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 1 + 
} + }, + { + "name": "close", + "object": "cursor" + }, + { + "name": "assertNumberConnectionsCheckedOut", + "object": "testRunner", + "arguments": { + "client": "client", + "connections": 0 + } + } + ] + }, + { + "description": "supports configuring getMore batchSize", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 5, + "command": { + "find": "collection", + "batchSize": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "batchSize": 5, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore maxTimeMS", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "maxTimeMS": 300, + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1 + } + }, + "ignoreResultAndError": true + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "maxTimeMS": 200, + "batchSize": 1, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "maxTimeMS": 300, + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "supports configuring getMore comment", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "comment": { + "hello": "getMore" + }, + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + } + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "batchSize": 1, + "comment": { + "hello": "find" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "comment": { + "hello": "getMore" + }, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "does not close the cursor when receiving an empty batch", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "dropCollection", + "object": "db", + "arguments": { + "collection": "cappedCollection" + } + }, + { + "name": "createCollection", + "object": "db", + "arguments": { + 
"collection": "cappedCollection", + "capped": true, + "size": 4096, + "max": 3 + }, + "saveResultAsEntity": "cappedCollection" + }, + { + "name": "insertMany", + "object": "cappedCollection", + "arguments": { + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + }, + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "cursorType": "tailable", + "commandName": "find", + "batchSize": 2, + "command": { + "find": "cappedCollection", + "tailable": true + } + }, + "saveResultAsEntity": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "iterateOnce", + "object": "cursor" + }, + { + "name": "close", + "object": "cursor" + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "cappedCollection" + }, + "commandName": "drop" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "cappedCollection" + }, + "commandName": "create" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "cappedCollection" + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "cappedCollection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "cappedCollection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/sdam_monitoring/discovered_standalone.json b/test/sdam_monitoring/discovered_standalone.json index dd8f7fc51e..097203694e 100644 --- a/test/sdam_monitoring/discovered_standalone.json +++ b/test/sdam_monitoring/discovered_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_no_primary.json b/test/sdam_monitoring/replica_set_with_no_primary.json index 950e32efe1..41d048729d 100644 --- a/test/sdam_monitoring/replica_set_with_no_primary.json +++ b/test/sdam_monitoring/replica_set_with_no_primary.json @@ -19,7 +19,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_primary.json b/test/sdam_monitoring/replica_set_with_primary.json index 2ad94d6e6a..3ccc127d1d 100644 --- a/test/sdam_monitoring/replica_set_with_primary.json +++ b/test/sdam_monitoring/replica_set_with_primary.json @@ -18,7 +18,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/replica_set_with_removal.json b/test/sdam_monitoring/replica_set_with_removal.json index ae28faa30c..dc6fbe7e7d 100644 --- a/test/sdam_monitoring/replica_set_with_removal.json +++ b/test/sdam_monitoring/replica_set_with_removal.json @@ -69,7 +69,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/test/sdam_monitoring/required_replica_set.json b/test/sdam_monitoring/required_replica_set.json index 401c5d99c5..1f4e5c1d71 100644 --- a/test/sdam_monitoring/required_replica_set.json +++ b/test/sdam_monitoring/required_replica_set.json 
@@ -18,7 +18,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/standalone.json b/test/sdam_monitoring/standalone.json index 821a1525d4..f375a383ca 100644 --- a/test/sdam_monitoring/standalone.json +++ b/test/sdam_monitoring/standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json index 5958e2d26c..4d046ff8ed 100644 --- a/test/sdam_monitoring/standalone_suppress_equal_description_changes.json +++ b/test/sdam_monitoring/standalone_suppress_equal_description_changes.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -21,7 +21,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/test/server_selection_logging/load-balanced.json b/test/server_selection_logging/load-balanced.json new file mode 100644 index 0000000000..5855c4e991 --- /dev/null +++ b/test/server_selection_logging/load-balanced.json @@ -0,0 +1,107 @@ +{ + "description": "server-selection-logging", + "schemaVersion": "1.13", + "runOnRequirements": [ + { + "topologies": [ + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "heartbeatFrequencyMS": 500 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + } + ], + "tests": [ + { + "description": "A successful operation - load balanced cluster", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "LoadBalancer" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/operation-id.json b/test/server_selection_logging/operation-id.json new file mode 100644 index 0000000000..ccc2623166 --- /dev/null +++ b/test/server_selection_logging/operation-id.json @@ -0,0 +1,418 @@ +{ + "description": "operation-id", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + 
"serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + } + ], + "_yamlAnchors": { + "namespace": "logging-tests.server-selection" + }, + "tests": [ + { + "description": "Successful bulkWrite operation: log messages have operationIds", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + } + ] + } + ] + }, + { + "description": "Failed bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "insert" + } + } + ] + } + ] + }, + { + "description": "Successful client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + 
"namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "Failed client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/replica-set.json b/test/server_selection_logging/replica-set.json new file mode 100644 index 0000000000..830b1ea51a --- /dev/null +++ b/test/server_selection_logging/replica-set.json @@ -0,0 +1,228 @@ +{ + "description": "replica-set-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + }, + { + "collection": { + "id": "unsatisfiableRPColl", + "database": "database", + "collectionName": "unsatisfiableRPColl", + "collectionOptions": { + "readPreference": { + "mode": "Secondary", + "tagSets": [ + { + 
"nonexistenttag": "a" + } + ] + } + } + } + } + ], + "tests": [ + { + "description": "A successful operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Server selection fails due to unsatisfiable read preference", + "runOnRequirements": [ + { + "minServerVersion": "4.0" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 4 + } + }, + { + "name": "find", + "object": "unsatisfiableRPColl", + "arguments": { + "filter": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "selector": { + "$$exists": true + }, + "operation": "find", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/sharded.json b/test/server_selection_logging/sharded.json new file mode 100644 index 0000000000..346c050f9e --- /dev/null +++ b/test/server_selection_logging/sharded.json @@ -0,0 +1,237 @@ +{ + "description": "server-selection-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient", + 
"useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "A successful operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failure due to unreachable server", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/server_selection_logging/standalone.json b/test/server_selection_logging/standalone.json new file mode 100644 index 0000000000..fa01ad9911 --- /dev/null +++ b/test/server_selection_logging/standalone.json @@ -0,0 +1,235 @@ +{ + "description": "standalone-logging", + "schemaVersion": "1.14", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "uriOptions": { + "retryWrites": false, + "heartbeatFrequencyMS": 500, + "appName": "loggingClient", + "serverSelectionTimeoutMS": 2000 + }, + "observeLogMessages": { + "serverSelection": "debug" + }, + "observeEvents": [ + "serverDescriptionChangedEvent", + "topologyDescriptionChangedEvent" 
+ ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "logging-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "server-selection" + } + }, + { + "client": { + "id": "failPointClient" + } + } + ], + "tests": [ + { + "description": "A successful operation", + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "serverHost": { + "$$type": "string" + }, + "serverPort": { + "$$type": [ + "int", + "long" + ] + } + } + } + ] + } + ] + }, + { + "description": "Failure due to unreachable server", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "remainingTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "selector": { + "$$exists": true + }, + "operation": "insert", + "topologyDescription": { + "$$exists": true + }, + "failure": { + "$$exists": true + } + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/driver-sessions-dirty-session-errors.json b/test/sessions/driver-sessions-dirty-session-errors.json index 361ea83d7b..d7a1c6aba7 100644 --- a/test/sessions/driver-sessions-dirty-session-errors.json +++ b/test/sessions/driver-sessions-dirty-session-errors.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -347,7 +347,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + 
}, "lsid": { "$$sessionLsid": "session0" }, @@ -375,7 +377,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$sessionLsid": "session0" }, @@ -627,7 +631,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$type": "object" }, @@ -655,7 +661,9 @@ "x": 1 } }, - "new": false, + "new": { + "$$unsetOrMatches": false + }, "lsid": { "$$type": "object" }, diff --git a/test/sessions/implicit-sessions-default-causal-consistency.json b/test/sessions/implicit-sessions-default-causal-consistency.json new file mode 100644 index 0000000000..517c8ebc63 --- /dev/null +++ b/test/sessions/implicit-sessions-default-causal-consistency.json @@ -0,0 +1,318 @@ +{ + "description": "implicit sessions default causal consistency", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "implicit-cc-tests" + } + }, + { + "collection": { + "id": "collectionDefault", + "database": "database0", + "collectionName": "coll-default" + } + }, + { + "collection": { + "id": "collectionSnapshot", + "database": "database0", + "collectionName": "coll-snapshot", + "collectionOptions": { + "readConcern": { + "level": "snapshot" + } + } + } + }, + { + "collection": { + "id": "collectionlinearizable", + "database": "database0", + "collectionName": "coll-linearizable", + "collectionOptions": { + "readConcern": { + "level": "linearizable" + } + } + } + } + ], + "initialData": [ + { + "collectionName": "coll-default", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "default" + } + ] + }, + { + "collectionName": "coll-snapshot", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "snapshot" + } + ] + }, + { + "collectionName": "coll-linearizable", + "databaseName": "implicit-cc-tests", + "documents": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "tests": [ + { + "description": "readConcern is not sent on retried read in implicit session when readConcern level is not specified", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionDefault", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "default" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-default", + "filter": {}, + "readConcern": { + "$$exists": false + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is snapshot", + "runOnRequirements": [ + { + "minServerVersion": "5.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionSnapshot", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "snapshot" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-snapshot", + "filter": {}, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + }, + { + "description": "afterClusterTime is not sent on retried read in implicit session when readConcern level is linearizable", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11600 + } + } + } + }, + { + "name": "find", + "object": "collectionlinearizable", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": "linearizable" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "coll-linearizable", + "filter": {}, + "readConcern": { + "level": "linearizable", + "afterClusterTime": { + "$$exists": false + } + } + }, + "databaseName": "implicit-cc-tests" + } + } + ] + } + ] + } + ] +} diff --git a/test/sessions/snapshot-sessions-unsupported-ops.json b/test/sessions/snapshot-sessions-unsupported-ops.json index 1021b7f264..c41f74d337 100644 --- a/test/sessions/snapshot-sessions-unsupported-ops.json +++ b/test/sessions/snapshot-sessions-unsupported-ops.json @@ -6,7 +6,7 @@ "minServerVersion": "5.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/sessions/snapshot-sessions.json b/test/sessions/snapshot-sessions.json index 75b577b039..260f8b6f48 100644 --- a/test/sessions/snapshot-sessions.json +++ b/test/sessions/snapshot-sessions.json @@ -6,7 +6,7 @@ "minServerVersion": "5.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/sigstop_sigcont.py b/test/sigstop_sigcont.py index 87b4f62038..bc1bacce33 100644 --- a/test/sigstop_sigcont.py +++ b/test/sigstop_sigcont.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Used by test_client.TestClient.test_sigstop_sigcont.""" +from __future__ import annotations import logging import os @@ -21,8 +22,8 @@ sys.path[0:0] = [""] from pymongo import monitoring -from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi +from pymongo.synchronous.mongo_client import MongoClient SERVER_API = None MONGODB_API_VERSION = os.environ.get("MONGODB_API_VERSION") @@ -84,7 +85,7 @@ def main(uri: str) -> None: if len(sys.argv) != 2: print("unknown or missing options") print(f"usage: python3 {sys.argv[0]} 'mongodb://localhost'") - exit(1) + sys.exit(1) # Enable logs in this format: # 2022-03-30 12:40:55,582 INFO diff --git a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json index 3f500acdc6..8e459115c1 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-directConnection.json @@ -10,5 +10,6 @@ "loadBalanced": true, "ssl": true, "directConnection": false - } + }, + "ping": true } diff --git a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json index f9719e760d..39bff5a23b 100644 --- a/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json +++ b/test/srv_seedlist/load-balanced/loadBalanced-true-txt.json @@ -9,5 +9,6 @@ "options": { "loadBalanced": true, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json index a18360ea64..474a314fd7 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero-txt.json @@ -10,5 +10,6 @@ "loadBalanced": true, "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json index bd85418117..dfc90dc96d 100644 --- a/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json +++ b/test/srv_seedlist/load-balanced/srvMaxHosts-zero.json @@ -10,5 +10,6 @@ "loadBalanced": true, "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json new file mode 100644 index 0000000000..b5fcfd2c07 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas-escaped.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some%2Cdb?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": "some,db" + } +} diff --git a/test/srv_seedlist/replica-set/dbname-with-commas.json b/test/srv_seedlist/replica-set/dbname-with-commas.json new file mode 100644 index 0000000000..c1e85f4b99 --- /dev/null +++ b/test/srv_seedlist/replica-set/dbname-with-commas.json @@ -0,0 +1,19 @@ +{ + "uri": "mongodb+srv://test1.test.build.10gen.cc/some,db?replicaSet=repl0", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, + "parsed_options": { + "defaultDatabase": 
"some,db" + } +} diff --git a/test/srv_seedlist/replica-set/direct-connection-false.json b/test/srv_seedlist/replica-set/direct-connection-false.json index 1d57bdcb3c..3f14ff94e7 100644 --- a/test/srv_seedlist/replica-set/direct-connection-false.json +++ b/test/srv_seedlist/replica-set/direct-connection-false.json @@ -11,5 +11,6 @@ "options": { "ssl": true, "directConnection": false - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json index 70c6c23a39..4493628be9 100644 --- a/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json +++ b/test/srv_seedlist/replica-set/encoded-userinfo-and-db.json @@ -17,5 +17,6 @@ "password": "$4to@L8=MC", "db": "mydb?" }, + "ping": false, "comment": "Encoded user, pass, and DB parse correctly" } diff --git a/test/srv_seedlist/replica-set/loadBalanced-false-txt.json b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json index fd2e565c7b..682d32a742 100644 --- a/test/srv_seedlist/replica-set/loadBalanced-false-txt.json +++ b/test/srv_seedlist/replica-set/loadBalanced-false-txt.json @@ -11,5 +11,6 @@ "options": { "loadBalanced": false, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/longer-parent-in-return.json b/test/srv_seedlist/replica-set/longer-parent-in-return.json index 9a8267eaeb..ebe3fe1e77 100644 --- a/test/srv_seedlist/replica-set/longer-parent-in-return.json +++ b/test/srv_seedlist/replica-set/longer-parent-in-return.json @@ -12,5 +12,6 @@ "replicaSet": "repl0", "ssl": true }, + "ping": true, "comment": "Is correct, as returned host name shared the URI root \"test.build.10gen.cc\"." } diff --git a/test/srv_seedlist/replica-set/one-result-default-port.json b/test/srv_seedlist/replica-set/one-result-default-port.json index cebb3b1ec3..9f7733de80 100644 --- a/test/srv_seedlist/replica-set/one-result-default-port.json +++ b/test/srv_seedlist/replica-set/one-result-default-port.json @@ -11,5 +11,6 @@ "options": { "replicaSet": "repl0", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json b/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json index 622668c351..1d740b1b59 100644 --- a/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json +++ b/test/srv_seedlist/replica-set/one-txt-record-multiple-strings.json @@ -11,5 +11,6 @@ "options": { "replicaSet": "repl0", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/one-txt-record.json b/test/srv_seedlist/replica-set/one-txt-record.json index 2385021ad4..ecdb0a7e2a 100644 --- a/test/srv_seedlist/replica-set/one-txt-record.json +++ b/test/srv_seedlist/replica-set/one-txt-record.json @@ -12,5 +12,6 @@ "replicaSet": "repl0", "authSource": "thisDB", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srv-service-name.json b/test/srv_seedlist/replica-set/srv-service-name.json index ec36cdbb00..e320c2ca3e 100644 --- a/test/srv_seedlist/replica-set/srv-service-name.json +++ b/test/srv_seedlist/replica-set/srv-service-name.json @@ -12,5 +12,6 @@ "options": { "ssl": true, "srvServiceName": "customname" - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json index d9765ac663..70edacfd06 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json +++ 
b/test/srv_seedlist/replica-set/srvMaxHosts-equal_to_srv_records.json @@ -13,5 +13,6 @@ "options": { "srvMaxHosts": 2, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json index 494bb87687..72540ed408 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-greater_than_srv_records.json @@ -12,5 +12,6 @@ "options": { "srvMaxHosts": 3, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json index 66a5e90dad..a9d6dd6fd9 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-less_than_srv_records.json @@ -9,5 +9,6 @@ "options": { "srvMaxHosts": 1, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json index 241a901c64..e232edb9eb 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero-txt.json @@ -13,5 +13,6 @@ "replicaSet": "repl0", "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/srvMaxHosts-zero.json b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json index c68610a201..3421a35a3d 100644 --- a/test/srv_seedlist/replica-set/srvMaxHosts-zero.json +++ b/test/srv_seedlist/replica-set/srvMaxHosts-zero.json @@ -13,5 +13,6 @@ "replicaSet": "repl0", "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/two-results-default-port.json b/test/srv_seedlist/replica-set/two-results-default-port.json index 66028310a6..43efcc6310 100644 --- a/test/srv_seedlist/replica-set/two-results-default-port.json +++ b/test/srv_seedlist/replica-set/two-results-default-port.json @@ -12,5 +12,6 @@ "options": { "replicaSet": "repl0", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/two-results-nonstandard-port.json b/test/srv_seedlist/replica-set/two-results-nonstandard-port.json index 4900f7cff1..f6e8e415a7 100644 --- a/test/srv_seedlist/replica-set/two-results-nonstandard-port.json +++ b/test/srv_seedlist/replica-set/two-results-nonstandard-port.json @@ -12,5 +12,6 @@ "options": { "replicaSet": "repl0", "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json index 0ebc737bd5..3d84cfe446 100644 --- a/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json +++ b/test/srv_seedlist/replica-set/txt-record-with-overridden-ssl-option.json @@ -12,5 +12,6 @@ "replicaSet": "repl0", "authSource": "thisDB", "ssl": false - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json b/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json index 2626ba6083..1a5a240680 100644 --- a/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json +++ b/test/srv_seedlist/replica-set/txt-record-with-overridden-uri-option.json @@ -12,5 +12,6 @@ "replicaSet": "repl0", "authSource": "otherDB", "ssl": true - } + }, + "ping": true } diff --git 
a/test/srv_seedlist/replica-set/uri-with-admin-database.json b/test/srv_seedlist/replica-set/uri-with-admin-database.json index 32710d75f7..c5513a0dad 100644 --- a/test/srv_seedlist/replica-set/uri-with-admin-database.json +++ b/test/srv_seedlist/replica-set/uri-with-admin-database.json @@ -15,5 +15,6 @@ }, "parsed_options": { "auth_database": "adminDB" - } + }, + "ping": true } diff --git a/test/srv_seedlist/replica-set/uri-with-auth.json b/test/srv_seedlist/replica-set/uri-with-auth.json index cc7257d85b..872f997cc7 100644 --- a/test/srv_seedlist/replica-set/uri-with-auth.json +++ b/test/srv_seedlist/replica-set/uri-with-auth.json @@ -9,9 +9,14 @@ "localhost:27018", "localhost:27019" ], + "options": { + "replicaSet": "repl0", + "ssl": true + }, "parsed_options": { "user": "auser", "password": "apass" }, + "ping": false, "comment": "Should preserve auth credentials" } diff --git a/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json b/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json new file mode 100644 index 0000000000..40579aa44c --- /dev/null +++ b/test/srv_seedlist/replica-set/uri-with-uppercase-hostname.json @@ -0,0 +1,16 @@ +{ + "uri": "mongodb+srv://TEST1.TEST.BUILD.10GEN.CC", + "seeds": [ + "localhost.test.build.10gen.cc:27017", + "localhost.test.build.10gen.cc:27018" + ], + "hosts": [ + "localhost:27017", + "localhost:27018", + "localhost:27019" + ], + "options": { + "ssl": true + }, + "ping": true +} diff --git a/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json index 46390726f0..7d2f9a6bf8 100644 --- a/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json +++ b/test/srv_seedlist/sharded/srvMaxHosts-equal_to_srv_records.json @@ -12,5 +12,6 @@ "options": { "srvMaxHosts": 2, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json index e02d72bf28..452c7b54db 100644 --- a/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json +++ b/test/srv_seedlist/sharded/srvMaxHosts-greater_than_srv_records.json @@ -11,5 +11,6 @@ "options": { "srvMaxHosts": 3, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json index fdcc1692c0..cd3bf65117 100644 --- a/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json +++ b/test/srv_seedlist/sharded/srvMaxHosts-less_than_srv_records.json @@ -5,5 +5,6 @@ "options": { "srvMaxHosts": 1, "ssl": true - } + }, + "ping": true } diff --git a/test/srv_seedlist/sharded/srvMaxHosts-zero.json b/test/srv_seedlist/sharded/srvMaxHosts-zero.json index 10ab9e656d..f289628c9c 100644 --- a/test/srv_seedlist/sharded/srvMaxHosts-zero.json +++ b/test/srv_seedlist/sharded/srvMaxHosts-zero.json @@ -11,5 +11,6 @@ "options": { "srvMaxHosts": 0, "ssl": true - } + }, + "ping": true } diff --git a/test/test_auth.py b/test/test_auth.py index 69ed27bda0..27f6743fae 100644 --- a/test/test_auth.py +++ b/test/test_auth.py @@ -13,7 +13,9 @@ # limitations under the License. 
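The `ping` field added to every seedlist fixture above tells the test runner whether to follow URI parsing with a live connectivity check; fixtures whose credentials the harness cannot satisfy (encoded-userinfo-and-db, uri-with-auth) opt out with `"ping": false`, and the new uri-with-uppercase-hostname fixture checks that SRV resolution treats the domain case-insensitively. A hedged sketch of how a runner might consume the flag (`run_seedlist_case` is illustrative, not the actual harness):

```python
import json

from pymongo import MongoClient

def run_seedlist_case(path: str) -> None:
    with open(path) as f:
        case = json.load(f)
    # Assertions on seeds/hosts/options/parsed_options would go here.
    client = MongoClient(case["uri"], connect=False)
    try:
        if case.get("ping", False):
            # Connectivity is asserted only when the fixture opts in.
            client.admin.command("ping")
    finally:
        client.close()
```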
"""Authentication Tests.""" +from __future__ import annotations +import asyncio import os import sys import threading @@ -21,23 +23,28 @@ sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, unittest -from test.utils import ( - AllowListEventListener, - delay, - ignore_deprecations, - rs_or_single_client, - rs_or_single_client_noauth, - single_client, - single_client_noauth, +from test import ( + IntegrationTest, + PyMongoTestCase, + SkipTest, + client_context, + unittest, ) +from test.utils_shared import AllowListEventListener, delay, ignore_deprecations + +import pytest from pymongo import MongoClient, monitoring -from pymongo.auth import HAVE_KERBEROS, _build_credentials_tuple +from pymongo.auth_shared import _build_credentials_tuple from pymongo.errors import OperationFailure from pymongo.hello import HelloCompat from pymongo.read_preferences import ReadPreference from pymongo.saslprep import HAVE_STRINGPREP +from pymongo.synchronous.auth import HAVE_KERBEROS, _canonicalize_hostname + +_IS_SYNC = True + +pytestmark = pytest.mark.auth # YOU MUST RUN KINIT BEFORE RUNNING GSSAPI TESTS ON UNIX. GSSAPI_HOST = os.environ.get("GSSAPI_HOST") @@ -60,14 +67,14 @@ class AutoAuthenticateThread(threading.Thread): """Used in testing threaded authentication. This does collection.find_one() with a 1-second delay to ensure it must - check out and authenticate multiple sockets from the pool concurrently. + check out and authenticate multiple connections from the pool concurrently. :Parameters: `collection`: An auth-protected collection containing one document. """ def __init__(self, collection): - super(AutoAuthenticateThread, self).__init__() + super().__init__() self.collection = collection self.success = False @@ -76,7 +83,7 @@ def run(self): self.success = True -class TestGSSAPI(unittest.TestCase): +class TestGSSAPI(PyMongoTestCase): mech_properties: str service_realm_required: bool @@ -89,10 +96,11 @@ def setUpClass(cls): cls.service_realm_required = ( GSSAPI_SERVICE_REALM is not None and GSSAPI_SERVICE_REALM not in GSSAPI_PRINCIPAL ) - mech_properties = "SERVICE_NAME:%s" % (GSSAPI_SERVICE_NAME,) - mech_properties += ",CANONICALIZE_HOST_NAME:%s" % (GSSAPI_CANONICALIZE,) + mech_properties = dict( + SERVICE_NAME=GSSAPI_SERVICE_NAME, CANONICALIZE_HOST_NAME=GSSAPI_CANONICALIZE + ) if GSSAPI_SERVICE_REALM is not None: - mech_properties += ",SERVICE_REALM:%s" % (GSSAPI_SERVICE_REALM,) + mech_properties["SERVICE_REALM"] = GSSAPI_SERVICE_REALM cls.mech_properties = mech_properties def test_credentials_hashing(self): @@ -111,8 +119,8 @@ def test_credentials_hashing(self): "GSSAPI", None, "user", "pass", {"authmechanismproperties": {"SERVICE_NAME": "B"}}, None ) - self.assertEqual(1, len(set([creds1, creds2]))) - self.assertEqual(3, len(set([creds0, creds1, creds2, creds3]))) + self.assertEqual(1, len({creds1, creds2})) + self.assertEqual(3, len({creds0, creds1, creds2, creds3})) @ignore_deprecations def test_gssapi_simple(self): @@ -133,7 +141,7 @@ def test_gssapi_simple(self): if not self.service_realm_required: # Without authMechanismProperties. - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -144,11 +152,11 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Log in using URI, without authMechanismProperties. - client = MongoClient(uri) + client = self.simple_client(uri) client[GSSAPI_DB].collection.find_one() # Authenticate with authMechanismProperties. 
- client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -160,15 +168,18 @@ def test_gssapi_simple(self): client[GSSAPI_DB].collection.find_one() # Log in using URI, with authMechanismProperties. - mech_uri = uri + "&authMechanismProperties=%s" % (self.mech_properties,) - client = MongoClient(mech_uri) + mech_properties_str = "" + for key, value in self.mech_properties.items(): + mech_properties_str += f"{key}:{value}," + mech_uri = uri + f"&authMechanismProperties={mech_properties_str[:-1]}" + client = self.simple_client(mech_uri) client[GSSAPI_DB].collection.find_one() - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = client_context.replica_set_name if set_name: if not self.service_realm_required: # Without authMechanismProperties - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -179,12 +190,12 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() - uri = uri + "&replicaSet=%s" % (str(set_name),) - client = MongoClient(uri) + uri = uri + f"&replicaSet={set_name!s}" + client = self.simple_client(uri) client[GSSAPI_DB].list_collection_names() # With authMechanismProperties - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -196,13 +207,14 @@ def test_gssapi_simple(self): client[GSSAPI_DB].list_collection_names() - mech_uri = mech_uri + "&replicaSet=%s" % (str(set_name),) - client = MongoClient(mech_uri) + mech_uri = mech_uri + f"&replicaSet={set_name!s}" + client = self.simple_client(mech_uri) client[GSSAPI_DB].list_collection_names() @ignore_deprecations + @client_context.require_sync def test_gssapi_threaded(self): - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -217,7 +229,7 @@ def test_gssapi_threaded(self): # Need one document in the collection. AutoAuthenticateThread does # collection.find_one with a 1-second delay, forcing it to check out - # multiple sockets from the pool concurrently, proving that + # multiple connections from the pool concurrently, proving that # auto-authentication works with GSSAPI. collection = db.test if not collection.count_documents({}): @@ -236,9 +248,9 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = client_context.replica_set_name if set_name: - client = MongoClient( + client = self.simple_client( GSSAPI_HOST, GSSAPI_PORT, username=GSSAPI_PRINCIPAL, @@ -260,16 +272,67 @@ def test_gssapi_threaded(self): thread.join() self.assertTrue(thread.success) + def test_gssapi_canonicalize_host_name(self): + # Test the low level method. + assert GSSAPI_HOST is not None + result = _canonicalize_hostname(GSSAPI_HOST, "forward") + if "compute-1.amazonaws.com" not in result: + self.assertEqual(result, GSSAPI_HOST) + result = _canonicalize_hostname(GSSAPI_HOST, "forwardAndReverse") + self.assertEqual(result, GSSAPI_HOST) + + # Use the equivalent named CANONICALIZE_HOST_NAME. 
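That is, the legacy boolean values for `CANONICALIZE_HOST_NAME` correspond to named modes, which the copied properties below exercise. The correspondence as a standalone sketch (illustrative only, not driver internals):

```python
def named_canonicalize_mode(value: str) -> str:
    # Booleans map onto the named modes; named modes pass through unchanged.
    return {"true": "forwardAndReverse", "false": "none"}.get(value, value)

assert named_canonicalize_mode("true") == "forwardAndReverse"
assert named_canonicalize_mode("false") == "none"
assert named_canonicalize_mode("forward") == "forward"
```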
+ props = self.mech_properties.copy() + if props["CANONICALIZE_HOST_NAME"] == "true": + props["CANONICALIZE_HOST_NAME"] = "forwardAndReverse" + else: + props["CANONICALIZE_HOST_NAME"] = "none" + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=props, + ) + client.server_info() + + def test_gssapi_host_name(self): + props = self.mech_properties + props["SERVICE_HOST"] = "example.com" + + # Authenticate with authMechanismProperties. + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + with self.assertRaises(OperationFailure): + client.server_info() + + props["SERVICE_HOST"] = GSSAPI_HOST + client = self.simple_client( + GSSAPI_HOST, + GSSAPI_PORT, + username=GSSAPI_PRINCIPAL, + password=GSSAPI_PASS, + authMechanism="GSSAPI", + authMechanismProperties=self.mech_properties, + ) + client.server_info() + -class TestSASLPlain(unittest.TestCase): +class TestSASLPlain(PyMongoTestCase): @classmethod def setUpClass(cls): if not SASL_HOST or not SASL_USER or not SASL_PASS: raise SkipTest("Must set SASL_HOST, SASL_USER, and SASL_PASS to test SASL") def test_sasl_plain(self): - - client = MongoClient( + client = self.simple_client( SASL_HOST, SASL_PORT, username=SASL_USER, @@ -288,12 +351,12 @@ def test_sasl_plain(self): SASL_PORT, SASL_DB, ) - client = MongoClient(uri) + client = self.simple_client(uri) client.ldap.test.find_one() - set_name = client.admin.command(HelloCompat.LEGACY_CMD).get("setName") + set_name = client_context.replica_set_name if set_name: - client = MongoClient( + client = self.simple_client( SASL_HOST, SASL_PORT, replicaSet=set_name, @@ -312,7 +375,7 @@ def test_sasl_plain(self): SASL_DB, str(set_name), ) - client = MongoClient(uri) + client = self.simple_client(uri) client.ldap.test.find_one() def test_sasl_plain_bad_credentials(self): @@ -326,27 +389,30 @@ def auth_string(user, password): ) return uri - bad_user = MongoClient(auth_string("not-user", SASL_PASS)) - bad_pwd = MongoClient(auth_string(SASL_USER, "not-pwd")) + bad_user = self.simple_client(auth_string("not-user", SASL_PASS)) + bad_pwd = self.simple_client(auth_string(SASL_USER, "not-pwd")) # OperationFailure raised upon connecting. 
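The hunk below converts the remaining positional `assertRaises` calls to the context-manager form, the idiom this patch standardizes on; it reads better for multi-argument calls and keeps the raised exception inspectable. A self-contained illustration (`ZeroDivisionError` stands in for `OperationFailure`):

```python
import unittest

class IdiomExample(unittest.TestCase):
    def test_context_manager_form(self):
        with self.assertRaises(ZeroDivisionError) as ctx:
            1 / 0
        # The exception object remains available for further assertions.
        self.assertIn("division", str(ctx.exception))

if __name__ == "__main__":
    unittest.main()
```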
- self.assertRaises(OperationFailure, bad_user.admin.command, "ping") - self.assertRaises(OperationFailure, bad_pwd.admin.command, "ping") + with self.assertRaises(OperationFailure): + bad_user.admin.command("ping") + with self.assertRaises(OperationFailure): + bad_pwd.admin.command("ping") class TestSCRAMSHA1(IntegrationTest): @client_context.require_auth def setUp(self): - super(TestSCRAMSHA1, self).setUp() + super().setUp() client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) def tearDown(self): client_context.drop_user("pymongo_test", "user") - super(TestSCRAMSHA1, self).tearDown() + super().tearDown() + @client_context.require_no_fips def test_scram_sha1(self): host, port = client_context.host, client_context.port - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( "mongodb://user:pass@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" % (host, port) ) client.pymongo_test.command("dbstats") @@ -357,27 +423,27 @@ def test_scram_sha1(self): "@%s:%d/pymongo_test?authMechanism=SCRAM-SHA-1" "&replicaSet=%s" % (host, port, client_context.replica_set_name) ) - client = single_client_noauth(uri) + client = self.single_client_noauth(uri) client.pymongo_test.command("dbstats") db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) db.command("dbstats") -# https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst#scram-sha-256-and-mechanism-negotiation +# https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#scram-sha-256-and-mechanism-negotiation class TestSCRAM(IntegrationTest): @client_context.require_auth @client_context.require_version_min(3, 7, 2) def setUp(self): - super(TestSCRAM, self).setUp() + super().setUp() self._SENSITIVE_COMMANDS = monitoring._SENSITIVE_COMMANDS - monitoring._SENSITIVE_COMMANDS = set([]) + monitoring._SENSITIVE_COMMANDS = set() self.listener = AllowListEventListener("saslStart") def tearDown(self): monitoring._SENSITIVE_COMMANDS = self._SENSITIVE_COMMANDS client_context.client.testscram.command("dropAllUsersFromDatabase") client_context.client.drop_database("testscram") - super(TestSCRAM, self).tearDown() + super().tearDown() def test_scram_skip_empty_exchange(self): listener = AllowListEventListener("saslStart", "saslContinue") @@ -385,14 +451,14 @@ def test_scram_skip_empty_exchange(self): "testscram", "sha256", "pwd", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] ) - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", event_listeners=[listener] ) client.testscram.command("dbstats") if client_context.version < (4, 4, -1): # Assert we sent the skipEmptyExchange option. 
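For context on the assertion rewritten just below: on servers older than 4.4 the driver sends `saslStart` with `options: {skipEmptyExchange: true}`, asking the server to drop the final empty SASL round trip. Observing SASL traffic requires clearing `monitoring._SENSITIVE_COMMANDS` first, exactly as the setUp above does, because sensitive command payloads are otherwise redacted from monitoring. A hedged standalone sketch (credentials and URI are placeholders):

```python
from pymongo import MongoClient, monitoring

monitoring._SENSITIVE_COMMANDS = set()  # sasl* payloads are redacted otherwise

class SaslWatcher(monitoring.CommandListener):
    def __init__(self):
        self.started_events = []

    def started(self, event):
        if event.command_name in ("saslStart", "saslContinue"):
            self.started_events.append(event)

    def succeeded(self, event):
        pass

    def failed(self, event):
        pass

watcher = SaslWatcher()
client = MongoClient("mongodb://user:pass@localhost", event_listeners=[watcher])
client.admin.command("ping")
for event in watcher.started_events:
    # Pre-4.4, expect options={'skipEmptyExchange': True} on saslStart;
    # on 4.4+ the first saslStart rides the handshake and may not appear.
    print(event.command_name, event.command.get("options"))
```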
- first_event = listener.results["started"][0] + first_event = listener.started_events[0] self.assertEqual(first_event.command_name, "saslStart") self.assertEqual(first_event.command["options"], {"skipEmptyExchange": True}) @@ -404,6 +470,7 @@ def test_scram_skip_empty_exchange(self): else: self.assertEqual(started, ["saslStart", "saslContinue", "saslContinue"]) + @client_context.require_no_fips def test_scram(self): # Step 1: create users client_context.create_user( @@ -421,61 +488,63 @@ def test_scram(self): ) # Step 2: verify auth success cases - client = rs_or_single_client_noauth(username="sha1", password="pwd", authSource="testscram") + client = self.rs_or_single_client_noauth( + username="sha1", password="pwd", authSource="testscram" + ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) client.testscram.command("dbstats") # Step 2: SCRAM-SHA-1 and SCRAM-SHA-256 - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) client.testscram.command("dbstats") - self.listener.results.clear() - client = rs_or_single_client_noauth( + self.listener.reset() + client = self.rs_or_single_client_noauth( username="both", password="pwd", authSource="testscram", event_listeners=[self.listener] ) client.testscram.command("dbstats") if client_context.version.at_least(4, 4, -1): # Speculative authentication in 4.4+ sends saslStart with the # handshake. 
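A note on why the `both` user ends up on SCRAM-SHA-256 above: per the auth spec linked in the comment, when no `authMechanism` is given the driver negotiates by sending `saslSupportedMechs` in the initial hello, and prefers SCRAM-SHA-256 whenever the server offers it for that user. A sketch of the selection rule (a hedged simplification of the spec, not driver code):

```python
def negotiate_mechanism(server_mechs):
    # Prefer SCRAM-SHA-256 when offered; otherwise fall back to SCRAM-SHA-1.
    return "SCRAM-SHA-256" if "SCRAM-SHA-256" in server_mechs else "SCRAM-SHA-1"

assert negotiate_mechanism(["SCRAM-SHA-1", "SCRAM-SHA-256"]) == "SCRAM-SHA-256"
assert negotiate_mechanism(["SCRAM-SHA-1"]) == "SCRAM-SHA-1"
```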
- self.assertEqual(self.listener.results["started"], []) + self.assertEqual(self.listener.started_events, []) else: - started = self.listener.results["started"][0] + started = self.listener.started_events[0] self.assertEqual(started.command.get("mechanism"), "SCRAM-SHA-256") # Step 3: verify auth failure conditions - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha1", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-256" ) with self.assertRaises(OperationFailure): client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="sha256", password="pwd", authSource="testscram", authMechanism="SCRAM-SHA-1" ) with self.assertRaises(OperationFailure): client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="not-a-user", password="pwd", authSource="testscram" ) with self.assertRaises(OperationFailure): @@ -488,7 +557,7 @@ def test_scram(self): port, client_context.replica_set_name, ) - client = single_client_noauth(uri) + client = self.single_client_noauth(uri) client.testscram.command("dbstats") db = client.get_database("testscram", read_preference=ReadPreference.SECONDARY) db.command("dbstats") @@ -508,12 +577,12 @@ def test_scram_saslprep(self): "testscram", "IX", "IX", roles=["dbOwner"], mechanisms=["SCRAM-SHA-256"] ) - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="\u2168", password="\u2163", authSource="testscram" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="\u2168", password="\u2163", authSource="testscram", @@ -521,17 +590,17 @@ def test_scram_saslprep(self): ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="\u2168", password="IV", authSource="testscram" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="IX", password="I\u00ADX", authSource="testscram" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="IX", password="I\u00ADX", authSource="testscram", @@ -539,25 +608,29 @@ def test_scram_saslprep(self): ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( username="IX", password="IX", authSource="testscram", authMechanism="SCRAM-SHA-256" ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth( + client = self.rs_or_single_client_noauth( "mongodb://\u2168:\u2163@%s:%d/testscram" % (host, port) ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth("mongodb://\u2168:IV@%s:%d/testscram" % (host, port)) + client = self.rs_or_single_client_noauth( + "mongodb://\u2168:IV@%s:%d/testscram" % (host, port) + ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth("mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port)) + client = self.rs_or_single_client_noauth( + "mongodb://IX:I\u00ADX@%s:%d/testscram" % (host, port) + ) client.testscram.command("dbstats") - client = rs_or_single_client_noauth("mongodb://IX:IX@%s:%d/testscram" % (host, port)) + client = self.rs_or_single_client_noauth("mongodb://IX:IX@%s:%d/testscram" % (host, port)) client.testscram.command("dbstats") def test_cache(self): - client = 
single_client() + client = self.single_client() credentials = client.options.pool_options._credentials cache = credentials.cache self.assertIsNotNone(cache) @@ -575,13 +648,15 @@ def test_cache(self): self.assertIsInstance(salt, bytes) self.assertIsInstance(iterations, int) + @client_context.require_sync def test_scram_threaded(self): coll = client_context.client.db.test coll.drop() coll.insert_one({"_id": 1}) # The first thread to call find() will authenticate - coll = rs_or_single_client().db.test + client = self.rs_or_single_client() + coll = client.db.test threads = [] for _ in range(4): threads.append(AutoAuthenticateThread(coll)) @@ -595,19 +670,19 @@ def test_scram_threaded(self): class TestAuthURIOptions(IntegrationTest): @client_context.require_auth def setUp(self): - super(TestAuthURIOptions, self).setUp() + super().setUp() client_context.create_user("admin", "admin", "pass") client_context.create_user("pymongo_test", "user", "pass", ["userAdmin", "readWrite"]) def tearDown(self): client_context.drop_user("pymongo_test", "user") client_context.drop_user("admin", "admin") - super(TestAuthURIOptions, self).tearDown() + super().tearDown() def test_uri_options(self): # Test default to admin host, port = client_context.host, client_context.port - client = rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) + client = self.rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port)) self.assertTrue(client.admin.command("dbstats")) if client_context.is_rs: @@ -616,15 +691,16 @@ def test_uri_options(self): port, client_context.replica_set_name, ) - client = single_client_noauth(uri) + client = self.single_client_noauth(uri) self.assertTrue(client.admin.command("dbstats")) db = client.get_database("admin", read_preference=ReadPreference.SECONDARY) self.assertTrue(db.command("dbstats")) # Test explicit database uri = "mongodb://user:pass@%s:%d/pymongo_test" % (host, port) - client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, client.admin.command, "dbstats") + client = self.rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + client.admin.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: @@ -633,16 +709,18 @@ def test_uri_options(self): port, client_context.replica_set_name, ) - client = single_client_noauth(uri) - self.assertRaises(OperationFailure, client.admin.command, "dbstats") + client = self.single_client_noauth(uri) + with self.assertRaises(OperationFailure): + client.admin.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) self.assertTrue(db.command("dbstats")) # Test authSource uri = "mongodb://user:pass@%s:%d/pymongo_test2?authSource=pymongo_test" % (host, port) - client = rs_or_single_client_noauth(uri) - self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + client = self.rs_or_single_client_noauth(uri) + with self.assertRaises(OperationFailure): + client.pymongo_test2.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) if client_context.is_rs: @@ -650,8 +728,9 @@ def test_uri_options(self): "mongodb://user:pass@%s:%d/pymongo_test2?replicaSet=" "%s;authSource=pymongo_test" % (host, port, client_context.replica_set_name) ) - client = single_client_noauth(uri) - self.assertRaises(OperationFailure, client.pymongo_test2.command, "dbstats") + client = self.single_client_noauth(uri) + with 
self.assertRaises(OperationFailure): + client.pymongo_test2.command("dbstats") self.assertTrue(client.pymongo_test.command("dbstats")) db = client.get_database("pymongo_test", read_preference=ReadPreference.SECONDARY) self.assertTrue(db.command("dbstats")) diff --git a/test/test_auth_oidc.py b/test/test_auth_oidc.py new file mode 100644 index 0000000000..877a5ca981 --- /dev/null +++ b/test/test_auth_oidc.py @@ -0,0 +1,1190 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test MONGODB-OIDC Authentication.""" +from __future__ import annotations + +import os +import sys +import time +import unittest +import warnings +from contextlib import contextmanager +from pathlib import Path +from test import PyMongoTestCase +from test.helpers import ConcurrentRunner +from typing import Dict + +import pytest + +sys.path[0:0] = [""] + +from test.unified_format import generate_test_classes +from test.utils_shared import EventListener, OvertCommandListener + +from bson import SON +from pymongo import MongoClient +from pymongo._azure_helpers import _get_azure_response +from pymongo._gcp_helpers import _get_gcp_response +from pymongo.auth_oidc_shared import _get_k8s_token +from pymongo.auth_shared import _build_credentials_tuple +from pymongo.cursor_shared import CursorType +from pymongo.errors import AutoReconnect, ConfigurationError, OperationFailure +from pymongo.hello import HelloCompat +from pymongo.operations import InsertOne +from pymongo.synchronous.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + _get_authenticator, +) +from pymongo.synchronous.uri_parser import parse_uri + +_IS_SYNC = True + +ROOT = Path(__file__).parent.parent.resolve() +TEST_PATH = ROOT / "auth" / "unified" +ENVIRON = os.environ.get("OIDC_ENV", "test") +DOMAIN = os.environ.get("OIDC_DOMAIN", "") +TOKEN_DIR = os.environ.get("OIDC_TOKEN_DIR", "") +TOKEN_FILE = os.environ.get("OIDC_TOKEN_FILE", "") + +# Generate unified tests. 
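For readers new to the unified format: `generate_test_classes` (from test/unified_format.py) walks the JSON spec files and returns generated test classes keyed by name, and the `globals().update(...)` call that follows injects them into this module so the test loader discovers them as if they were handwritten. Roughly (a sketch reusing this module's imports):

```python
# Assumes generate_test_classes, TEST_PATH and unittest from the imports above.
classes = generate_test_classes(str(TEST_PATH), module=__name__)
for name, cls in classes.items():
    assert issubclass(cls, unittest.TestCase), name
globals().update(classes)  # what the call below does in one step
```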
+globals().update(generate_test_classes(str(TEST_PATH), module=__name__)) + +pytestmark = pytest.mark.auth_oidc + + +class OIDCTestBase(PyMongoTestCase): + @classmethod + def setUpClass(cls): + cls.uri_single = os.environ["MONGODB_URI_SINGLE"] + cls.uri_multiple = os.environ.get("MONGODB_URI_MULTI") + cls.uri_admin = os.environ["MONGODB_URI"] + if ENVIRON == "test": + if not TOKEN_DIR: + raise ValueError("Please set OIDC_TOKEN_DIR") + if not TOKEN_FILE: + raise ValueError("Please set OIDC_TOKEN_FILE") + + def setUp(self): + self.request_called = 0 + + def get_token(self, username=None): + """Get a token for the current provider.""" + if ENVIRON == "test": + if username is None: + token_file = TOKEN_FILE + else: + token_file = os.path.join(TOKEN_DIR, username) + with open(token_file) as fid: # noqa: ASYNC101,RUF100 + return fid.read() + elif ENVIRON == "azure": + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + return _get_azure_response(token_aud, username)["access_token"] + elif ENVIRON == "gcp": + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + return _get_gcp_response(token_aud, username)["access_token"] + elif ENVIRON == "k8s": + return _get_k8s_token() + else: + raise ValueError(f"Unknown ENVIRON: {ENVIRON}") + + @contextmanager + def fail_point(self, command_args): + cmd_on = SON([("configureFailPoint", "failCommand")]) + cmd_on.update(command_args) + client = MongoClient(self.uri_admin) + client.admin.command(cmd_on) + try: + yield + finally: + client.admin.command("configureFailPoint", cmd_on["configureFailPoint"], mode="off") + client.close() + + +class TestAuthOIDCHuman(OIDCTestBase): + uri: str + + @classmethod + def setUpClass(cls): + if ENVIRON != "test": + raise unittest.SkipTest("Human workflows are only tested with the test environment") + if DOMAIN is None: + raise ValueError("Missing OIDC_DOMAIN") + super().setUpClass() + + def setUp(self): + self.refresh_present = 0 + super().setUp() + + def create_request_cb(self, username="test_user1", sleep=0): + def request_token(context: OIDCCallbackContext): + # Validate the info. + self.assertIsInstance(context.idp_info.issuer, str) + if context.idp_info.clientId is not None: + self.assertIsInstance(context.idp_info.clientId, str) + + # Validate the timeout. + timeout_seconds = context.timeout_seconds + self.assertEqual(timeout_seconds, 60 * 5) + + if context.refresh_token: + self.refresh_present += 1 + + token = self.get_token(username) + resp = OIDCCallbackResult(access_token=token, refresh_token=token) + + time.sleep(sleep) + self.request_called += 1 + return resp + + class Inner(OIDCCallback): + def fetch(self, context): + return request_token(context) + + return Inner() + + def create_client(self, *args, **kwargs): + username = kwargs.get("username", "test_user1") + if kwargs.get("username") in ["test_user1", "test_user2"]: + kwargs["username"] = f"{username}@{DOMAIN}" + request_cb = kwargs.pop("request_cb", self.create_request_cb(username=username)) + props = kwargs.pop("authmechanismproperties", {"OIDC_HUMAN_CALLBACK": request_cb}) + kwargs["retryReads"] = False + if not len(args): + args = [self.uri_single] + + client = self.simple_client(*args, authmechanismproperties=props, **kwargs) + + return client + + def test_1_1_single_principal_implicit_username(self): + # Create default OIDC client with authMechanism=MONGODB-OIDC. + client = self.create_client() + # Perform a find operation that succeeds. 
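The `Inner` classes built by `create_request_cb` above follow the OIDC callback protocol: subclass `OIDCCallback`, implement `fetch(context)`, and return an `OIDCCallbackResult`. A minimal standalone machine-flow callback, assuming the access token simply lives in a local file (the path and URI are placeholders):

```python
from pymongo import MongoClient
from pymongo.synchronous.auth_oidc import (
    OIDCCallback,
    OIDCCallbackContext,
    OIDCCallbackResult,
)

class FileTokenCallback(OIDCCallback):
    def fetch(self, context: OIDCCallbackContext) -> OIDCCallbackResult:
        # context carries timeout_seconds, version, and (human flow only)
        # idp_info plus any cached refresh_token.
        with open("/path/to/access_token") as fid:  # placeholder path
            return OIDCCallbackResult(access_token=fid.read().strip())

client = MongoClient(
    "mongodb://localhost/?authMechanism=MONGODB-OIDC",
    authmechanismproperties={"OIDC_CALLBACK": FileTokenCallback()},
)
```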
+ client.test.test.find_one() + # Close the client. + client.close() + + def test_1_2_single_principal_explicit_username(self): + # Create a client with MONGODB_URI_SINGLE, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(username="test_user1") + # Perform a find operation that succeeds. + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_3_multiple_principal_user_1(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, a username of test_user1, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple, username="test_user1") + # Perform a find operation that succeeds. + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_4_multiple_principal_user_2(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a human callback that reads in the generated test_user2 token file. + # Create a client with MONGODB_URI_MULTI, a username of test_user2, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple, username="test_user2") + # Perform a find operation that succeeds. + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_5_multiple_principal_no_user(self): + if not self.uri_multiple: + raise unittest.SkipTest("Test Requires Server with Multiple Workflow IdPs") + # Create a client with MONGODB_URI_MULTI, no username, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(self.uri_multiple) + # Assert that a find operation fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_6_allowed_hosts_blocked(self): + # Create a default OIDC client, with an ALLOWED_HOSTS that is an empty list. + request_token = self.create_request_cb() + props: Dict = {"OIDC_HUMAN_CALLBACK": request_token, "ALLOWED_HOSTS": []} + client = self.create_client(authmechanismproperties=props) + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + # Close the client. + client.close() + + # Create a client that uses the URL mongodb://localhost/?authMechanism=MONGODB-OIDC&ignored=example.com, + # a human callback, and an ALLOWED_HOSTS that contains ["example.com"]. + props: Dict = { + "OIDC_HUMAN_CALLBACK": request_token, + "ALLOWED_HOSTS": ["example.com"], + } + with warnings.catch_warnings(): + warnings.simplefilter("default") + client = self.create_client( + self.uri_single + "&ignored=example.com", + authmechanismproperties=props, + connect=False, + ) + # Assert that a find operation fails with a client-side error. + with self.assertRaises(ConfigurationError): + client.test.test.find_one() + # Close the client. + client.close() + + def test_1_7_allowed_hosts_in_connection_string_ignored(self): + # Create an OIDC configured client with the connection string: `mongodb+srv://example.com/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D` and a Human Callback. + # Assert that the creation of the client raises a configuration error. 
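`ALLOWED_HOSTS`, exercised by 1.6 and 1.7 above, is a human-flow-only client-side allow-list: when the target host does not match an entry, the driver refuses to invoke the callback and the operation fails with `ConfigurationError` before any credentials flow. A sketch reusing the `FileTokenCallback` from the earlier snippet (hosts are placeholders):

```python
from pymongo import MongoClient
from pymongo.errors import ConfigurationError

props = {
    "OIDC_HUMAN_CALLBACK": FileTokenCallback(),  # any OIDCCallback works here
    "ALLOWED_HOSTS": ["*.example.com"],          # placeholder allow-list
}
client = MongoClient(
    "mongodb://not-allowed.invalid/?authMechanism=MONGODB-OIDC",
    authmechanismproperties=props,
    connect=False,
)
try:
    client.test.test.find_one()  # rejected client-side, no traffic sent
except ConfigurationError as exc:
    print("blocked:", exc)
```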
+ uri = "mongodb+srv://example.com?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22example.com%22%5D" + with self.assertRaises(ConfigurationError), warnings.catch_warnings(): + warnings.simplefilter("ignore") + c = MongoClient( + uri, + authmechanismproperties=dict(OIDC_HUMAN_CALLBACK=self.create_request_cb()), + ) + c._connect() + + def test_1_8_machine_idp_human_callback(self): + if not os.environ.get("OIDC_IS_LOCAL"): + raise unittest.SkipTest("Test Requires Local OIDC server") + # Create a client with MONGODB_URI_SINGLE, a username of test_machine, authMechanism=MONGODB-OIDC, and the OIDC human callback. + client = self.create_client(username="test_machine") + # Perform a find operation that succeeds. + client.test.test.find_one() + # Close the client. + client.close() + + def test_2_1_valid_callback_inputs(self): + # Create a MongoClient with a human callback that validates its inputs and returns a valid access token. + client = self.create_client() + # Perform a find operation that succeeds. Verify that the human callback was called with the appropriate inputs, including the timeout parameter if possible. + # Ensure that there are no unexpected fields. + client.test.test.find_one() + # Close the client. + client.close() + + def test_2_2_callback_returns_missing_data(self): + # Create a MongoClient with a human callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCB(OIDCCallback): + def fetch(self, ctx): + return dict() + + client = self.create_client(request_cb=CustomCB()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + client.test.test.find_one() + # Close the client. + client.close() + + def test_2_3_refresh_token_is_passed_to_the_callback(self): + # Create a MongoClient with a human callback that checks for the presence of a refresh token. + client = self.create_client() + + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Set a fail point for ``find`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that succeeds. + client.test.test.find_one() + + # Assert that the callback has been called twice. + self.assertEqual(self.request_called, 2) + + # Assert that the refresh token was used once. + self.assertEqual(self.refresh_present, 1) + + def test_3_1_uses_speculative_authentication_if_there_is_a_cached_token(self): + # Create a client with a human callback that returns a valid token. + client = self.create_client() + + # Set a fail point for ``find`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(AutoReconnect): + client.test.test.find_one() + + # Set a fail point for ``saslStart`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a ``find`` operation that succeeds + client.test.test.find_one() + + # Close the client. + client.close() + + def test_3_2_does_not_use_speculative_authentication_if_there_is_no_cached_token(self): + # Create a ``MongoClient`` with a human callback that returns a valid token + client = self.create_client() + + # Set a fail point for ``saslStart`` commands. 
+ with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + # Close the client. + client.close() + + def test_4_1_reauthenticate_succeeds(self): + # Create a default OIDC client and add an event listener. + # The following assumes that the driver does not emit saslStart or saslContinue events. + # If the driver does emit those events, ignore/filter them for the purposes of this test. + listener = OvertCommandListener() + client = self.create_client(event_listeners=[listener]) + + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Clear the listener state if possible. + listener.reset() + + # Force a reauthentication using a fail point. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform another find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called twice. + self.assertEqual(self.request_called, 2) + + # Assert that the list of command started events is [find, find]. + # Note that if the listener state could not be cleared then there will be an extra find command. + started_events = [ + i.command_name for i in listener.started_events if not i.command_name.startswith("sasl") + ] + succeeded_events = [ + i.command_name + for i in listener.succeeded_events + if not i.command_name.startswith("sasl") + ] + failed_events = [ + i.command_name for i in listener.failed_events if not i.command_name.startswith("sasl") + ] + + self.assertEqual( + started_events, + [ + "find", + "find", + ], + ) + # Assert that the list of command succeeded events is [find]. + self.assertEqual(succeeded_events, ["find"]) + # Assert that a find operation failed once during the command execution. + self.assertEqual(failed_events, ["find"]) + # Close the client. + client.close() + + def test_4_2_reauthenticate_succeeds_no_refresh(self): + # Create a default OIDC client with a human callback that does not return a refresh token. + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + def fetch(self, *args, **kwargs): + result = cb.fetch(*args, **kwargs) + result.refresh_token = None + return result + + client = self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthentication using a fail point. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called twice. + self.assertEqual(self.request_called, 2) + # Close the client. + client.close() + + def test_4_3_reauthenticate_succeeds_after_refresh_fails(self): + # Create a default OIDC client with a human callback that returns an invalid refresh token. + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + def fetch(self, *args, **kwargs): + result = cb.fetch(*args, **kwargs) + result.refresh_token = "bad" + return result + + client = self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds.
+ client.test.test.find_one() + + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthentication using a fail point. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that succeeds. + client.test.test.find_one() + + # Assert that the human callback has been called 2 times. + self.assertEqual(self.request_called, 2) + + # Close the client. + client.close() + + def test_4_4_reauthenticate_fails(self): + # Create a default OIDC client with a human callback that returns invalid refresh tokens and + # returns invalid access tokens after the first access. + cb = self.create_request_cb() + + class CustomRequest(OIDCCallback): + fetch_called = 0 + + def fetch(self, *args, **kwargs): + self.fetch_called += 1 + result = cb.fetch(*args, **kwargs) + result.refresh_token = "bad" + if self.fetch_called > 1: + result.access_token = "bad" + return result + + client = self.create_client(request_cb=CustomRequest()) + + # Perform a find operation that succeeds (to force a speculative auth). + client.test.test.find_one() + # Assert that the human callback has been called once. + self.assertEqual(self.request_called, 1) + + # Force a reauthentication using a failCommand. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + # Assert that the human callback has been called three times. + self.assertEqual(self.request_called, 3) + + # Close the client. + client.close() + + def test_request_callback_returns_null(self): + class RequestTokenNull(OIDCCallback): + def fetch(self, a): + return None + + client = self.create_client(request_cb=RequestTokenNull()) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_request_callback_invalid_result(self): + class CallbackInvalidToken(OIDCCallback): + def fetch(self, a): + return {} + + client = self.create_client(request_cb=CallbackInvalidToken()) + with self.assertRaises(ValueError): + client.test.test.find_one() + client.close() + + def test_reauthentication_succeeds_multiple_connections(self): + request_cb = self.create_request_cb() + + # Create a client with the callback. + client1 = self.create_client(request_cb=request_cb) + client2 = self.create_client(request_cb=request_cb) + + # Perform an insert operation. + client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate.
+ client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + client1.test.test.find_one() + client2.test.test.find_one() + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + client1.close() + client2.close() + + # PyMongo specific tests, since we have multiple code paths for reauth handling. + + def test_reauthenticate_succeeds_bulk_write(self): + # Create a client. + client = self.create_client() + + # Perform a find operation. + client.test.test.find_one() + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["insert"], "errorCode": 391}, + } + ): + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_bulk_read(self): + # Create a client. + client = self.create_client() + + # Perform a find operation. + client.test.test.find_one() + + # Perform a bulk write operation. + client.test.test.bulk_write([InsertOne({})]) # type:ignore[type-var] + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a bulk read operation. + cursor = client.test.test.find_raw_batches({}) + cursor.to_list() + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_cursor(self): + # Create a client. + client = self.create_client() + + # Perform an insert operation. + client.test.test.insert_one({"a": 1}) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}) + self.assertGreaterEqual(len(cursor.to_list()), 1) + + # Assert that the request callback has been called twice. + self.assertEqual(self.request_called, 2) + client.close() + + def test_reauthenticate_succeeds_get_more(self): + # Create a client. + client = self.create_client() + + # Perform an insert operation. + client.test.test.insert_many([{"a": 1}, {"a": 1}]) + + # Assert that the request callback has been called once. + self.assertEqual(self.request_called, 1) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["getMore"], "errorCode": 391}, + } + ): + # Perform a find operation. + cursor = client.test.test.find({"a": 1}, batch_size=1) + self.assertGreaterEqual(len(cursor.to_list()), 1) + + # Assert that the request callback has been called twice. 
+        self.assertEqual(self.request_called, 2)
+        client.close()
+
+    def test_reauthenticate_succeeds_get_more_exhaust(self):
+        # Ensure no mongos
+        client = self.create_client()
+        hello = client.admin.command(HelloCompat.LEGACY_CMD)
+        if hello.get("msg") == "isdbgrid":
+            raise unittest.SkipTest("Must not be a mongos")
+
+        # Create a client with the callback.
+        client = self.create_client()
+
+        # Perform an insert operation.
+        client.test.test.insert_many([{"a": 1}, {"a": 1}])
+
+        # Assert that the request callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["getMore"], "errorCode": 391},
+            }
+        ):
+            # Perform a find operation.
+            cursor = client.test.test.find({"a": 1}, batch_size=1, cursor_type=CursorType.EXHAUST)
+            self.assertGreaterEqual(len(cursor.to_list()), 1)
+
+        # Assert that the request callback has been called twice.
+        self.assertEqual(self.request_called, 2)
+        client.close()
+
+    def test_reauthenticate_succeeds_command(self):
+        # Create a client.
+        client = self.create_client()
+
+        # Perform an insert operation.
+        client.test.test.insert_one({"a": 1})
+
+        # Assert that the request callback has been called once.
+        self.assertEqual(self.request_called, 1)
+
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["count"], "errorCode": 391},
+            }
+        ):
+            # Perform a count operation.
+            cursor = client.test.command({"count": "test"})
+
+        self.assertGreaterEqual(len(cursor), 1)
+
+        # Assert that the request callback has been called twice.
+        self.assertEqual(self.request_called, 2)
+        client.close()
+
+
+class TestAuthOIDCMachine(OIDCTestBase):
+    uri: str
+
+    def setUp(self):
+        self.request_called = 0
+
+    def create_request_cb(self, username=None, sleep=0):
+        def request_token(context):
+            assert isinstance(context.timeout_seconds, int)
+            assert context.version == 1
+            assert context.refresh_token is None
+            assert context.idp_info is None
+            token = self.get_token(username)
+            time.sleep(sleep)
+            self.request_called += 1
+            return OIDCCallbackResult(access_token=token)
+
+        class Inner(OIDCCallback):
+            def fetch(self, context):
+                return request_token(context)
+
+        return Inner()
+
+    def create_client(self, *args, **kwargs):
+        request_cb = kwargs.pop("request_cb", self.create_request_cb())
+        props = kwargs.pop("authmechanismproperties", {"OIDC_CALLBACK": request_cb})
+        kwargs["retryReads"] = False
+        if not len(args):
+            args = [self.uri_single]
+        client = MongoClient(*args, authmechanismproperties=props, **kwargs)
+        self.addCleanup(client.close)
+        return client
+
+    def test_1_1_callback_is_called_during_authentication(self):
+        # Create a ``MongoClient`` configured with a custom OIDC callback that
+        # implements the provider logic.
+        client = self.create_client()
+        # Perform a ``find`` operation that succeeds.
+        client.test.test.find_one()
+        # Assert that the callback was called 1 time.
+        self.assertEqual(self.request_called, 1)
+
+    def test_1_2_callback_is_called_once_for_multiple_connections(self):
+        # Create a ``MongoClient`` configured with a custom OIDC callback that
+        # implements the provider logic.
+        client = self.create_client()
+        client._connect()
+
+        # Start 10 tasks and run 100 find operations that all succeed in each task.
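+        # All connections share the client's single token cache, so the callback
+        # should fire only once across all tasks.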
+ def target(): + for _ in range(100): + client.test.test.find_one() + + tasks = [] + for i in range(10): + tasks.append(ConcurrentRunner(target=target)) + for t in tasks: + t.start() + for t in tasks: + t.join() + # Assert that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + def test_2_1_valid_callback_inputs(self): + # Create a MongoClient configured with an OIDC callback that validates its inputs and returns a valid access token. + client = self.create_client() + # Perform a find operation that succeeds. + client.test.test.find_one() + # Assert that the OIDC callback was called with the appropriate inputs, including the timeout parameter if possible. Ensure that there are no unexpected fields. + self.assertEqual(self.request_called, 1) + + def test_2_2_oidc_callback_returns_null(self): + # Create a MongoClient configured with an OIDC callback that returns null. + class CallbackNullToken(OIDCCallback): + def fetch(self, a): + return None + + client = self.create_client(request_cb=CallbackNullToken()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + client.test.test.find_one() + + def test_2_3_oidc_callback_returns_missing_data(self): + # Create a MongoClient configured with an OIDC callback that returns data not conforming to the OIDCCredential with missing fields. + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, a): + self.count += 1 + return object() + + client = self.create_client(request_cb=CustomCallback()) + # Perform a find operation that fails. + with self.assertRaises(ValueError): + client.test.test.find_one() + + def test_2_4_invalid_client_configuration_with_callback(self): + # Create a MongoClient configured with an OIDC callback and auth mechanism property ENVIRONMENT:test. + request_cb = self.create_request_cb() + props: Dict = {"OIDC_CALLBACK": request_cb, "ENVIRONMENT": "test"} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) + + def test_2_5_invalid_use_of_ALLOWED_HOSTS(self): + # Create an OIDC configured client with auth mechanism properties `{"ENVIRONMENT": "test", "ALLOWED_HOSTS": []}`. + props: Dict = {"ENVIRONMENT": "test", "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) + + # Create an OIDC configured client with auth mechanism properties `{"OIDC_CALLBACK": "", "ALLOWED_HOSTS": []}`. + props: Dict = {"OIDC_CALLBACK": self.create_request_cb(), "ALLOWED_HOSTS": []} + # Assert it returns a client configuration error. + with self.assertRaises(ConfigurationError): + self.create_client(authmechanismproperties=props) + + def test_2_6_ALLOWED_HOSTS_defaults_ignored(self): + # Create a MongoCredential for OIDC with a machine callback. + props = {"OIDC_CALLBACK": self.create_request_cb()} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, "foo", None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "foo" + + # Create a MongoCredential for OIDC with an ENVIRONMENT. 
+ props = {"ENVIRONMENT": "test"} + extra = dict(authmechanismproperties=props) + mongo_creds = _build_credentials_tuple("MONGODB-OIDC", None, None, None, extra, "test") + # Assert that creating an authenticator for example.com does not result in an error. + authenticator = _get_authenticator(mongo_creds, ("example.com", 30)) + assert authenticator.properties.username == "" + + def test_3_1_authentication_failure_with_cached_tokens_fetch_a_new_token_and_retry(self): + # Create a MongoClient and an OIDC callback that implements the provider logic. + client = self.create_client() + client._connect() + # Poison the cache with an invalid access token. + # Set a fail point for ``find`` command. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391, "closeConnection": True}, + } + ): + # Perform a ``find`` operation that fails. This is to force the ``MongoClient`` + # to cache an access token. + with self.assertRaises(AutoReconnect): + client.test.test.find_one() + # Poison the cache of the client. + client.options.pool_options._credentials.cache.data.access_token = "bad" + # Reset the request count. + self.request_called = 0 + # Verify that a find succeeds. + client.test.test.find_one() + # Verify that the callback was called 1 time. + self.assertEqual(self.request_called, 1) + + def test_3_2_authentication_failures_without_cached_tokens_returns_an_error(self): + # Create a MongoClient configured with retryReads=false and an OIDC callback that always returns invalid access tokens. + class CustomCallback(OIDCCallback): + count = 0 + + def fetch(self, a): + self.count += 1 + return OIDCCallbackResult(access_token="bad value") + + callback = CustomCallback() + client = self.create_client(request_cb=callback) + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + # Verify that the callback was called 1 time. + self.assertEqual(callback.count, 1) + + def test_3_3_unexpected_error_code_does_not_clear_cache(self): + # Create a ``MongoClient`` with a human callback that returns a valid token + client = self.create_client() + + # Set a fail point for ``saslStart`` commands. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["saslStart"], "errorCode": 20}, + } + ): + # Perform a ``find`` operation that fails. + with self.assertRaises(OperationFailure): + client.test.test.find_one() + + # Assert that the callback has been called once. + self.assertEqual(self.request_called, 1) + + # Perform a ``find`` operation that succeeds. + client.test.test.find_one() + + # Assert that the callback has been called once. + self.assertEqual(self.request_called, 1) + + def test_4_1_reauthentication_succeeds(self): + # Create a ``MongoClient`` configured with a custom OIDC callback that + # implements the provider logic. + client = self.create_client() + client._connect() + + # Set a fail point for the find command. + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Perform a ``find`` operation that succeeds. + client.test.test.find_one() + + # Verify that the callback was called 2 times (once during the connection + # handshake, and again during reauthentication). + self.assertEqual(self.request_called, 2) + + def test_4_2_read_commands_fail_if_reauthentication_fails(self): + # Create a ``MongoClient`` whose OIDC callback returns one good token and then + # bad tokens after the first call. 
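+        # Bind get_token to a local name so the callback class below can use it
+        # without holding a reference to the test instance.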
+        get_token = self.get_token
+
+        class CustomCallback(OIDCCallback):
+            count = 0
+
+            def fetch(self, _):
+                self.count += 1
+                if self.count == 1:
+                    access_token = get_token()
+                else:
+                    access_token = "bad value"
+                return OIDCCallbackResult(access_token=access_token)
+
+        callback = CustomCallback()
+        client = self.create_client(request_cb=callback)
+
+        # Perform a read operation that succeeds.
+        client.test.test.find_one()
+
+        # Set a fail point for the find command.
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["find"], "errorCode": 391},
+            }
+        ):
+            # Perform a ``find`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                client.test.test.find_one()
+
+        # Verify that the callback was called 2 times.
+        self.assertEqual(callback.count, 2)
+
+    def test_4_3_write_commands_fail_if_reauthentication_fails(self):
+        # Create a ``MongoClient`` whose OIDC callback returns one good token and then
+        # bad tokens after the first call.
+        get_token = self.get_token
+
+        class CustomCallback(OIDCCallback):
+            count = 0
+
+            def fetch(self, _):
+                self.count += 1
+                if self.count == 1:
+                    access_token = get_token()
+                else:
+                    access_token = "bad value"
+                return OIDCCallbackResult(access_token=access_token)
+
+        callback = CustomCallback()
+        client = self.create_client(request_cb=callback)
+
+        # Perform an insert operation that succeeds.
+        client.test.test.insert_one({})
+
+        # Set a fail point for the insert command.
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["insert"], "errorCode": 391},
+            }
+        ):
+            # Perform an ``insert`` operation that fails.
+            with self.assertRaises(OperationFailure):
+                client.test.test.insert_one({})
+
+        # Verify that the callback was called 2 times.
+        self.assertEqual(callback.count, 2)
+
+    def test_4_4_speculative_authentication_should_be_ignored_on_reauthentication(self):
+        # Create an OIDC configured client that can listen for `SaslStart` commands.
+        listener = EventListener()
+        client = self.create_client(event_listeners=[listener])
+        client._connect()
+
+        # Preload the *Client Cache* with a valid access token to enforce Speculative Authentication.
+        client2 = self.create_client()
+        client2.test.test.find_one()
+        client.options.pool_options._credentials.cache.data = (
+            client2.options.pool_options._credentials.cache.data
+        )
+        client2.close()
+        self.request_called = 0
+
+        # Perform an `insert` operation that succeeds.
+        client.test.test.insert_one({})
+
+        # Assert that the callback was not called.
+        self.assertEqual(self.request_called, 0)
+
+        # Assert there were no `SaslStart` commands executed.
+        assert not any(
+            event.command_name.lower() == "saslstart" for event in listener.started_events
+        )
+        listener.reset()
+
+        # Set a fail point for `insert` commands of the form:
+        with self.fail_point(
+            {
+                "mode": {"times": 1},
+                "data": {"failCommands": ["insert"], "errorCode": 391},
+            }
+        ):
+            # Perform an `insert` operation that succeeds.
+            client.test.test.insert_one({})
+
+        # Assert that the callback was called once.
+        self.assertEqual(self.request_called, 1)
+
+        # Assert there were `SaslStart` commands executed.
+        assert any(event.command_name.lower() == "saslstart" for event in listener.started_events)
+
+    def test_4_5_reauthentication_succeeds_when_a_session_is_involved(self):
+        # Create an OIDC configured client.
+ client = self.create_client() + + # Set a fail point for `find` commands of the form: + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + # Start a new session. + with client.start_session() as session: + # In the started session perform a `find` operation that succeeds. + client.test.test.find_one({}, session=session) + + # Assert that the callback was called 2 times (once during the connection handshake, and again during reauthentication). + self.assertEqual(self.request_called, 2) + + def test_5_1_azure_with_no_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + opts = parse_uri(self.uri_single)["options"] + resource = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=resource, ENVIRONMENT="azure") + client = self.create_client(authMechanismProperties=props) + client.test.test.find_one() + + def test_5_2_azure_with_bad_username(self): + if ENVIRON != "azure": + raise unittest.SkipTest("Test is only supported on Azure") + + opts = parse_uri(self.uri_single)["options"] + token_aud = opts["authMechanismProperties"]["TOKEN_RESOURCE"] + + props = dict(TOKEN_RESOURCE=token_aud, ENVIRONMENT="azure") + client = self.create_client(username="bad", authmechanismproperties=props) + with self.assertRaises(ValueError): + client.test.test.find_one() + + def test_speculative_auth_success(self): + client1 = self.create_client() + client1.test.test.find_one() + client2 = self.create_client() + client2._connect() + + # Prime the cache of the second client. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + # Set a fail point for saslStart commands. + with self.fail_point( + { + "mode": {"times": 2}, + "data": {"failCommands": ["saslStart"], "errorCode": 18}, + } + ): + # Perform a find operation. + client2.test.test.find_one() + + def test_reauthentication_succeeds_multiple_connections(self): + client1 = self.create_client() + client2 = self.create_client() + + # Perform an insert operation. + client1.test.test.insert_many([{"a": 1}, {"a": 1}]) + client2.test.test.find_one() + self.assertEqual(self.request_called, 2) + + # Use the same authenticator for both clients + # to simulate a race condition with separate connections. + # We should only see one extra callback despite both connections + # needing to reauthenticate. + client2.options.pool_options._credentials.cache.data = ( + client1.options.pool_options._credentials.cache.data + ) + + client1.test.test.find_one() + client2.test.test.find_one() + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client1.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 391}, + } + ): + client2.test.test.find_one() + + self.assertEqual(self.request_called, 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_auth_spec.py b/test/test_auth_spec.py index 9f2fa374ac..ac6411cd89 100644 --- a/test/test_auth_spec.py +++ b/test/test_auth_spec.py @@ -13,25 +13,41 @@ # limitations under the License. 
"""Run the auth spec tests.""" +from __future__ import annotations import glob import json import os import sys +import warnings +from test import PyMongoTestCase + +import pytest sys.path[0:0] = [""] from test import unittest +from test.unified_format import generate_test_classes from pymongo import MongoClient +from pymongo.auth_oidc_shared import OIDCCallback + +pytestmark = pytest.mark.auth + +_IS_SYNC = True _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "auth") -class TestAuthSpec(unittest.TestCase): +class TestAuthSpec(PyMongoTestCase): pass +class SampleHumanCallback(OIDCCallback): + def fetch(self, context): + pass + + def create_test(test_case): def run_test(self): uri = test_case["uri"] @@ -39,9 +55,11 @@ def run_test(self): credential = test_case.get("credential") if not valid: - self.assertRaises(Exception, MongoClient, uri, connect=False) + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.assertRaises(Exception, MongoClient, uri, connect=False) else: - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) credentials = client.options.pool_options._credentials if credential is None: self.assertIsNone(credentials) @@ -57,21 +75,8 @@ def run_test(self): expected = credential["mechanism_properties"] if expected is not None: actual = credentials.mechanism_properties - for key, val in expected.items(): - if "SERVICE_NAME" in expected: - self.assertEqual(actual.service_name, expected["SERVICE_NAME"]) - elif "CANONICALIZE_HOST_NAME" in expected: - self.assertEqual( - actual.canonicalize_host_name, expected["CANONICALIZE_HOST_NAME"] - ) - elif "SERVICE_REALM" in expected: - self.assertEqual(actual.service_realm, expected["SERVICE_REALM"]) - elif "AWS_SESSION_TOKEN" in expected: - self.assertEqual( - actual.aws_session_token, expected["AWS_SESSION_TOKEN"] - ) - else: - self.fail("Unhandled property: %s" % (key,)) + for key, value in expected.items(): + self.assertEqual(getattr(actual, key.lower()), value) else: if credential["mechanism"] == "MONGODB-AWS": self.assertIsNone(credentials.mechanism_properties.aws_session_token) @@ -82,7 +87,7 @@ def run_test(self): def create_tests(): - for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): + for filename in glob.glob(os.path.join(_TEST_PATH, "legacy", "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) with open(filename) as auth_tests: test_cases = json.load(auth_tests)["tests"] @@ -91,11 +96,18 @@ def create_tests(): continue test_method = create_test(test_case) name = str(test_case["description"].lower().replace(" ", "_")) - setattr(TestAuthSpec, "test_%s_%s" % (test_suffix, name), test_method) + setattr(TestAuthSpec, f"test_{test_suffix}_{name}", test_method) create_tests() +globals().update( + generate_test_classes( + os.path.join(_TEST_PATH, "unified"), + module=__name__, + ) +) + if __name__ == "__main__": unittest.main() diff --git a/test/test_binary.py b/test/test_binary.py index 65abdca796..a64aa42280 100644 --- a/test/test_binary.py +++ b/test/test_binary.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Tests for the Binary wrapper.""" +from __future__ import annotations import array import base64 @@ -21,6 +22,7 @@ import pickle import sys import uuid +from typing import Any sys.path[0:0] = [""] @@ -32,60 +34,56 @@ from bson.codec_options import CodecOptions from bson.son import SON from pymongo.common import validate_uuid_representation -from pymongo.mongo_client import MongoClient from pymongo.write_concern import WriteConcern -class TestBinary(unittest.TestCase): - csharp_data: bytes - java_data: bytes +class BinaryData: + # Generated by the Java driver + from_java = ( + b"bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu" + b"Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND" + b"ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+" + b"XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1" + b"aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR" + b"jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA" + b"AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z" + b"DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf" + b"aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx" + b"29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My" + b"1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB" + b"W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp" + b"bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc" + b"0MQAA" + ) + java_data = base64.b64decode(from_java) + + # Generated by the .net driver + from_csharp = ( + b"ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl" + b"iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2" + b"ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V" + b"pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl" + b"AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A" + b"ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z" + b"oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU" + b"zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn" + b"dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA" + b"CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT" + b"QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP" + b"MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00" + b"ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=" + ) + csharp_data = base64.b64decode(from_csharp) - @classmethod - def setUpClass(cls): - # Generated by the Java driver - from_java = ( - b"bAAAAAdfaWQAUCBQxkVm+XdxJ9tOBW5ld2d1aWQAEAAAAAMIQkfACFu" - b"Z/0RustLOU/G6Am5ld2d1aWRzdHJpbmcAJQAAAGZmOTk1YjA4LWMwND" - b"ctNDIwOC1iYWYxLTUzY2VkMmIyNmU0NAAAbAAAAAdfaWQAUCBQxkVm+" - b"XdxJ9tPBW5ld2d1aWQAEAAAAANgS/xhRXXv8kfIec+dYdyCAm5ld2d1" - b"aWRzdHJpbmcAJQAAAGYyZWY3NTQ1LTYxZmMtNGI2MC04MmRjLTYxOWR" - b"jZjc5Yzg0NwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tQBW5ld2d1aWQAEA" - b"AAAAPqREIbhZPUJOSdHCJIgaqNAm5ld2d1aWRzdHJpbmcAJQAAADI0Z" - b"DQ5Mzg1LTFiNDItNDRlYS04ZGFhLTgxNDgyMjFjOWRlNAAAbAAAAAdf" - b"aWQAUCBQxkVm+XdxJ9tRBW5ld2d1aWQAEAAAAANjQBn/aQuNfRyfNyx" - b"29COkAm5ld2d1aWRzdHJpbmcAJQAAADdkOGQwYjY5LWZmMTktNDA2My" - b"1hNDIzLWY0NzYyYzM3OWYxYwAAbAAAAAdfaWQAUCBQxkVm+XdxJ9tSB" - b"W5ld2d1aWQAEAAAAAMtSv/Et1cAQUFHUYevqxaLAm5ld2d1aWRzdHJp" - b"bmcAJQAAADQxMDA1N2I3LWM0ZmYtNGEyZC04YjE2LWFiYWY4NzUxNDc" - b"0MQAA" - ) - cls.java_data = base64.b64decode(from_java) - - # Generated by the .net driver - from_csharp = ( - b"ZAAAABBfaWQAAAAAAAVuZXdndWlkABAAAAAD+MkoCd/Jy0iYJ7Vhl" - b"iF3BAJuZXdndWlkc3RyaW5nACUAAAAwOTI4YzlmOC1jOWRmLTQ4Y2" - b"ItOTgyNy1iNTYxOTYyMTc3MDQAAGQAAAAQX2lkAAEAAAAFbmV3Z3V" - b"pZAAQAAAAA9MD0oXQe6VOp7mK4jkttWUCbmV3Z3VpZHN0cmluZwAl" - 
b"AAAAODVkMjAzZDMtN2JkMC00ZWE1LWE3YjktOGFlMjM5MmRiNTY1A" - b"ABkAAAAEF9pZAACAAAABW5ld2d1aWQAEAAAAAPRmIO2auc/Tprq1Z" - b"oQ1oNYAm5ld2d1aWRzdHJpbmcAJQAAAGI2ODM5OGQxLWU3NmEtNGU" - b"zZi05YWVhLWQ1OWExMGQ2ODM1OAAAZAAAABBfaWQAAwAAAAVuZXdn" - b"dWlkABAAAAADISpriopuTEaXIa7arYOCFAJuZXdndWlkc3RyaW5nA" - b"CUAAAA4YTZiMmEyMS02ZThhLTQ2NGMtOTcyMS1hZWRhYWQ4MzgyMT" - b"QAAGQAAAAQX2lkAAQAAAAFbmV3Z3VpZAAQAAAAA98eg0CFpGlPihP" - b"MwOmYGOMCbmV3Z3VpZHN0cmluZwAlAAAANDA4MzFlZGYtYTQ4NS00" - b"ZjY5LThhMTMtY2NjMGU5OTgxOGUzAAA=" - ) - cls.csharp_data = base64.b64decode(from_csharp) +class TestBinary(unittest.TestCase): def test_binary(self): a_string = "hello world" a_binary = Binary(b"hello world") self.assertTrue(a_binary.startswith(b"hello")) self.assertTrue(a_binary.endswith(b"world")) - self.assertTrue(isinstance(a_binary, Binary)) - self.assertFalse(isinstance(a_string, Binary)) + self.assertIsInstance(a_binary, Binary) + self.assertNotIsInstance(a_string, Binary) def test_exceptions(self): self.assertRaises(TypeError, Binary, None) @@ -122,15 +120,15 @@ def test_equality(self): def test_repr(self): one = Binary(b"hello world") - self.assertEqual(repr(one), "Binary(%s, 0)" % (repr(b"hello world"),)) + self.assertEqual(repr(one), "Binary({}, 0)".format(repr(b"hello world"))) two = Binary(b"hello world", 2) - self.assertEqual(repr(two), "Binary(%s, 2)" % (repr(b"hello world"),)) + self.assertEqual(repr(two), "Binary({}, 2)".format(repr(b"hello world"))) three = Binary(b"\x08\xFF") - self.assertEqual(repr(three), "Binary(%s, 0)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(three), "Binary({}, 0)".format(repr(b"\x08\xFF"))) four = Binary(b"\x08\xFF", 2) - self.assertEqual(repr(four), "Binary(%s, 2)" % (repr(b"\x08\xFF"),)) + self.assertEqual(repr(four), "Binary({}, 2)".format(repr(b"\x08\xFF"))) five = Binary(b"test", 100) - self.assertEqual(repr(five), "Binary(%s, 100)" % (repr(b"test"),)) + self.assertEqual(repr(five), "Binary({}, 100)".format(repr(b"test"))) def test_hash(self): one = Binary(b"hello world") @@ -157,7 +155,7 @@ def test_uuid_subtype_4(self): def test_legacy_java_uuid(self): # Test decoding - data = self.java_data + data = BinaryData.java_data docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) for d in docs: self.assertNotEqual(d["newguid"], uuid.UUID(d["newguidstring"])) @@ -195,27 +193,8 @@ def test_legacy_java_uuid(self): ) self.assertEqual(data, encoded) - @client_context.require_connection - def test_legacy_java_uuid_roundtrip(self): - data = self.java_data - docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) - - client_context.client.pymongo_test.drop_collection("java_uuid") - db = client_context.client.pymongo_test - coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) - - coll.insert_many(docs) - self.assertEqual(5, coll.count_documents({})) - for d in coll.find(): - self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - - coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - for d in coll.find(): - self.assertNotEqual(d["newguid"], d["newguidstring"]) - client_context.client.pymongo_test.drop_collection("java_uuid") - def test_legacy_csharp_uuid(self): - data = self.csharp_data + data = BinaryData.csharp_data # Test decoding docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, PYTHON_LEGACY)) @@ -255,60 +234,6 @@ def test_legacy_csharp_uuid(self): ) self.assertEqual(data, encoded) - @client_context.require_connection - def 
test_legacy_csharp_uuid_roundtrip(self): - data = self.csharp_data - docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) - - client_context.client.pymongo_test.drop_collection("csharp_uuid") - db = client_context.client.pymongo_test - coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) - - coll.insert_many(docs) - self.assertEqual(5, coll.count_documents({})) - for d in coll.find(): - self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) - - coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - for d in coll.find(): - self.assertNotEqual(d["newguid"], d["newguidstring"]) - client_context.client.pymongo_test.drop_collection("csharp_uuid") - - def test_uri_to_uuid(self): - - uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" - client = MongoClient(uri, connect=False) - self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) - - @client_context.require_connection - def test_uuid_queries(self): - db = client_context.client.pymongo_test - coll = db.test - coll.drop() - - uu = uuid.uuid4() - coll.insert_one({"uuid": Binary(uu.bytes, 3)}) - self.assertEqual(1, coll.count_documents({})) - - # Test regular UUID queries (using subtype 4). - coll = db.get_collection( - "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - ) - self.assertEqual(0, coll.count_documents({"uuid": uu})) - coll.insert_one({"uuid": uu}) - self.assertEqual(2, coll.count_documents({})) - docs = list(coll.find({"uuid": uu})) - self.assertEqual(1, len(docs)) - self.assertEqual(uu, docs[0]["uuid"]) - - # Test both. - uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) - predicate = {"uuid": {"$in": [uu, uu_legacy]}} - self.assertEqual(2, coll.count_documents(predicate)) - docs = list(coll.find(predicate)) - self.assertEqual(2, len(docs)) - coll.drop() - def test_pickle(self): b1 = Binary(b"123", 2) @@ -351,7 +276,7 @@ class TestUuidSpecExplicitCoding(unittest.TestCase): @classmethod def setUpClass(cls): - super(TestUuidSpecExplicitCoding, cls).setUpClass() + super().setUpClass() cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") @staticmethod @@ -452,7 +377,7 @@ class TestUuidSpecImplicitCoding(IntegrationTest): @classmethod def setUpClass(cls): - super(TestUuidSpecImplicitCoding, cls).setUpClass() + super().setUpClass() cls.uuid = uuid.UUID("00112233445566778899AABBCCDDEEFF") @staticmethod @@ -495,7 +420,7 @@ def test_encoding_4(self): # Implicit encoding prose test #5 def test_encoding_5(self): with self.assertRaises(ValueError): - self._test_encoding("unspecifed", "dummy", -1) + self._test_encoding("unspecified", "dummy", -1) def _test_decoding( self, diff --git a/test/test_bson.py b/test/test_bson.py index 8ad65f3412..f792db1e89 100644 --- a/test/test_bson.py +++ b/test/test_bson.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -15,6 +14,7 @@ # limitations under the License. 
"""Test the bson module.""" +from __future__ import annotations import array import collections @@ -23,6 +23,7 @@ import os import pickle import re +import struct import sys import tempfile import uuid @@ -32,23 +33,34 @@ sys.path[0:0] = [""] from test import qcheck, unittest -from test.utils import ExceptionCatchingThread +from test.helpers import ExceptionCatchingTask import bson from bson import ( BSON, EPOCH_AWARE, + DatetimeMS, Regex, + _array_of_documents_to_buffer, + _datetime_to_millis, decode, decode_all, decode_file_iter, decode_iter, encode, is_valid, + json_util, +) +from bson.binary import ( + USER_DEFINED_SUBTYPE, + Binary, + BinaryVector, + BinaryVectorDtype, + UuidRepresentation, ) -from bson.binary import Binary, UuidRepresentation from bson.code import Code -from bson.codec_options import CodecOptions +from bson.codec_options import CodecOptions, DatetimeConversion +from bson.datetime_ms import _DATETIME_ERROR_SUGGESTION from bson.dbref import DBRef from bson.errors import InvalidBSON, InvalidDocument from bson.int64 import Int64 @@ -120,7 +132,6 @@ def assertInvalid(self, data): self.assertRaises(InvalidBSON, decode, data) def check_encode_then_decode(self, doc_class=dict, decoder=decode, encoder=encode): - # Work around http://bugs.jython.org/issue1728 if sys.platform.startswith("java"): doc_class = SON @@ -131,7 +142,7 @@ def helper(doc): helper({}) helper({"test": "hello"}) - self.assertTrue(isinstance(decoder(encoder({"hello": "world"}))["hello"], str)) + self.assertIsInstance(decoder(encoder({"hello": "world"}))["hello"], str) helper({"mike": -10120}) helper({"long": Int64(10)}) helper({"really big long": 2147483648}) @@ -144,6 +155,9 @@ def helper(doc): helper({"a binary": Binary(b"test", 128)}) helper({"a binary": Binary(b"test", 254)}) helper({"another binary": Binary(b"test", 2)}) + helper({"binary packed bit vector": Binary(b"\x10\x00\x7f\x07", 9)}) + helper({"binary int8 vector": Binary(b"\x03\x00\x7f\x07", 9)}) + helper({"binary float32 vector": Binary(b"'\x00\x00\x00\xfeB\x00\x00\xe0@", 9)}) helper(SON([("test dst", datetime.datetime(1993, 4, 4, 2))])) helper(SON([("test negative dst", datetime.datetime(1, 1, 1, 1, 1, 1))])) helper({"big float": float(10000000000)}) @@ -368,7 +382,7 @@ def test_invalid_decodes(self): ), ] for i, data in enumerate(bad_bsons): - msg = "bad_bson[{}]".format(i) + msg = f"bad_bson[{i}]" with self.assertRaises(InvalidBSON, msg=msg): decode_all(data) with self.assertRaises(InvalidBSON, msg=msg): @@ -443,6 +457,20 @@ def test_basic_encode(self): encode({"test": Binary(b"test", 128)}), b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00\x00\x00\x80\x74\x65\x73\x74\x00", ) + self.assertEqual( + encode({"vector_int8": Binary.from_vector([-128, -1, 127], BinaryVectorDtype.INT8)}), + b"\x1c\x00\x00\x00\x05vector_int8\x00\x05\x00\x00\x00\t\x03\x00\x80\xff\x7f\x00", + ) + self.assertEqual( + encode({"vector_bool": Binary.from_vector([1, 127], BinaryVectorDtype.PACKED_BIT)}), + b"\x1b\x00\x00\x00\x05vector_bool\x00\x04\x00\x00\x00\t\x10\x00\x01\x7f\x00", + ) + self.assertEqual( + encode( + {"vector_float32": Binary.from_vector([-1.1, 1.1e10], BinaryVectorDtype.FLOAT32)} + ), + b"$\x00\x00\x00\x05vector_float32\x00\n\x00\x00\x00\t'\x00\xcd\xcc\x8c\xbf\xac\xe9#P\x00", + ) self.assertEqual(encode({"test": None}), b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00") self.assertEqual( encode({"date": datetime.datetime(2007, 1, 8, 0, 30, 11)}), @@ -487,9 +515,36 @@ def test_basic_encode(self): b"\x00", ) + def test_bad_code(self): + # Assert that 
decoding invalid Code with scope does not include a field name.
+        def generate_payload(length: int) -> bytes:
+            string_size = length - 0x1E
+
+            return bytes.fromhex(
+                struct.pack("<
[...]
+        # MinKey's <, <=, >, >=, !=, and ==.
+        # These tests should be kept as assertTrue as opposed to using unittest's built-in comparison assertions because
+        # MinKey and MaxKey define their own __ge__, __le__, and other comparison attributes, and we want to explicitly test that.
         self.assertTrue(MinKey() < None)
         self.assertTrue(MinKey() < 1)
         self.assertTrue(MinKey() <= 1)
@@ -923,10 +1136,10 @@ def test_bad_id_keys(self):
     def test_bson_encode_thread_safe(self):
         def target(i):
             for j in range(1000):
-                my_int = type("MyInt_%s_%s" % (i, j), (int,), {})
+                my_int = type(f"MyInt_{i}_{j}", (int,), {})
                 bson.encode({"my_int": my_int()})
 
-        threads = [ExceptionCatchingThread(target=target, args=(i,)) for i in range(3)]
+        threads = [ExceptionCatchingTask(target=target, args=(i,)) for i in range(3)]
 
         for t in threads:
             t.start()
@@ -937,7 +1150,7 @@ def target(i):
         self.assertIsNone(t.exc)
 
     def test_raise_invalid_document(self):
-        class Wrapper(object):
+        class Wrapper:
             def __init__(self, val):
                 self.val = val
 
@@ -950,6 +1163,54 @@ def __repr__(self):
         ):
             encode({"t": Wrapper(1)})
 
+    def test_doc_in_invalid_document_error_as_property(self):
+        class Wrapper:
+            def __init__(self, val):
+                self.val = val
+
+            def __repr__(self):
+                return repr(self.val)
+
+        self.assertEqual("1", repr(Wrapper(1)))
+        doc = {"t": Wrapper(1)}
+        with self.assertRaisesRegex(InvalidDocument, "Invalid document:") as cm:
+            encode(doc)
+        self.assertEqual(cm.exception.document, doc)
+
+    def test_doc_in_invalid_document_error_as_property_mapping(self):
+        class MyMapping(abc.Mapping):
+            def keys(self):
+                return ["t"]
+
+            def __getitem__(self, name):
+                if name == "_id":
+                    return None
+                return Wrapper(name)
+
+            def __len__(self):
+                return 1
+
+            def __iter__(self):
+                return iter(["t"])
+
+            def __eq__(self, other):
+                if isinstance(other, MyMapping):
+                    return True
+                return False
+
+        class Wrapper:
+            def __init__(self, val):
+                self.val = val
+
+            def __repr__(self):
+                return repr(self.val)
+
+        self.assertEqual("1", repr(Wrapper(1)))
+        doc = MyMapping()
+        with self.assertRaisesRegex(InvalidDocument, "Invalid document:") as cm:
+            encode(doc)
+        self.assertEqual(cm.exception.document, doc)
+
 
 class TestCodecOptions(unittest.TestCase):
     def test_document_class(self):
@@ -970,6 +1231,10 @@ def test_tzinfo(self):
         tz = FixedOffset(42, "forty-two")
         self.assertRaises(ValueError, CodecOptions, tzinfo=tz)
         self.assertEqual(tz, CodecOptions(tz_aware=True, tzinfo=tz).tzinfo)
+        self.assertEqual(repr(tz), "FixedOffset(datetime.timedelta(seconds=2520), 'forty-two')")
+        self.assertEqual(
+            repr(eval(repr(tz))), "FixedOffset(datetime.timedelta(seconds=2520), 'forty-two')"
+        )
 
     def test_codec_options_repr(self):
         r = (
@@ -977,14 +1242,15 @@
             "uuid_representation=UuidRepresentation.UNSPECIFIED, "
             "unicode_decode_error_handler='strict', "
             "tzinfo=None, type_registry=TypeRegistry(type_codecs=[], "
-            "fallback_encoder=None))"
+            "fallback_encoder=None), "
+            "datetime_conversion=DatetimeConversion.DATETIME)"
         )
         self.assertEqual(r, repr(CodecOptions()))
 
     def test_decode_all_defaults(self):
         # Test decode_all()'s default document_class is dict and tz_aware is
         # False.
-        doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()}
+        doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)}
 
         decoded = bson.decode_all(bson.encode(doc))[0]
         self.assertIsInstance(decoded["sub_document"], dict)
@@ -996,7 +1262,7 @@ def test_decode_all_no_options(self):
         # Test decode_all()'s default document_class is dict and tz_aware is
         # False.
-        doc = {"sub_document": {}, "dt": datetime.datetime.utcnow()}
+        doc = {"sub_document": {}, "dt": datetime.datetime.now(tz=datetime.timezone.utc)}
 
         decoded = bson.decode_all(bson.encode(doc), None)[0]
         self.assertIsInstance(decoded["sub_document"], dict)
@@ -1026,7 +1292,10 @@ def test_unicode_decode_error_handler(self):
         # Ensure that strict mode raises an error.
         for invalid in [invalid_key, invalid_val, invalid_both]:
             self.assertRaises(
-                InvalidBSON, decode, invalid, CodecOptions(unicode_decode_error_handler="strict")
+                InvalidBSON,
+                decode,
+                invalid,
+                CodecOptions(unicode_decode_error_handler="strict"),
             )
             self.assertRaises(InvalidBSON, decode, invalid, CodecOptions())
             self.assertRaises(InvalidBSON, decode, invalid)
@@ -1047,7 +1316,10 @@
         self.assertEqual(dec, {"keystr": "foobar"})
 
         self.assertRaises(
-            InvalidBSON, decode, invalid_both, CodecOptions(unicode_decode_error_handler="junk")
+            InvalidBSON,
+            decode,
+            invalid_both,
+            CodecOptions(unicode_decode_error_handler="junk"),
         )
 
     def round_trip_pickle(self, obj, pickled_with_older):
@@ -1142,5 +1414,284 @@ def test_bson_encode_decode(self) -> None:
         self.assertTrue(decoded["_id"].generation_time)
 
 
+class TestDatetimeConversion(unittest.TestCase):
+    def test_comps(self):
+        # Test each of the rich comparison methods.
+        pairs = [
+            (DatetimeMS(-1), DatetimeMS(1)),
+            (DatetimeMS(0), DatetimeMS(0)),
+            (DatetimeMS(1), DatetimeMS(-1)),
+        ]
+
+        comp_ops = ["__lt__", "__le__", "__eq__", "__ne__", "__gt__", "__ge__"]
+        for lh, rh in pairs:
+            for op in comp_ops:
+                self.assertEqual(getattr(lh, op)(rh), getattr(lh._value, op)(rh._value))
+
+    def test_class_conversions(self):
+        # Test class conversions.
+        dtr1 = DatetimeMS(1234)
+        dt1 = dtr1.as_datetime()
+        self.assertEqual(dtr1, DatetimeMS(dt1))
+
+        dt2 = datetime.datetime(1969, 1, 1)
+        dtr2 = DatetimeMS(dt2)
+        self.assertEqual(dtr2.as_datetime(), dt2)
+
+        # Test encode and decode without codec options. Expect: DatetimeMS => datetime
+        dtr1 = DatetimeMS(0)
+        enc1 = encode({"x": dtr1})
+        dec1 = decode(enc1)
+        self.assertEqual(dec1["x"], datetime.datetime(1970, 1, 1))
+        self.assertNotEqual(type(dtr1), type(dec1["x"]))
+
+        # Test encode and decode with codec options. Expect: DatetimeMS => DatetimeMS
+        opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
+        enc1 = encode({"x": dtr1})
+        dec1 = decode(enc1, opts1)
+        self.assertEqual(type(dtr1), type(dec1["x"]))
+        self.assertEqual(dtr1, dec1["x"])
+
+        # Expect: datetime => DatetimeMS
+        opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_MS)
+        dt1 = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+        enc1 = encode({"x": dt1})
+        dec1 = decode(enc1, opts1)
+        self.assertEqual(dec1["x"], DatetimeMS(0))
+        self.assertNotEqual(dt1, type(dec1["x"]))
+
+    def test_clamping(self):
+        # Test clamping from below and above.
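+        # BSON datetimes are int64 milliseconds since the Unix epoch, a wider range
+        # than datetime.datetime supports, so out-of-range values must be clamped.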
+ opts = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, + tz_aware=True, + tzinfo=datetime.timezone.utc, + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1)}) + dec_below = decode(below, opts) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_tz_clamping_local(self): + # Naive clamping to local tz. + opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=False) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + + dec_below = decode(below, opts) + self.assertEqual(dec_below["x"], datetime.datetime.min) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(microsecond=999000), + ) + + def test_tz_clamping_utc(self): + # Aware clamping default utc. + opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts) + self.assertEqual( + dec_below["x"], datetime.datetime.min.replace(tzinfo=datetime.timezone.utc) + ) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=datetime.timezone.utc, microsecond=999000), + ) + + def test_tz_clamping_non_utc(self): + for tz in [FixedOffset(60, "+1H"), FixedOffset(-60, "-1H")]: + opts = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=tz + ) + # Min/max values in this timezone which can be represented in both BSON and datetime UTC. 
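+            # astimezone() can overflow at the edges of the datetime range, so fall
+            # back to attaching the timezone directly.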
+ try: + min_tz = datetime.datetime.min.replace(tzinfo=utc).astimezone(tz) + except OverflowError: + min_tz = datetime.datetime.min.replace(tzinfo=tz) + try: + max_tz = datetime.datetime.max.replace(tzinfo=utc, microsecond=999000).astimezone( + tz + ) + except OverflowError: + max_tz = datetime.datetime.max.replace(tzinfo=tz, microsecond=999000) + + for in_range in [ + min_tz, + min_tz + datetime.timedelta(milliseconds=1), + max_tz - datetime.timedelta(milliseconds=1), + max_tz, + ]: + doc = decode(encode({"x": in_range}), opts) + self.assertEqual(doc["x"], in_range) + + for too_low in [ + DatetimeMS(_datetime_to_millis(min_tz) - 1), + DatetimeMS(_datetime_to_millis(min_tz) - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(min_tz) - 1 - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 1 - 60 * 60 * 1000), + ]: + doc = decode(encode({"x": too_low}), opts) + self.assertEqual(doc["x"], min_tz) + + for too_high in [ + DatetimeMS(_datetime_to_millis(max_tz) + 1), + DatetimeMS(_datetime_to_millis(max_tz) + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(max_tz) + 1 + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 60 * 60 * 1000), + DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 1 + 60 * 60 * 1000), + ]: + doc = decode(encode({"x": too_high}), opts) + self.assertEqual(doc["x"], max_tz) + + def test_tz_clamping_non_utc_simple(self): + dtm = datetime.datetime(2024, 8, 23) + encoded = encode({"d": dtm}) + self.assertEqual(decode(encoded)["d"], dtm) + for conversion in [ + DatetimeConversion.DATETIME, + DatetimeConversion.DATETIME_CLAMP, + DatetimeConversion.DATETIME_AUTO, + ]: + for tz in [FixedOffset(60, "+1H"), FixedOffset(-60, "-1H")]: + opts = CodecOptions(datetime_conversion=conversion, tz_aware=True, tzinfo=tz) + self.assertEqual(decode(encoded, opts)["d"], dtm.replace(tzinfo=utc).astimezone(tz)) + + def test_tz_clamping_non_hashable(self): + class NonHashableTZ(FixedOffset): + __hash__ = None + + tz = NonHashableTZ(0, "UTC-non-hashable") + self.assertRaises(TypeError, hash, tz) + # Aware clamping. + opts = CodecOptions( + datetime_conversion=DatetimeConversion.DATETIME_CLAMP, tz_aware=True, tzinfo=tz + ) + below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)}) + dec_below = decode(below, opts) + self.assertEqual(dec_below["x"], datetime.datetime.min.replace(tzinfo=tz)) + + within = encode({"x": EPOCH_AWARE.astimezone(tz)}) + dec_within = decode(within, opts) + self.assertEqual(dec_within["x"], EPOCH_AWARE.astimezone(tz)) + + above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)}) + dec_above = decode(above, opts) + self.assertEqual( + dec_above["x"], + datetime.datetime.max.replace(tzinfo=tz, microsecond=999000), + ) + + def test_datetime_auto(self): + # Naive auto, in range. + opts1 = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_AUTO) + inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts1) + dec_inr = decode(inr) + self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1)) + + # Naive auto, below range. 
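+        # (DATETIME_AUTO decodes out-of-range values as DatetimeMS rather than raising.)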
+        below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)})
+        dec_below = decode(below, opts1)
+        self.assertEqual(
+            dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)
+        )
+
+        # Naive auto, above range.
+        above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)})
+        dec_above = decode(above, opts1)
+        self.assertEqual(
+            dec_above["x"],
+            DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60),
+        )
+
+        # Aware auto, in range.
+        opts2 = CodecOptions(
+            datetime_conversion=DatetimeConversion.DATETIME_AUTO,
+            tz_aware=True,
+            tzinfo=datetime.timezone.utc,
+        )
+        inr = encode({"x": datetime.datetime(1970, 1, 1)}, codec_options=opts2)
+        dec_inr = decode(inr)
+        self.assertEqual(dec_inr["x"], datetime.datetime(1970, 1, 1))
+
+        # Aware auto, below range.
+        below = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)})
+        dec_below = decode(below, opts2)
+        self.assertEqual(
+            dec_below["x"], DatetimeMS(_datetime_to_millis(datetime.datetime.min) - 24 * 60 * 60)
+        )
+
+        # Aware auto, above range.
+        above = encode({"x": DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60)})
+        dec_above = decode(above, opts2)
+        self.assertEqual(
+            dec_above["x"],
+            DatetimeMS(_datetime_to_millis(datetime.datetime.max) + 24 * 60 * 60),
+        )
+
+    def test_millis_from_datetime_ms(self):
+        # Test 65+ bit integer conversion, expect OverflowError.
+        big_ms = 2**65
+        with self.assertRaises(OverflowError):
+            encode({"x": DatetimeMS(big_ms)})
+
+        # Subclass of DatetimeMS w/ __int__ override, expect an Error.
+        class DatetimeMSOverride(DatetimeMS):
+            def __int__(self):
+                return float(self._value)
+
+        float_ms = DatetimeMSOverride(2)
+        with self.assertRaises(TypeError):
+            encode({"x": float_ms})
+
+        # Test InvalidBSON errors on conversion include _DATETIME_ERROR_SUGGESTION
+        small_ms = -2 << 51
+        with self.assertRaisesRegex(InvalidBSON, re.compile(re.escape(_DATETIME_ERROR_SUGGESTION))):
+            decode(encode({"a": DatetimeMS(small_ms)}))
+
+    def test_array_of_documents_to_buffer(self):
+        doc = dict(a=1)
+        buf = _array_of_documents_to_buffer(encode({"0": doc}))
+        self.assertEqual(buf, encode(doc))
+        buf = _array_of_documents_to_buffer(encode({"0": doc, "1": doc}))
+        self.assertEqual(buf, encode(doc) + encode(doc))
+        with self.assertRaises(InvalidBSON):
+            _array_of_documents_to_buffer(encode({"0": doc, "1": doc}) + b"1")
+        buf = encode({"0": doc, "1": doc})
+        buf = buf[:-1] + b"1"
+        with self.assertRaises(InvalidBSON):
+            _array_of_documents_to_buffer(buf)
+        # We replace the size of the array with \xff\xff\xff\x00, which decodes as
+        # 16777215 and overruns the actual buffer.
+        buf = b"\x14\x00\x00\x00\x04a\x00\xff\xff\xff\x00\x100\x00\x01\x00\x00\x00\x00\x00"
+        with self.assertRaises(InvalidBSON):
+            _array_of_documents_to_buffer(buf)
+
+
+class TestLongLongToString(unittest.TestCase):
+    def test_long_long_to_string(self):
+        try:
+            from bson import _cbson
+
+            _cbson._test_long_long_to_str()
+        except ImportError:
+            print("_cbson was not imported. Check compilation logs.")
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/test_bson_binary_vector.py b/test/test_bson_binary_vector.py
new file mode 100644
index 0000000000..2783338793
--- /dev/null
+++ b/test/test_bson_binary_vector.py
@@ -0,0 +1,122 @@
+# Copyright 2024-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import binascii
+import struct
+from pathlib import Path
+from test import unittest
+
+from bson import decode, encode, json_util
+from bson.binary import Binary, BinaryVectorDtype
+
+_TEST_PATH = Path(__file__).parent / "bson_binary_vector"
+
+
+class TestBSONBinaryVector(unittest.TestCase):
+    """Runs Binary Vector subtype tests.
+
+    Follows the style of the BSON corpus specification tests.
+    Tests are automatically generated on import
+    from json files in _TEST_PATH via `create_tests`.
+    The actual tests are defined in the inner function `run_test`
+    of the test generator `create_test`."""
+
+
+def create_test(case_spec):
+    """Create standard test given specification in json.
+
+    We use the naming convention expected (exp) and observed (obs)
+    to differentiate what is in the json (expected or suffix _exp)
+    from what is produced by the API (observed or suffix _obs)
+    """
+    test_key = case_spec.get("test_key")
+
+    def run_test(self):
+        for test_case in case_spec.get("tests", []):
+            description = test_case["description"]
+            vector_exp = test_case.get("vector")
+            dtype_hex_exp = test_case["dtype_hex"]
+            dtype_alias_exp = test_case.get("dtype_alias")
+            padding_exp = test_case.get("padding", 0)
+            canonical_bson_exp = test_case.get("canonical_bson")
+            # Convert dtype hex string into bytes
+            dtype_exp = BinaryVectorDtype(int(dtype_hex_exp, 16).to_bytes(1, byteorder="little"))
+
+            if test_case["valid"]:
+                # Convert bson string to bytes
+                cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8"))
+                decoded_doc = decode(cB_exp)
+                binary_obs = decoded_doc[test_key]
+
+                # Test round-tripping canonical bson.
+                self.assertEqual(encode(decoded_doc), cB_exp, description)
+
+                # Test BSON to Binary Vector
+                vector_obs = binary_obs.as_vector()
+                self.assertEqual(vector_obs.dtype, dtype_exp, description)
+                if dtype_alias_exp:
+                    self.assertEqual(
+                        vector_obs.dtype, BinaryVectorDtype[dtype_alias_exp], description
+                    )
+                if dtype_exp in [BinaryVectorDtype.FLOAT32]:
+                    [
+                        self.assertAlmostEqual(vector_obs.data[i], vector_exp[i], delta=1e-5)
+                        for i in range(len(vector_exp))
+                    ]
+                else:
+                    self.assertEqual(vector_obs.data, vector_exp, description)
+                # Test Binary Vector to BSON
+                vector_exp = Binary.from_vector(vector_exp, dtype_exp, padding_exp)
+                cB_obs = binascii.hexlify(encode({test_key: vector_exp})).decode().upper()
+                self.assertEqual(cB_obs, canonical_bson_exp, description)
+
+            else:
+                """
+                #### To prove correct in an invalid case (`valid:false`), one MUST
+                - (encoding case) if the vector field is present, raise an exception
+                when attempting to encode a document from the numeric values, dtype, and padding.
+                - (decoding case) if the canonical_bson field is present, raise an exception
+                when attempting to deserialize it into the corresponding
+                numeric values, as the field contains corrupted data.
+ """ + # Tests Binary.from_vector() + if vector_exp is not None: + with self.assertRaises((struct.error, ValueError), msg=description): + Binary.from_vector(vector_exp, dtype_exp, padding_exp) + + # Tests Binary.as_vector() + if canonical_bson_exp is not None: + with self.assertRaises((struct.error, ValueError), msg=description): + cB_exp = binascii.unhexlify(canonical_bson_exp.encode("utf8")) + decoded_doc = decode(cB_exp) + binary_obs = decoded_doc[test_key] + binary_obs.as_vector() + + return run_test + + +def create_tests(): + for filename in _TEST_PATH.glob("*.json"): + with open(str(filename), encoding="utf-8") as test_file: + test_method = create_test(json_util.loads(test_file.read())) + setattr(TestBSONBinaryVector, "test_" + filename.stem, test_method) + + +create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_bson_corpus.py b/test/test_bson_corpus.py index 193a6dff3d..504025e766 100644 --- a/test/test_bson_corpus.py +++ b/test/test_bson_corpus.py @@ -13,9 +13,9 @@ # limitations under the License. """Run the BSON corpus specification tests.""" +from __future__ import annotations import binascii -import codecs import functools import glob import json @@ -226,7 +226,7 @@ def run_test(self): def create_tests(): for filename in glob.glob(os.path.join(_TEST_PATH, "*.json")): test_suffix, _ = os.path.splitext(os.path.basename(filename)) - with codecs.open(filename, encoding="utf-8") as bson_test_file: + with open(filename, encoding="utf-8") as bson_test_file: test_method = create_test(json.load(bson_test_file)) setattr(TestBSONCorpus, "test_" + test_suffix, test_method) diff --git a/test/test_bulk.py b/test/test_bulk.py index fae1c7e201..1de406fca5 100644 --- a/test/test_bulk.py +++ b/test/test_bulk.py @@ -13,50 +13,40 @@ # limitations under the License. 
"""Test the bulk API.""" +from __future__ import annotations import sys import uuid +from typing import Any, Optional -from pymongo.mongo_client import MongoClient +from pymongo.synchronous.mongo_client import MongoClient sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest -from test.utils import ( - remove_all_users, - rs_or_single_client_noauth, - single_client, - wait_until, -) +from test import IntegrationTest, client_context, remove_all_users, unittest +from test.utils_shared import wait_until from bson.binary import Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.objectid import ObjectId -from pymongo.collection import Collection from pymongo.common import partition_node -from pymongo.errors import ( - BulkWriteError, - ConfigurationError, - InvalidOperation, - OperationFailure, -) +from pymongo.errors import BulkWriteError, ConfigurationError, InvalidOperation, OperationFailure from pymongo.operations import * +from pymongo.synchronous.collection import Collection from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class BulkTestBase(IntegrationTest): coll: Collection coll_w0: Collection - @classmethod - def setUpClass(cls): - super(BulkTestBase, cls).setUpClass() - cls.coll = cls.db.test - cls.coll_w0 = cls.coll.with_options(write_concern=WriteConcern(w=0)) - def setUp(self): - super(BulkTestBase, self).setUp() + super().setUp() + self.coll = self.db.test self.coll.drop() + self.coll_w0 = self.coll.with_options(write_concern=WriteConcern(w=0)) def assertEqualResponse(self, expected, actual): """Compare response from bulk.execute() to expected response.""" @@ -93,7 +83,7 @@ def assertEqualResponse(self, expected, actual): self.assertEqual( actual.get(key), value, - "%r value of %r does not match expected %r" % (key, actual.get(key), value), + f"{key!r} value of {actual.get(key)!r} does not match expected {value!r}", ) def assertEqualUpsert(self, expected, actual): @@ -104,7 +94,7 @@ def assertEqualUpsert(self, expected, actual): self.assertEqual(expected["index"], actual["index"]) if expected["_id"] == "...": # Unspecified value. - self.assertTrue("_id" in actual) + self.assertIn("_id", actual) else: self.assertEqual(expected["_id"], actual["_id"]) @@ -117,7 +107,7 @@ def assertEqualWriteError(self, expected, actual): self.assertEqual(expected["code"], actual["code"]) if expected["errmsg"] == "...": # Unspecified value. - self.assertTrue("errmsg" in actual) + self.assertIn("errmsg", actual) else: self.assertEqual(expected["errmsg"], actual["errmsg"]) @@ -125,7 +115,7 @@ def assertEqualWriteError(self, expected, actual): actual_op = actual["op"].copy() if expected_op.get("_id") == "...": # Unspecified _id. 
- self.assertTrue("_id" in actual_op) + self.assertIn("_id", actual_op) actual_op.pop("_id") expected_op.pop("_id") @@ -134,7 +124,8 @@ def assertEqualWriteError(self, expected, actual): class TestBulk(BulkTestBase): def test_empty(self): - self.assertRaises(InvalidOperation, self.coll.bulk_write, []) + with self.assertRaises(InvalidOperation): + self.coll.bulk_write([]) def test_insert(self): expected = { @@ -154,7 +145,6 @@ def test_insert(self): self.assertEqual(1, self.coll.count_documents({})) def _test_update_many(self, update): - expected = { "nMatched": 2, "nModified": 2, @@ -170,25 +160,29 @@ def _test_update_many(self, update): result = self.coll.bulk_write([UpdateMany({}, update)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(2, result.matched_count) - self.assertTrue(result.modified_count in (2, None)) + self.assertIn(result.modified_count, (2, None)) def test_update_many(self): self._test_update_many({"$set": {"foo": "bar"}}) - @client_context.require_version_min(4, 1, 11) + @client_context.require_version_min(4, 2, 0) def test_update_many_pipeline(self): self._test_update_many([{"$set": {"foo": "bar"}}]) def test_array_filters_validation(self): - self.assertRaises(TypeError, UpdateMany, {}, {}, array_filters={}) - self.assertRaises(TypeError, UpdateOne, {}, {}, array_filters={}) + with self.assertRaises(TypeError): + UpdateMany({}, {}, array_filters={}) # type: ignore[arg-type] + with self.assertRaises(TypeError): + UpdateOne({}, {}, array_filters={}) # type: ignore[arg-type] def test_array_filters_unacknowledged(self): coll = self.coll_w0 update_one = UpdateOne({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) update_many = UpdateMany({}, {"$set": {"y.$[i].b": 5}}, array_filters=[{"i.b": 1}]) - self.assertRaises(ConfigurationError, coll.bulk_write, [update_one]) - self.assertRaises(ConfigurationError, coll.bulk_write, [update_many]) + with self.assertRaises(ConfigurationError): + coll.bulk_write([update_one]) + with self.assertRaises(ConfigurationError): + coll.bulk_write([update_many]) def _test_update_one(self, update): expected = { @@ -207,17 +201,16 @@ def _test_update_one(self, update): result = self.coll.bulk_write([UpdateOne({}, update)]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (1, None)) + self.assertIn(result.modified_count, (1, None)) def test_update_one(self): self._test_update_one({"$set": {"foo": "bar"}}) - @client_context.require_version_min(4, 1, 11) + @client_context.require_version_min(4, 2, 0) def test_update_one_pipeline(self): self._test_update_one([{"$set": {"foo": "bar"}}]) def test_replace_one(self): - expected = { "nMatched": 1, "nModified": 1, @@ -234,7 +227,7 @@ def test_replace_one(self): result = self.coll.bulk_write([ReplaceOne({}, {"foo": "bar"})]) self.assertEqualResponse(expected, result.bulk_api_result) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (1, None)) + self.assertIn(result.modified_count, (1, None)) def test_remove(self): # Test removing all documents, ordered. 
@@ -274,7 +267,6 @@ def test_remove_one(self): self.assertEqual(self.coll.count_documents({}), 1) def test_upsert(self): - expected = { "nMatched": 0, "nModified": 0, @@ -289,14 +281,14 @@ def test_upsert(self): self.assertEqual(1, result.upserted_count) assert result.upserted_ids is not None self.assertEqual(1, len(result.upserted_ids)) - self.assertTrue(isinstance(result.upserted_ids.get(0), ObjectId)) + self.assertIsInstance(result.upserted_ids.get(0), ObjectId) self.assertEqual(self.coll.count_documents({"foo": "bar"}), 1) def test_numerous_inserts(self): # Ensure we don't exceed server's maxWriteBatchSize size limit. n_docs = client_context.max_write_batch_size + 100 - requests = [InsertOne({}) for _ in range(n_docs)] + requests = [InsertOne[dict]({}) for _ in range(n_docs)] result = self.coll.bulk_write(requests, ordered=False) self.assertEqual(n_docs, result.inserted_count) self.assertEqual(n_docs, self.coll.count_documents({})) @@ -347,7 +339,7 @@ def test_bulk_write_no_results(self): def test_bulk_write_invalid_arguments(self): # The requests argument must be a list. - generator = (InsertOne({}) for _ in range(10)) + generator = (InsertOne[dict]({}) for _ in range(10)) with self.assertRaises(TypeError): self.coll.bulk_write(generator) # type: ignore[arg-type] @@ -789,14 +781,10 @@ def test_large_inserts_unordered(self): class BulkAuthorizationTestBase(BulkTestBase): - @classmethod @client_context.require_auth @client_context.require_no_api_version - def setUpClass(cls): - super(BulkAuthorizationTestBase, cls).setUpClass() - def setUp(self): - super(BulkAuthorizationTestBase, self).setUp() + super().setUp() client_context.create_user(self.db.name, "readonly", "pw", ["read"]) self.db.command( "createRole", @@ -830,8 +818,16 @@ def test_no_results_ordered_success(self): ] result = self.coll_w0.bulk_write(requests) self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") - wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + def predicate(): + return self.coll.count_documents({}) == 2 + + wait_until(predicate, "insert 2 documents") + + def predicate(): + return self.coll.find_one({"_id": 1}) is None + + wait_until(predicate, 'removed {"_id": 1}') def test_no_results_ordered_failure(self): requests: list = [ @@ -845,7 +841,11 @@ def test_no_results_ordered_failure(self): ] result = self.coll_w0.bulk_write(requests) self.assertFalse(result.acknowledged) - wait_until(lambda: 3 == self.coll.count_documents({}), "insert 3 documents") + + def predicate(): + return self.coll.count_documents({}) == 3 + + wait_until(predicate, "insert 3 documents") self.assertEqual({"_id": 1}, self.coll.find_one({"_id": 1})) def test_no_results_unordered_success(self): @@ -857,8 +857,16 @@ def test_no_results_unordered_success(self): ] result = self.coll_w0.bulk_write(requests, ordered=False) self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") - wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + def predicate(): + return self.coll.count_documents({}) == 2 + + wait_until(predicate, "insert 2 documents") + + def predicate(): + return self.coll.find_one({"_id": 1}) is None + + wait_until(predicate, 'removed {"_id": 1}') def test_no_results_unordered_failure(self): requests: list = [ @@ -872,25 +880,34 @@ def test_no_results_unordered_failure(self): ] result = self.coll_w0.bulk_write(requests, ordered=False) 
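# The named predicates above exist because w=0 writes are fire-and-forget:
# the test must poll until the server has actually applied them. A rough
# sketch of what a wait_until-style helper does (the suite's real helper
# lives in test/utils_shared.py; the timeout values here are illustrative):
import time

def wait_until(predicate, description, timeout=10.0, interval=0.1):
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise AssertionError(f"timed out waiting until: {description}")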
self.assertFalse(result.acknowledged) - wait_until(lambda: 2 == self.coll.count_documents({}), "insert 2 documents") - wait_until(lambda: self.coll.find_one({"_id": 1}) is None, 'removed {"_id": 1}') + + def predicate(): + return self.coll.count_documents({}) == 2 + + wait_until(predicate, "insert 2 documents") + + def predicate(): + return self.coll.find_one({"_id": 1}) is None + + wait_until(predicate, 'removed {"_id": 1}') class TestBulkAuthorization(BulkAuthorizationTestBase): def test_readonly(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth( + cli = self.rs_or_single_client_noauth( username="readonly", password="pw", authSource="pymongo_test" ) coll = cli.pymongo_test.test coll.find_one() - self.assertRaises(OperationFailure, coll.bulk_write, [InsertOne({"x": 1})]) + with self.assertRaises(OperationFailure): + coll.bulk_write([InsertOne({"x": 1})]) def test_no_remove(self): # We test that an authorization failure aborts the batch and is raised # as OperationFailure. - cli = rs_or_single_client_noauth( + cli = self.rs_or_single_client_noauth( username="noremove", password="pw", authSource="pymongo_test" ) coll = cli.pymongo_test.test @@ -901,29 +918,28 @@ def test_no_remove(self): DeleteMany({}), # Prohibited. InsertOne({"x": 3}), # Never attempted. ] - self.assertRaises(OperationFailure, coll.bulk_write, requests) - self.assertEqual(set([1, 2]), set(self.coll.distinct("x"))) + with self.assertRaises(OperationFailure): + coll.bulk_write(requests) # type: ignore[arg-type] + self.assertEqual({1, 2}, set(self.coll.distinct("x"))) class TestBulkWriteConcern(BulkTestBase): w: Optional[int] secondary: MongoClient - @classmethod - def setUpClass(cls): - super(TestBulkWriteConcern, cls).setUpClass() - cls.w = client_context.w - cls.secondary = None - if cls.w is not None and cls.w > 1: - for member in client_context.hello["hosts"]: - if member != client_context.hello["primary"]: - cls.secondary = single_client(*partition_node(member)) + def setUp(self): + super().setUp() + self.w = client_context.w + self.secondary = None + if self.w is not None and self.w > 1: + for member in (client_context.hello)["hosts"]: + if member != (client_context.hello)["primary"]: + self.secondary = self.single_client(*partition_node(member)) break - @classmethod - def tearDownClass(cls): - if cls.secondary: - cls.secondary.close() + def tearDown(self): + if self.secondary: + self.secondary.close() def cause_wtimeout(self, requests, ordered): if not client_context.test_commands_enabled: @@ -939,15 +955,18 @@ def cause_wtimeout(self, requests, ordered): finally: self.secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off") + @client_context.require_version_max(7, 1) # PYTHON-4560 @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_ordered(self): + details = None + # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) result = coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})]) self.assertTrue(result.acknowledged) - requests = [InsertOne({"a": 1}), InsertOne({"a": 2})] + requests: list[Any] = [InsertOne({"a": 1}), InsertOne({"a": 2})] # Replication wtimeout is a 'soft' error. # It shouldn't stop batch processing. try: @@ -973,11 +992,11 @@ def test_write_concern_failure_ordered(self): # When talking to legacy servers there will be a # write concern error for each operation. 
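# cause_wtimeout above depends on server test commands: the rsSyncApplyStop
# failpoint pauses replication on a secondary so that a w>1 write concern
# cannot be satisfied and times out. A minimal sketch of the same toggle,
# assuming enableTestCommands=1 and illustrative host/port values:
from pymongo import MongoClient
from pymongo.write_concern import WriteConcern

secondary = MongoClient("localhost", 27018, directConnection=True)
coll = MongoClient("mongodb://localhost:27017/?replicaSet=rs0").test.example
secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="alwaysOn")
try:
    coll_w2 = coll.with_options(write_concern=WriteConcern(w=2, wtimeout=100))
    coll_w2.insert_one({"a": 1})  # expected to fail with a wtimeout error
finally:
    secondary.admin.command("configureFailPoint", "rsSyncApplyStop", mode="off")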
- self.assertTrue(len(details["writeConcernErrors"]) > 0) + self.assertGreater(len(details["writeConcernErrors"]), 0) failed = details["writeConcernErrors"][0] self.assertEqual(64, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) self.coll.delete_many({}) self.coll.create_index("a", unique=True) @@ -1014,13 +1033,17 @@ def test_write_concern_failure_ordered(self): details, ) - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) failed = details["writeErrors"][0] - self.assertTrue("duplicate" in failed["errmsg"]) + self.assertIn("duplicate", failed["errmsg"]) + @client_context.require_version_max(7, 1) # PYTHON-4560 @client_context.require_replica_set @client_context.require_secondaries_count(1) def test_write_concern_failure_unordered(self): + self.skipTest("Skipping until PYTHON-4865 is resolved.") + details = None + # Ensure we don't raise on wnote. coll_ww = self.coll.with_options(write_concern=WriteConcern(w=self.w)) result = coll_ww.bulk_write([DeleteOne({"something": "that does no exist"})], ordered=False) @@ -1046,7 +1069,7 @@ def test_write_concern_failure_unordered(self): self.assertEqual(0, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) self.coll.delete_many({}) self.coll.create_index("a", unique=True) @@ -1073,17 +1096,17 @@ def test_write_concern_failure_unordered(self): self.assertEqual(1, len(details["writeErrors"])) # When talking to legacy servers there will be a # write concern error for each operation. - self.assertTrue(len(details["writeConcernErrors"]) > 1) + self.assertGreater(len(details["writeConcernErrors"]), 1) failed = details["writeErrors"][0] self.assertEqual(2, failed["index"]) self.assertEqual(11000, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) self.assertEqual(1, failed["op"]["a"]) failed = details["writeConcernErrors"][0] self.assertEqual(64, failed["code"]) - self.assertTrue(isinstance(failed["errmsg"], str)) + self.assertIsInstance(failed["errmsg"], str) upserts = details["upserted"] self.assertEqual(1, len(upserts)) diff --git a/test/test_change_stream.py b/test/test_change_stream.py index 11ed2895ac..ad51f91873 100644 --- a/test/test_change_stream.py +++ b/test/test_change_stream.py @@ -13,7 +13,9 @@ # limitations under the License. 
"""Test the change_stream module.""" +from __future__ import annotations +import asyncio import os import random import string @@ -26,12 +28,18 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest +from test import ( + IntegrationTest, + PyMongoTestCase, + Version, + client_context, + unittest, +) from test.unified_format import generate_test_classes -from test.utils import ( +from test.utils_shared import ( AllowListEventListener, EventListener, - rs_or_single_client, + OvertCommandListener, wait_until, ) @@ -39,7 +47,6 @@ from bson.binary import ALL_UUID_REPRESENTATIONS, PYTHON_LEGACY, STANDARD, Binary from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from pymongo import MongoClient -from pymongo.command_cursor import CommandCursor from pymongo.errors import ( InvalidOperation, OperationFailure, @@ -47,8 +54,11 @@ ) from pymongo.message import _CursorAddress from pymongo.read_concern import ReadConcern +from pymongo.synchronous.command_cursor import CommandCursor from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestChangeStreamBase(IntegrationTest): RUN_ON_LOAD_BALANCER = True @@ -64,8 +74,7 @@ def change_stream(self, *args, **kwargs): def client_with_listener(self, *commands): """Return a client with a AllowListEventListener.""" listener = AllowListEventListener(*commands) - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) return client, listener def watched_collection(self, *args, **kwargs): @@ -96,16 +105,17 @@ def get_resume_token(self, invalidate=False): if isinstance(cs._target, MongoClient): self.skipTest("cluster-level change streams cannot be invalidated") self.generate_invalidate_event(cs) - return cs.next()["_id"] + return (cs.next())["_id"] else: with self.change_stream() as cs: coll.insert_one({"data": 1}) - return cs.next()["_id"] + return (cs.next())["_id"] def get_start_at_operation_time(self): """Get an operationTime. Advances the operation clock beyond the most - recently returned timestamp.""" - optime = self.client.admin.command("ping")["operationTime"] + recently returned timestamp. 
+ """ + optime = (self.client.admin.command("ping"))["operationTime"] return Timestamp(optime.time, optime.inc + 1) def insert_one_and_check(self, change_stream, doc): @@ -115,12 +125,12 @@ def insert_one_and_check(self, change_stream, doc): def kill_change_stream_cursor(self, change_stream): """Cause a cursor not found error on the next getMore.""" cursor = change_stream._cursor - address = _CursorAddress(cursor.address, cursor._CommandCursor__ns) + address = _CursorAddress(cursor.address, cursor._ns) client = self.watched_collection().database.client client._close_cursor_now(cursor.cursor_id, address) -class APITestsMixin(object): +class APITestsMixin: @no_type_check def test_watch(self): with self.change_stream( @@ -134,7 +144,7 @@ def test_watch(self): self.assertEqual(1000, change_stream._max_await_time_ms) self.assertEqual(100, change_stream._batch_size) self.assertIsInstance(change_stream._cursor, CommandCursor) - self.assertEqual(1000, change_stream._cursor._CommandCursor__max_await_time_ms) + self.assertEqual(1000, change_stream._cursor._max_await_time_ms) self.watched_collection(write_concern=WriteConcern("majority")).insert_one({}) _ = change_stream.next() resume_token = change_stream.resume_token @@ -156,18 +166,22 @@ def test_try_next(self): with self.change_stream(max_await_time_ms=250) as stream: self.assertIsNone(stream.try_next()) # No changes initially. coll.insert_one({}) # Generate a change. + # On sharded clusters, even majority-committed changes only show # up once an event that sorts after it shows up on the other # shard. So, we wait on try_next to eventually return changes. - wait_until(lambda: stream.try_next() is not None, "get change from try_next") + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") @no_type_check def test_try_next_runs_one_getmore(self): - listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") - listener.results.clear() + listener.reset() # ChangeStreams only read majority committed data so use w:majority. coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() @@ -177,25 +191,29 @@ def test_try_next_runs_one_getmore(self): self.addCleanup(coll.drop) with self.change_stream_with_client(client, max_await_time_ms=250) as stream: self.assertEqual(listener.started_command_names(), ["aggregate"]) - listener.results.clear() + listener.reset() # Confirm that only a single getMore is run even when no documents # are returned. self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore"]) - listener.results.clear() + listener.reset() self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore"]) - listener.results.clear() + listener.reset() # Get at least one change before resuming. coll.insert_one({"_id": 2}) - wait_until(lambda: stream.try_next() is not None, "get change from try_next") - listener.results.clear() + + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") + listener.reset() # Cause the next request to initiate the resume process. 
self.kill_change_stream_cursor(stream) - listener.results.clear() + listener.reset() # The sequence should be: # - getMore, fail @@ -203,21 +221,25 @@ def test_try_next_runs_one_getmore(self): # - no results, return immediately without another getMore self.assertIsNone(stream.try_next()) self.assertEqual(listener.started_command_names(), ["getMore", "aggregate"]) - listener.results.clear() + listener.reset() # Stream still works after a resume. coll.insert_one({"_id": 3}) - wait_until(lambda: stream.try_next() is not None, "get change from try_next") - self.assertEqual(set(listener.started_command_names()), set(["getMore"])) + + def _wait_until(): + return stream.try_next() is not None + + wait_until(_wait_until, "get change from try_next") + self.assertEqual(set(listener.started_command_names()), {"getMore"}) self.assertIsNone(stream.try_next()) @no_type_check def test_batch_size_is_honored(self): - listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) # Connect to the cluster. client.admin.command("ping") - listener.results.clear() + listener.reset() # ChangeStreams only read majority committed data so use w:majority. coll = self.watched_collection().with_options(write_concern=WriteConcern("majority")) coll.drop() @@ -229,18 +251,18 @@ def test_batch_size_is_honored(self): expected = {"batchSize": 23} with self.change_stream_with_client(client, max_await_time_ms=250, batch_size=23) as stream: # Confirm that batchSize is honored for initial batch. - cmd = listener.results["started"][0].command + cmd = listener.started_events[0].command self.assertEqual(cmd["cursor"], expected) - listener.results.clear() + listener.reset() # Confirm that batchSize is honored by getMores. self.assertIsNone(stream.try_next()) - cmd = listener.results["started"][0].command + cmd = listener.started_events[0].command key = next(iter(expected)) self.assertEqual(expected[key], cmd[key]) # $changeStream.startAtOperationTime was added in 4.0.0. 
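# The expected {"batchSize": 23} above is asserted against the wire-level
# aggregate and getMore commands via a command listener. A minimal sketch of
# observing those commands yourself, assuming a replica set; the Recorder
# class and connection details are illustrative:
from pymongo import MongoClient, monitoring

class Recorder(monitoring.CommandListener):
    def __init__(self):
        self.names = []
    def started(self, event):
        self.names.append(event.command_name)
    def succeeded(self, event):
        pass
    def failed(self, event):
        pass

recorder = Recorder()
client = MongoClient("mongodb://localhost:27017/?replicaSet=rs0",
                     event_listeners=[recorder])
with client.test.example.watch(batch_size=23, max_await_time_ms=250) as stream:
    stream.try_next()
print(recorder.names)  # e.g. ['aggregate', 'getMore', ...]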
@no_type_check - @client_context.require_version_min(4, 0, 0) + @client_context.require_version_min(4, 2, 0) def test_start_at_operation_time(self): optime = self.get_start_at_operation_time() @@ -249,18 +271,17 @@ def test_start_at_operation_time(self): coll.insert_many([{"data": i} for i in range(ndocs)]) with self.change_stream(start_at_operation_time=optime) as cs: - for i in range(ndocs): + for _i in range(ndocs): cs.next() @no_type_check def _test_full_pipeline(self, expected_cs_stage): client, listener = self.client_with_listener("aggregate") - results = listener.results with self.change_stream_with_client(client, [{"$project": {"foo": 0}}]) as _: pass - self.assertEqual(1, len(results["started"])) - command = results["started"][0] + self.assertEqual(1, len(listener.started_events)) + command = listener.started_events[0] self.assertEqual("aggregate", command.command_name) self.assertEqual( [{"$changeStream": expected_cs_stage}, {"$project": {"foo": 0}}], @@ -288,6 +309,7 @@ def test_iteration(self): self._test_invalidate_stops_iteration(change_stream) @no_type_check + @client_context.require_sync def _test_next_blocks(self, change_stream): inserted_doc = {"_id": ObjectId()} changes = [] @@ -307,13 +329,15 @@ def _test_next_blocks(self, change_stream): self.assertEqual(changes[0]["fullDocument"], inserted_doc) @no_type_check + @client_context.require_sync def test_next_blocks(self): """Test that next blocks until a change is readable""" - # Use a short await time to speed up the test. + # Use a short wait time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: self._test_next_blocks(change_stream) @no_type_check + @client_context.require_sync def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.watched_collection().aggregate( @@ -322,14 +346,19 @@ def test_aggregate_cursor_blocks(self): self._test_next_blocks(change_stream) @no_type_check + @client_context.require_sync def test_concurrent_close(self): """Ensure a ChangeStream can be closed from another thread.""" - # Use a short await time to speed up the test. + # Use a short wait time to speed up the test. with self.change_stream(max_await_time_ms=250) as change_stream: def iterate_cursor(): - for _ in change_stream: - pass + try: + for _ in change_stream: + pass + except OperationFailure as e: + if e.code != 237: # CursorKilled error code + raise t = threading.Thread(target=iterate_cursor) t.start() @@ -376,7 +405,14 @@ def test_change_operations(self): expected_update_description = {"updatedFields": {"new": 1}, "removedFields": ["foo"]} if client_context.version.at_least(4, 5, 0): expected_update_description["truncatedArrays"] = [] - self.assertEqual(expected_update_description, change["updateDescription"]) + self.assertEqual( + expected_update_description, + { + k: v + for k, v in change["updateDescription"].items() + if k in expected_update_description + }, + ) # Replace. 
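# The dict comprehension above compares only the keys the test cares about,
# so newer servers may add fields to updateDescription (truncatedArrays
# here, and later additions) without breaking the assertion. The idea in
# isolation:
expected = {"updatedFields": {"new": 1}, "removedFields": ["foo"]}
actual = {
    "updatedFields": {"new": 1},
    "removedFields": ["foo"],
    "truncatedArrays": [],  # extra server-added field, deliberately ignored
}
trimmed = {k: v for k, v in actual.items() if k in expected}
assert trimmed == expected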
self.watched_collection().replace_one({"new": 1}, {"foo": "bar"}) change = change_stream.next() @@ -395,7 +431,7 @@ def test_change_operations(self): self._test_get_invalidate_event(change_stream) @no_type_check - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_start_after(self): resume_token = self.get_resume_token(invalidate=True) @@ -411,7 +447,7 @@ def test_start_after(self): self.assertEqual(change["fullDocument"], {"_id": 2}) @no_type_check - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_start_after_resume_process_with_changes(self): resume_token = self.get_resume_token(invalidate=True) @@ -430,8 +466,7 @@ def test_start_after_resume_process_with_changes(self): self.assertEqual(change["fullDocument"], {"_id": 3}) @no_type_check - @client_context.require_no_mongos # Remove after SERVER-41196 - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2) def test_start_after_resume_process_without_changes(self): resume_token = self.get_resume_token(invalidate=True) @@ -445,11 +480,11 @@ def test_start_after_resume_process_without_changes(self): self.assertEqual(change["fullDocument"], {"_id": 2}) -class ProseSpecTestsMixin(object): +class ProseSpecTestsMixin: @no_type_check def _client_with_listener(self, *commands): listener = AllowListEventListener(*commands) - client = rs_or_single_client(event_listeners=[listener]) + client = PyMongoTestCase.unmanaged_rs_or_single_client(event_listeners=[listener]) self.addCleanup(client.close) return client, listener @@ -463,9 +498,10 @@ def _populate_and_exhaust_change_stream(self, change_stream, batch_size=3): def _get_expected_resume_token_legacy(self, stream, listener, previous_change=None): """Predicts what the resume token should currently be for server versions that don't support postBatchResumeToken. Assumes the stream - has never returned any changes if previous_change is None.""" + has never returned any changes if previous_change is None. + """ if previous_change is None: - agg_cmd = listener.results["started"][0] + agg_cmd = listener.started_events[0] stage = agg_cmd.command["pipeline"][0]["$changeStream"] return stage.get("resumeAfter") or stage.get("startAfter") @@ -476,17 +512,18 @@ def _get_expected_resume_token(self, stream, listener, previous_change=None): versions that support postBatchResumeToken. Assumes the stream has never returned any changes if previous_change is None. Assumes listener is a AllowListEventListener that listens for aggregate and - getMore commands.""" + getMore commands. + """ if previous_change is None or stream._cursor._has_next(): token = self._get_expected_resume_token_legacy(stream, listener, previous_change) if token is not None: return token - response = listener.results["succeeded"][-1].reply + response = listener.succeeded_events[-1].reply return response["cursor"]["postBatchResumeToken"] @no_type_check - def _test_raises_error_on_missing_id(self, expected_exception, expected_exception2): + def _test_raises_error_on_missing_id(self, expected_exception): """ChangeStream will raise an exception if the server response is missing the resume token. """ @@ -494,7 +531,8 @@ def _test_raises_error_on_missing_id(self, expected_exception, expected_exceptio self.watched_collection().insert_one({}) with self.assertRaises(expected_exception): next(change_stream) - with self.assertRaises(expected_exception2): + # The cursor should now be closed. 
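# The helpers above predict what resume_token should be; in application
# code the token is simply read off the stream and replayed to continue
# where a previous stream stopped. A minimal sketch, assuming an
# illustrative replica-set URI:
from pymongo import MongoClient

coll = MongoClient("mongodb://localhost:27017/?replicaSet=rs0").test.example
with coll.watch() as stream:
    coll.insert_one({"marker": 1})
    change = stream.next()
    token = stream.resume_token

# Later: resume after that event without missing intervening changes.
with coll.watch(resume_after=token) as stream:
    coll.insert_one({"marker": 2})
    print(stream.next()["fullDocument"])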
+ with self.assertRaises(StopIteration): next(change_stream) @no_type_check @@ -514,26 +552,15 @@ def _test_update_resume_token(self, expected_rt_getter): ) # Prose test no. 1 - @client_context.require_version_min(4, 0, 7) + @client_context.require_version_min(4, 2, 0) def test_update_resume_token(self): self._test_update_resume_token(self._get_expected_resume_token) - # Prose test no. 1 - @client_context.require_version_max(4, 0, 7) - def test_update_resume_token_legacy(self): - self._test_update_resume_token(self._get_expected_resume_token_legacy) - # Prose test no. 2 - @client_context.require_version_min(4, 1, 8) + @client_context.require_version_min(4, 2, 0) def test_raises_error_on_missing_id_418plus(self): - # Server returns an error on 4.1.8+, subsequent next() resumes and gets the same error. - self._test_raises_error_on_missing_id(OperationFailure, OperationFailure) - - # Prose test no. 2 - @client_context.require_version_max(4, 1, 8) - def test_raises_error_on_missing_id_418minus(self): - # PyMongo raises an error, closes the cursor, subsequent next() raises StopIteration. - self._test_raises_error_on_missing_id(InvalidOperation, StopIteration) + # Server returns an error on 4.1.8+ + self._test_raises_error_on_missing_id(OperationFailure) # Prose test no. 3 @no_type_check @@ -558,8 +585,8 @@ def test_no_resume_attempt_if_aggregate_command_fails(self): pass # Driver should have attempted aggregate command only once. - self.assertEqual(len(listener.results["started"]), 1) - self.assertEqual(listener.results["started"][0].command_name, "aggregate") + self.assertEqual(len(listener.started_events), 1) + self.assertEqual(listener.started_events[0].command_name, "aggregate") # Prose test no. 5 - REMOVED # Prose test no. 6 - SKIPPED @@ -593,91 +620,33 @@ def raise_error(): cursor.close = raise_error self.insert_one_and_check(change_stream, {"_id": 2}) - # Prose test no. 9 - @no_type_check - @client_context.require_version_min(4, 0, 0) - @client_context.require_version_max(4, 0, 7) - def test_start_at_operation_time_caching(self): - # Case 1: change stream not started with startAtOperationTime - client, listener = self.client_with_listener("aggregate") - with self.change_stream_with_client(client) as cs: - self.kill_change_stream_cursor(cs) - cs.try_next() - cmd = listener.results["started"][-1].command - self.assertIsNotNone(cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime")) - - # Case 2: change stream started with startAtOperationTime - listener.results.clear() - optime = self.get_start_at_operation_time() - with self.change_stream_with_client(client, start_at_operation_time=optime) as cs: - self.kill_change_stream_cursor(cs) - cs.try_next() - cmd = listener.results["started"][-1].command - self.assertEqual( - cmd["pipeline"][0]["$changeStream"].get("startAtOperationTime"), - optime, - str([k.command for k in listener.results["started"]]), - ) - # Prose test no. 10 - SKIPPED # This test is identical to prose test no. 3. # Prose test no. 
11 @no_type_check - @client_context.require_version_min(4, 0, 7) + @client_context.require_version_min(4, 2, 0) def test_resumetoken_empty_batch(self): client, listener = self._client_with_listener("getMore") with self.change_stream_with_client(client) as change_stream: self.assertIsNone(change_stream.try_next()) resume_token = change_stream.resume_token - response = listener.results["succeeded"][0].reply + response = listener.succeeded_events[0].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) # Prose test no. 11 @no_type_check - @client_context.require_version_min(4, 0, 7) + @client_context.require_version_min(4, 2, 0) def test_resumetoken_exhausted_batch(self): client, listener = self._client_with_listener("getMore") with self.change_stream_with_client(client) as change_stream: self._populate_and_exhaust_change_stream(change_stream) resume_token = change_stream.resume_token - response = listener.results["succeeded"][-1].reply + response = listener.succeeded_events[-1].reply self.assertEqual(resume_token, response["cursor"]["postBatchResumeToken"]) - # Prose test no. 12 - @no_type_check - @client_context.require_version_max(4, 0, 7) - def test_resumetoken_empty_batch_legacy(self): - resume_point = self.get_resume_token() - - # Empty resume token when neither resumeAfter or startAfter specified. - with self.change_stream() as change_stream: - change_stream.try_next() - self.assertIsNone(change_stream.resume_token) - - # Resume token value is same as resumeAfter. - with self.change_stream(resume_after=resume_point) as change_stream: - change_stream.try_next() - resume_token = change_stream.resume_token - self.assertEqual(resume_token, resume_point) - - # Prose test no. 12 - @no_type_check - @client_context.require_version_max(4, 0, 7) - def test_resumetoken_exhausted_batch_legacy(self): - # Resume token is _id of last change. - with self.change_stream() as change_stream: - change = self._populate_and_exhaust_change_stream(change_stream) - self.assertEqual(change_stream.resume_token, change["_id"]) - resume_point = change["_id"] - - # Resume token is _id of last change even if resumeAfter is specified. - with self.change_stream(resume_after=resume_point) as change_stream: - change = self._populate_and_exhaust_change_stream(change_stream) - self.assertEqual(change_stream.resume_token, change["_id"]) - # Prose test no. 13 @no_type_check def test_resumetoken_partially_iterated_batch(self): @@ -719,13 +688,13 @@ def test_resumetoken_uniterated_nonempty_batch_resumeafter(self): # Prose test no. 14 @no_type_check @client_context.require_no_mongos - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_resumetoken_uniterated_nonempty_batch_startafter(self): self._test_resumetoken_uniterated_nonempty_batch("start_after") # Prose test no. 17 @no_type_check - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_startafter_resume_uses_startafter_after_empty_getMore(self): # Resume should use startAfter after no changes have been returned. 
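# Prose tests 17 and 18 pin down when the driver resumes with startAfter
# versus resumeAfter: startAfter is sent while no changes have been returned
# yet (and is the only option that works across an invalidate), after which
# the driver switches to resumeAfter. A minimal sketch of obtaining an
# invalidate token and resuming past it; connection details illustrative:
from pymongo import MongoClient

coll = MongoClient("mongodb://localhost:27017/?replicaSet=rs0").test.example
with coll.watch() as stream:
    coll.insert_one({})
    coll.drop()  # produces drop + invalidate events
    token = None
    for change in stream:
        if change["operationType"] == "invalidate":
            token = change["_id"]
            break

# resume_after=token would fail here; start_after resumes past invalidate.
with coll.watch(start_after=token) as stream:
    coll.insert_one({"x": 1})
    print(stream.next()["operationType"])  # insert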
resume_point = self.get_resume_token() @@ -737,13 +706,13 @@ def test_startafter_resume_uses_startafter_after_empty_getMore(self): self.kill_change_stream_cursor(change_stream) change_stream.try_next() # Resume attempt - response = listener.results["started"][-1] + response = listener.started_events[-1] self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) # Prose test no. 18 @no_type_check - @client_context.require_version_min(4, 1, 1) + @client_context.require_version_min(4, 2, 0) def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): # Resume should use resumeAfter after some changes have been returned. resume_point = self.get_resume_token() @@ -756,27 +725,48 @@ def test_startafter_resume_uses_resumeafter_after_nonempty_getMore(self): self.kill_change_stream_cursor(change_stream) change_stream.try_next() # Resume attempt - response = listener.results["started"][-1] + response = listener.started_events[-1] self.assertIsNotNone(response.command["pipeline"][0]["$changeStream"].get("resumeAfter")) self.assertIsNone(response.command["pipeline"][0]["$changeStream"].get("startAfter")) + # Prose test no. 19 + @no_type_check + def test_split_large_change(self): + server_version = client_context.version + if not server_version.at_least(6, 0, 9): + self.skipTest("$changeStreamSplitLargeEvent requires MongoDB 6.0.9+") + if server_version.at_least(6, 1, 0) and server_version < Version(7, 0, 0): + self.skipTest("$changeStreamSplitLargeEvent is not available in 6.x rapid releases") + self.db.drop_collection("test_split_large_change") + coll = self.db.create_collection( + "test_split_large_change", changeStreamPreAndPostImages={"enabled": True} + ) + coll.insert_one({"_id": 1, "value": "q" * 10 * 1024 * 1024}) + with coll.watch( + [{"$changeStreamSplitLargeEvent": {}}], full_document_before_change="required" + ) as change_stream: + coll.update_one({"_id": 1}, {"$set": {"value": "z" * 10 * 1024 * 1024}}) + doc_1 = change_stream.next() + self.assertIn("splitEvent", doc_1) + self.assertEqual(doc_1["splitEvent"], {"fragment": 1, "of": 2}) + doc_2 = change_stream.next() + self.assertIn("splitEvent", doc_2) + self.assertEqual(doc_2["splitEvent"], {"fragment": 2, "of": 2}) + class TestClusterChangeStream(TestChangeStreamBase, APITestsMixin): dbs: list - @classmethod - @client_context.require_version_min(4, 0, 0, -1) - @client_context.require_no_mmap - @client_context.require_no_standalone - def setUpClass(cls): - super(TestClusterChangeStream, cls).setUpClass() - cls.dbs = [cls.db, cls.client.pymongo_test_2] + @client_context.require_version_min(4, 2, 0) + @client_context.require_change_streams + def setUp(self) -> None: + super().setUp() + self.dbs = [self.db, self.client.pymongo_test_2] - @classmethod - def tearDownClass(cls): - for db in cls.dbs: - cls.client.drop_database(db) - super(TestClusterChangeStream, cls).tearDownClass() + def tearDown(self): + for db in self.dbs: + self.client.drop_database(db) + super().tearDown() def change_stream_with_client(self, client, *args, **kwargs): return client.watch(*args, **kwargs) @@ -811,6 +801,7 @@ def test_simple(self): for db, collname in product(self.dbs, collnames): self._insert_and_check(change_stream, db, collname, {"_id": collname}) + @client_context.require_sync def test_aggregate_cursor_blocks(self): """Test that an aggregate cursor blocks until a change is readable.""" with self.client.admin.aggregate( @@ -826,12 
+817,10 @@ def test_full_pipeline(self): class TestDatabaseChangeStream(TestChangeStreamBase, APITestsMixin): - @classmethod - @client_context.require_version_min(4, 0, 0, -1) - @client_context.require_no_mmap - @client_context.require_no_standalone - def setUpClass(cls): - super(TestDatabaseChangeStream, cls).setUpClass() + @client_context.require_version_min(4, 2, 0) + @client_context.require_change_streams + def setUp(self) -> None: + super().setUp() def change_stream_with_client(self, client, *args, **kwargs): return client[self.db.name].watch(*args, **kwargs) @@ -913,14 +902,9 @@ def test_isolation(self): class TestCollectionChangeStream(TestChangeStreamBase, APITestsMixin, ProseSpecTestsMixin): - @classmethod - @client_context.require_version_min(3, 5, 11) - @client_context.require_no_mmap - @client_context.require_no_standalone - def setUpClass(cls): - super(TestCollectionChangeStream, cls).setUpClass() - + @client_context.require_change_streams def setUp(self): + super().setUp() # Use a new collection for each test. self.watched_collection().drop() self.watched_collection().insert_one({}) @@ -993,21 +977,32 @@ def test_raw(self): self.assertEqual(change["ns"]["coll"], self.watched_collection().name) self.assertEqual(change["fullDocument"], raw_doc) + @client_context.require_version_min(4, 0) # Needed for start_at_operation_time. def test_uuid_representations(self): """Test with uuid document _ids and different uuid_representation.""" + optime = (self.db.command("ping"))["operationTime"] + self.watched_collection().insert_many( + [ + {"_id": Binary(uuid.uuid4().bytes, id_subtype)} + for id_subtype in (STANDARD, PYTHON_LEGACY) + ] + ) for uuid_representation in ALL_UUID_REPRESENTATIONS: - for id_subtype in (STANDARD, PYTHON_LEGACY): - options = self.watched_collection().codec_options.with_options( - uuid_representation=uuid_representation - ) - coll = self.watched_collection(codec_options=options) - with coll.watch() as change_stream: - coll.insert_one({"_id": Binary(uuid.uuid4().bytes, id_subtype)}) - _ = change_stream.next() - resume_token = change_stream.resume_token + options = self.watched_collection().codec_options.with_options( + uuid_representation=uuid_representation + ) + coll = self.watched_collection(codec_options=options) + with coll.watch(start_at_operation_time=optime, max_await_time_ms=1) as change_stream: + _ = change_stream.next() + resume_token_1 = change_stream.resume_token + _ = change_stream.next() + resume_token_2 = change_stream.resume_token - # Should not error. - coll.watch(resume_after=resume_token) + # Should not error. + with coll.watch(resume_after=resume_token_1): + pass + with coll.watch(resume_after=resume_token_2): + pass def test_document_id_order(self): """Test with document _ids that need their order preserved.""" @@ -1026,7 +1021,8 @@ def test_document_id_order(self): # The resume token is always a document. self.assertIsInstance(resume_token, document_class) # Should not error. 
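# The UUID test above checks that resume tokens survive every
# uuid_representation. Whether a Binary _id decodes to uuid.UUID (and how a
# native UUID encodes) is controlled by the collection's CodecOptions; a
# minimal sketch, host illustrative:
import uuid
from bson.binary import UuidRepresentation
from bson.codec_options import CodecOptions
from pymongo import MongoClient

db = MongoClient("localhost", 27017).pymongo_test
opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
coll_std = db.get_collection("test", codec_options=opts)
coll_std.insert_one({"_id": uuid.uuid4()})  # stored as subtype-4 Binary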
- coll.watch(resume_after=resume_token) + with coll.watch(resume_after=resume_token): + pass coll.delete_many({}) def test_read_concern(self): @@ -1046,21 +1042,12 @@ class TestAllLegacyScenarios(IntegrationTest): RUN_ON_LOAD_BALANCER = True listener: AllowListEventListener - @classmethod @client_context.require_connection - def setUpClass(cls): - super(TestAllLegacyScenarios, cls).setUpClass() - cls.listener = AllowListEventListener("aggregate", "getMore") - cls.client = rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - def tearDownClass(cls): - cls.client.close() - super(TestAllLegacyScenarios, cls).tearDownClass() - def setUp(self): - super(TestAllLegacyScenarios, self).setUp() - self.listener.results.clear() + super().setUp() + self.listener = AllowListEventListener("aggregate", "getMore") + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.listener.reset() def setUpCluster(self, scenario_dict): assets = [ @@ -1093,7 +1080,8 @@ def setFailPoint(self, scenario_dict): def assert_list_contents_are_subset(self, superlist, sublist): """Check that each element in sublist is a subset of the corresponding - element in superlist.""" + element in superlist. + """ self.assertEqual(len(superlist), len(sublist)) for sup, sub in zip(superlist, sublist): if isinstance(sub, dict): @@ -1109,7 +1097,7 @@ def assert_dict_is_subset(self, superdict, subdict): exempt_fields = ["documentKey", "_id", "getMore"] for key, value in subdict.items(): if key not in superdict: - self.fail("Key %s not found in %s" % (key, superdict)) + self.fail(f"Key {key} not found in {superdict}") if isinstance(value, dict): self.assert_dict_is_subset(superdict[key], value) continue @@ -1132,7 +1120,7 @@ def check_event(self, event, expectation_dict): self.assertEqual(getattr(event, key), value) def tearDown(self): - self.listener.results.clear() + self.listener.reset() _TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "change_streams") diff --git a/test/test_client.py b/test/test_client.py index 3630cec06c..9d201c663b 100644 --- a/test/test_client.py +++ b/test/test_client.py @@ -13,13 +13,18 @@ # limitations under the License. 
"""Test the mongo_client module.""" +from __future__ import annotations import _thread as thread +import asyncio +import base64 import contextlib import copy import datetime import gc +import logging import os +import re import signal import socket import struct @@ -27,7 +32,15 @@ import sys import threading import time -from typing import Iterable, Type, no_type_check +import uuid +from typing import Any, Iterable, Type, no_type_check +from unittest import mock, skipIf +from unittest.mock import patch + +import pytest + +from bson.binary import CSHARP_LEGACY, JAVA_LEGACY, PYTHON_LEGACY, Binary, UuidRepresentation +from pymongo.operations import _Op sys.path[0:0] = [""] @@ -36,45 +49,48 @@ IntegrationTest, MockClientTest, SkipTest, + UnitTest, client_context, client_knobs, + connected, db_pwd, db_user, + remove_all_users, unittest, ) from test.pymongo_mocks import MockClient +from test.test_binary import BinaryData from test.utils import ( + assertRaisesExactly, + get_pool, + wait_until, +) +from test.utils_shared import ( NTHREADS, CMAPListener, FunctionCallRecorder, - assertRaisesExactly, - connected, delay, - get_pool, gevent_monkey_patched, is_greenthread_patched, lazy_client_trial, one, - remove_all_users, - rs_client, - rs_or_single_client, - rs_or_single_client_noauth, - single_client, - wait_until, ) +import bson import pymongo from bson import encode -from bson.codec_options import CodecOptions, TypeEncoder, TypeRegistry +from bson.codec_options import ( + CodecOptions, + DatetimeConversion, + TypeEncoder, + TypeRegistry, +) from bson.son import SON from bson.tz_util import utc from pymongo import event_loggers, message, monitoring from pymongo.client_options import ClientOptions -from pymongo.command_cursor import CommandCursor -from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT -from pymongo.compression_support import _HAVE_SNAPPY, _HAVE_ZSTD -from pymongo.cursor import Cursor, CursorType -from pymongo.database import Database +from pymongo.common import _UUID_REPRESENTATIONS, CONNECT_TIMEOUT, MIN_SUPPORTED_WIRE_VERSION, has_c +from pymongo.compression_support import _have_snappy, _have_zstd from pymongo.driver_info import DriverInfo from pymongo.errors import ( AutoReconnect, @@ -86,38 +102,44 @@ NetworkTimeout, OperationFailure, ServerSelectionTimeoutError, + WaitQueueTimeoutError, WriteConcernError, ) -from pymongo.mongo_client import MongoClient from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent -from pymongo.pool import _METADATA, PoolOptions, SocketInfo +from pymongo.pool_options import _MAX_METADATA_SIZE, _METADATA, ENV_VAR_K8S, PoolOptions from pymongo.read_preferences import ReadPreference from pymongo.server_description import ServerDescription from pymongo.server_selectors import readable_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.settings import TOPOLOGY_TYPE -from pymongo.srv_resolver import _HAVE_DNSPYTHON -from pymongo.topology import _ErrorContext +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor, CursorType +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +from pymongo.synchronous.pool import ( + Connection, +) +from pymongo.synchronous.settings import TOPOLOGY_TYPE +from pymongo.synchronous.topology import _ErrorContext from pymongo.topology_description import TopologyDescription from pymongo.write_concern import WriteConcern +_IS_SYNC = 
True + -class ClientUnitTest(unittest.TestCase): +class ClientUnitTest(UnitTest): """MongoClient tests that don't require a server.""" client: MongoClient - @classmethod - @client_context.require_connection - def setUpClass(cls): - cls.client = rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) + def setUp(self) -> None: + self.client = self.rs_or_single_client(connect=False, serverSelectionTimeoutMS=100) - @classmethod - def tearDownClass(cls): - cls.client.close() + @pytest.fixture(autouse=True) + def inject_fixtures(self, caplog): + self._caplog = caplog def test_keyword_arg_defaults(self): - client = MongoClient( + client = self.simple_client( socketTimeoutMS=None, connectTimeoutMS=20000, waitQueueTimeoutMS=None, @@ -131,7 +153,7 @@ def test_keyword_arg_defaults(self): serverSelectionTimeoutMS=12000, ) - options = client._MongoClient__options + options = client.options pool_opts = options.pool_options self.assertEqual(None, pool_opts.socket_timeout) # socket.Socket.settimeout takes a float in seconds @@ -143,18 +165,20 @@ def test_keyword_arg_defaults(self): self.assertAlmostEqual(12, client.options.server_selection_timeout) def test_connect_timeout(self): - client = MongoClient(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) - pool_opts = client._MongoClient__options.pool_options + client = self.simple_client(connect=False, connectTimeoutMS=None, socketTimeoutMS=None) + pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) - client = MongoClient(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) - pool_opts = client._MongoClient__options.pool_options + + client = self.simple_client(connect=False, connectTimeoutMS=0, socketTimeoutMS=0) + pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) - client = MongoClient( + + client = self.simple_client( "mongodb://localhost/?connectTimeoutMS=0&socketTimeoutMS=0", connect=False ) - pool_opts = client._MongoClient__options.pool_options + pool_opts = client.options.pool_options self.assertEqual(None, pool_opts.socket_timeout) self.assertEqual(None, pool_opts.connect_timeout) @@ -168,7 +192,7 @@ def test_types(self): self.assertRaises(ConfigurationError, MongoClient, []) def test_max_pool_size_zero(self): - MongoClient(maxPoolSize=0) + self.simple_client(maxPoolSize=0) def test_uri_detection(self): self.assertRaises(ConfigurationError, MongoClient, "/foo") @@ -186,7 +210,7 @@ def make_db(base, name): self.assertRaises(InvalidName, make_db, self.client, "te/t") self.assertRaises(InvalidName, make_db, self.client, "te st") - self.assertTrue(isinstance(self.client.test, Database)) + self.assertIsInstance(self.client.test, Database) self.assertEqual(self.client.test, self.client["test"]) self.assertEqual(self.client.test, Database(self.client, "test")) @@ -200,7 +224,7 @@ def test_get_database(self): self.assertEqual(write_concern, db.write_concern) def test_getattr(self): - self.assertTrue(isinstance(self.client["_does_not_exist"], Database)) + self.assertIsInstance(self.client["_does_not_exist"], Database) with self.assertRaises(AttributeError) as context: self.client._does_not_exist @@ -212,10 +236,7 @@ def test_getattr(self): def test_iteration(self): client = self.client - if "PyPy" in sys.version: - msg = "'NoneType' object is not callable" - else: - msg = "'MongoClient' object is not iterable" + msg = "'MongoClient' object is not iterable" # 
Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in client: # type: ignore[misc] # error: "None" not callable [misc] @@ -233,8 +254,9 @@ def test_iteration(self): self.assertNotIsInstance(client, Iterable) def test_get_default_database(self): - c = rs_or_single_client( - "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False + c = self.rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), + connect=False, ) self.assertEqual(Database(c, "foo"), c.get_default_database()) # Test that default doesn't override the URI value. @@ -248,101 +270,151 @@ def test_get_default_database(self): self.assertEqual(ReadPreference.SECONDARY, db.read_preference) self.assertEqual(write_concern, db.write_concern) - c = rs_or_single_client( - "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + c = self.rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), + connect=False, ) self.assertEqual(Database(c, "foo"), c.get_default_database("foo")) def test_get_default_database_error(self): # URI with no database. - c = rs_or_single_client( - "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + c = self.rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), + connect=False, ) self.assertRaises(ConfigurationError, c.get_default_database) def test_get_default_database_with_authsource(self): # Ensure we distinguish database name from authSource. - uri = "mongodb://%s:%d/foo?authSource=src" % (client_context.host, client_context.port) - c = rs_or_single_client(uri, connect=False) + uri = "mongodb://%s:%d/foo?authSource=src" % ( + client_context.host, + client_context.port, + ) + c = self.rs_or_single_client(uri, connect=False) self.assertEqual(Database(c, "foo"), c.get_default_database()) def test_get_database_default(self): - c = rs_or_single_client( - "mongodb://%s:%d/foo" % (client_context.host, client_context.port), connect=False + c = self.rs_or_single_client( + "mongodb://%s:%d/foo" % (client_context.host, client_context.port), + connect=False, ) self.assertEqual(Database(c, "foo"), c.get_database()) def test_get_database_default_error(self): # URI with no database. - c = rs_or_single_client( - "mongodb://%s:%d/" % (client_context.host, client_context.port), connect=False + c = self.rs_or_single_client( + "mongodb://%s:%d/" % (client_context.host, client_context.port), + connect=False, ) self.assertRaises(ConfigurationError, c.get_database) def test_get_database_default_with_authsource(self): # Ensure we distinguish database name from authSource. - uri = "mongodb://%s:%d/foo?authSource=src" % (client_context.host, client_context.port) - c = rs_or_single_client(uri, connect=False) + uri = "mongodb://%s:%d/foo?authSource=src" % ( + client_context.host, + client_context.port, + ) + c = self.rs_or_single_client(uri, connect=False) self.assertEqual(Database(c, "foo"), c.get_database()) def test_primary_read_pref_with_tags(self): # No tags allowed with "primary". 
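# The distinction tested above: the database segment of the URI ("foo")
# names the default database, while authSource only selects where the
# credentials live. A minimal sketch, host/port illustrative:
from pymongo import MongoClient

c = MongoClient("mongodb://localhost:27017/foo?authSource=admin", connect=False)
print(c.get_default_database().name)  # foo, not admin
# With no database segment in the URI, get_default_database() raises
# ConfigurationError unless a fallback name is passed in.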
with self.assertRaises(ConfigurationError): - MongoClient("mongodb://host/?readpreferencetags=dc:east") + self.single_client("mongodb://host/?readpreferencetags=dc:east") with self.assertRaises(ConfigurationError): - MongoClient("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") + self.single_client("mongodb://host/?readpreference=primary&readpreferencetags=dc:east") def test_read_preference(self): - c = rs_or_single_client( + c = self.rs_or_single_client( "mongodb://host", connect=False, readpreference=ReadPreference.NEAREST.mongos_mode ) self.assertEqual(c.read_preference, ReadPreference.NEAREST) def test_metadata(self): metadata = copy.deepcopy(_METADATA) + if has_c(): + metadata["driver"]["name"] = "PyMongo|c" + else: + metadata["driver"]["name"] = "PyMongo" metadata["application"] = {"name": "foobar"} - client = MongoClient("mongodb://foo:27017/?appname=foobar&connect=false") - options = client._MongoClient__options + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") + options = client.options self.assertEqual(options.pool_options.metadata, metadata) - client = MongoClient("foo", 27017, appname="foobar", connect=False) - options = client._MongoClient__options + client = self.simple_client("foo", 27017, appname="foobar", connect=False) + options = client.options self.assertEqual(options.pool_options.metadata, metadata) # No error - MongoClient(appname="x" * 128) - self.assertRaises(ValueError, MongoClient, appname="x" * 129) + self.simple_client(appname="x" * 128) + with self.assertRaises(ValueError): + self.simple_client(appname="x" * 129) # Bad "driver" options. self.assertRaises(TypeError, DriverInfo, "Foo", 1, "a") self.assertRaises(TypeError, DriverInfo, version="1", platform="a") self.assertRaises(TypeError, DriverInfo) - self.assertRaises(TypeError, MongoClient, driver=1) - self.assertRaises(TypeError, MongoClient, driver="abc") - self.assertRaises(TypeError, MongoClient, driver=("Foo", "1", "a")) + with self.assertRaises(TypeError): + self.simple_client(driver=1) + with self.assertRaises(TypeError): + self.simple_client(driver="abc") + with self.assertRaises(TypeError): + self.simple_client(driver=("Foo", "1", "a")) # Test appending to driver info. - metadata["driver"]["name"] = "PyMongo|FooDriver" - metadata["driver"]["version"] = "%s|1.2.3" % (_METADATA["driver"]["version"],) - client = MongoClient( + if has_c(): + metadata["driver"]["name"] = "PyMongo|c|FooDriver" + else: + metadata["driver"]["name"] = "PyMongo|FooDriver" + metadata["driver"]["version"] = "{}|1.2.3".format(_METADATA["driver"]["version"]) + client = self.simple_client( "foo", 27017, appname="foobar", driver=DriverInfo("FooDriver", "1.2.3", None), connect=False, ) - options = client._MongoClient__options + options = client.options self.assertEqual(options.pool_options.metadata, metadata) - metadata["platform"] = "%s|FooPlatform" % (_METADATA["platform"],) - client = MongoClient( + metadata["platform"] = "{}|FooPlatform".format(_METADATA["platform"]) + client = self.simple_client( "foo", 27017, appname="foobar", driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"), connect=False, ) - options = client._MongoClient__options + options = client.options self.assertEqual(options.pool_options.metadata, metadata) + # Test truncating driver info metadata. 
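# DriverInfo, exercised above, lets a wrapping library append its own
# name/version/platform to PyMongo's handshake metadata (yielding e.g.
# "PyMongo|FooDriver" as the reported driver name). A minimal sketch,
# host illustrative:
from pymongo import MongoClient
from pymongo.driver_info import DriverInfo

client = MongoClient(
    "mongodb://localhost:27017",
    driver=DriverInfo("FooDriver", "1.2.3", "FooPlatform"),
    connect=False,
)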
+ client = self.simple_client( + driver=DriverInfo(name="s" * _MAX_METADATA_SIZE), + connect=False, + ) + options = client.options + self.assertLessEqual( + len(bson.encode(options.pool_options.metadata)), + _MAX_METADATA_SIZE, + ) + client = self.simple_client( + driver=DriverInfo(name="s" * _MAX_METADATA_SIZE, version="s" * _MAX_METADATA_SIZE), + connect=False, + ) + options = client.options + self.assertLessEqual( + len(bson.encode(options.pool_options.metadata)), + _MAX_METADATA_SIZE, + ) + + @mock.patch.dict("os.environ", {ENV_VAR_K8S: "1"}) + def test_container_metadata(self): + metadata = copy.deepcopy(_METADATA) + metadata["driver"]["name"] = "PyMongo" + metadata["env"] = {} + metadata["env"]["container"] = {"orchestrator": "kubernetes"} + client = self.simple_client("mongodb://foo:27017/?appname=foobar&connect=false") + options = client.options + self.assertEqual(options.pool_options.metadata["env"], metadata["env"]) def test_kwargs_codec_options(self): - class MyFloatType(object): + class MyFloatType: def __init__(self, x): self.__x = x @@ -363,7 +435,7 @@ def transform_python(self, value): uuid_representation_label = "javaLegacy" unicode_decode_error_handler = "ignore" tzinfo = utc - c = MongoClient( + c = self.simple_client( document_class=document_class, type_registry=type_registry, tz_aware=tz_aware, @@ -372,12 +444,12 @@ def transform_python(self, value): tzinfo=tzinfo, connect=False, ) - self.assertEqual(c.codec_options.document_class, document_class) self.assertEqual(c.codec_options.type_registry, type_registry) self.assertEqual(c.codec_options.tz_aware, tz_aware) self.assertEqual( - c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) self.assertEqual(c.codec_options.tzinfo, tzinfo) @@ -386,45 +458,59 @@ def test_uri_codec_options(self): # Ensure codec options are passed in correctly uuid_representation_label = "javaLegacy" unicode_decode_error_handler = "ignore" + datetime_conversion = "DATETIME_CLAMP" uri = ( "mongodb://%s:%d/foo?tz_aware=true&uuidrepresentation=" "%s&unicode_decode_error_handler=%s" + "&datetime_conversion=%s" % ( client_context.host, client_context.port, uuid_representation_label, unicode_decode_error_handler, + datetime_conversion, ) ) - c = MongoClient(uri, connect=False) - + c = self.simple_client(uri, connect=False) self.assertEqual(c.codec_options.tz_aware, True) self.assertEqual( - c.codec_options.uuid_representation, _UUID_REPRESENTATIONS[uuid_representation_label] + c.codec_options.uuid_representation, + _UUID_REPRESENTATIONS[uuid_representation_label], ) self.assertEqual(c.codec_options.unicode_decode_error_handler, unicode_decode_error_handler) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] + ) + + # Change the passed datetime_conversion to a number and re-assert. + uri = uri.replace(datetime_conversion, f"{int(DatetimeConversion[datetime_conversion])}") + c = self.simple_client(uri, connect=False) + self.assertEqual( + c.codec_options.datetime_conversion, DatetimeConversion[datetime_conversion] + ) def test_uri_option_precedence(self): # Ensure kwarg options override connection string options. 
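# datetime_conversion, asserted above both as a keyword argument and as a
# URI option, controls decoding of BSON datetimes that fall outside
# datetime.datetime's range. A minimal sketch, names illustrative:
from bson.codec_options import CodecOptions, DatetimeConversion
from pymongo import MongoClient

db = MongoClient("localhost", 27017).pymongo_test
opts = CodecOptions(datetime_conversion=DatetimeConversion.DATETIME_CLAMP)
# Out-of-range values now clamp to datetime.min/max on decode instead of
# raising; DATETIME_AUTO would hand back DatetimeMS for those values.
coll = db.get_collection("events", codec_options=opts)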
uri = "mongodb://localhost/?ssl=true&replicaSet=name&readPreference=primary" - c = MongoClient(uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred") - clopts = c._MongoClient__options + c = self.simple_client( + uri, ssl=False, replicaSet="newname", readPreference="secondaryPreferred" + ) + clopts = c.options opts = clopts._options self.assertEqual(opts["tls"], False) self.assertEqual(clopts.replica_set_name, "newname") self.assertEqual(clopts.read_preference, ReadPreference.SECONDARY_PREFERRED) - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_connection_timeout_ms_propagates_to_DNS_resolver(self): # Patch the resolver. - from pymongo.srv_resolver import _resolve + from pymongo.synchronous.srv_resolver import _resolve patched_resolver = FunctionCallRecorder(_resolve) - pymongo.srv_resolver._resolve = patched_resolver + pymongo.synchronous.srv_resolver._resolve = patched_resolver def reset_resolver(): - pymongo.srv_resolver._resolve = _resolve + pymongo.synchronous.srv_resolver._resolve = _resolve self.addCleanup(reset_resolver) @@ -437,7 +523,7 @@ def reset_resolver(): def test_scenario(args, kwargs, expected_value): patched_resolver.reset() - MongoClient(*args, **kwargs) + self.simple_client(*args, **kwargs) for _, kw in patched_resolver.call_list(): self.assertAlmostEqual(kw["lifetime"], expected_value) @@ -457,15 +543,15 @@ def test_scenario(args, kwargs, expected_value): def test_uri_security_options(self): # Ensure that we don't silently override security-related options. with self.assertRaises(InvalidURI): - MongoClient("mongodb://localhost/?ssl=true", tls=False, connect=False) + self.simple_client("mongodb://localhost/?ssl=true", tls=False, connect=False) # Matching SSL and TLS options should not cause errors. - c = MongoClient("mongodb://localhost/?ssl=false", tls=False, connect=False) - self.assertEqual(c._MongoClient__options._options["tls"], False) + c = self.simple_client("mongodb://localhost/?ssl=false", tls=False, connect=False) + self.assertEqual(c.options._options["tls"], False) # Conflicting tlsInsecure options should raise an error. with self.assertRaises(InvalidURI): - MongoClient( + self.simple_client( "mongodb://localhost/?tlsInsecure=true", connect=False, tlsAllowInvalidHostnames=True, @@ -473,7 +559,7 @@ def test_uri_security_options(self): # Conflicting legacy tlsInsecure options should also raise an error. 
with self.assertRaises(InvalidURI): - MongoClient( + self.simple_client( "mongodb://localhost/?tlsInsecure=true", connect=False, tlsAllowInvalidCertificates=False, @@ -481,10 +567,10 @@ def test_uri_security_options(self): # Conflicting kwargs should raise InvalidURI with self.assertRaises(InvalidURI): - MongoClient(ssl=True, tls=False) + self.simple_client(ssl=True, tls=False) def test_event_listeners(self): - c = MongoClient(event_listeners=[], connect=False) + c = self.simple_client(event_listeners=[], connect=False) self.assertEqual(c.options.event_listeners, []) listeners = [ event_loggers.CommandLogger(), @@ -493,11 +579,11 @@ def test_event_listeners(self): event_loggers.TopologyLogger(), event_loggers.ConnectionPoolLogger(), ] - c = MongoClient(event_listeners=listeners, connect=False) + c = self.simple_client(event_listeners=listeners, connect=False) self.assertEqual(c.options.event_listeners, listeners) def test_client_options(self): - c = MongoClient(connect=False) + c = self.simple_client(connect=False) self.assertIsInstance(c.options, ClientOptions) self.assertIsInstance(c.options.pool_options, PoolOptions) self.assertEqual(c.options.server_selection_timeout, 30) @@ -505,6 +591,59 @@ def test_client_options(self): self.assertIsInstance(c.options.retry_writes, bool) self.assertIsInstance(c.options.retry_reads, bool) + def test_validate_suggestion(self): + """Validate kwargs in constructor.""" + for typo in ["auth", "Auth", "AUTH"]: + expected = f"Unknown option: {typo}. Did you mean one of (authsource, authmechanism, authoidcallowedhosts) or maybe a camelCase version of one? Refer to docstring." + expected = re.escape(expected) + with self.assertRaisesRegex(ConfigurationError, expected): + MongoClient(**{typo: "standard"}) # type: ignore[arg-type] + + @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") + def test_detected_environment_logging(self, mock_get_hosts): + normal_hosts = [ + "normal.host.com", + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + with self.assertLogs("pymongo", level="INFO") as cm: + for host in normal_hosts: + MongoClient(host, connect=False) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + MongoClient(host, connect=False) + MongoClient(multi_host, connect=False) + logs = [record.getMessage() for record in cm.records if record.name == "pymongo.client"] + self.assertEqual(len(logs), 7) + + @skipIf(os.environ.get("DEBUG_LOG"), "Enabling debug logs breaks this test") + @patch("pymongo.synchronous.srv_resolver._SrvResolver.get_hosts") + def test_detected_environment_warning(self, mock_get_hosts): + with self._caplog.at_level(logging.WARN): + normal_hosts = [ + "host.cosmos.azure.com", + "host.docdb.amazonaws.com", + "host.docdb-elastic.amazonaws.com", + ] + srv_hosts = ["mongodb+srv://:@" + s for s in normal_hosts] + multi_host = ( + "host.cosmos.azure.com,host.docdb.amazonaws.com,host.docdb-elastic.amazonaws.com" + ) + for host in normal_hosts: + with self.assertWarns(UserWarning): + self.simple_client(host) + for host in srv_hosts: + mock_get_hosts.return_value = [(host, 1)] + with self.assertWarns(UserWarning): + self.simple_client(host) + with self.assertWarns(UserWarning): + self.simple_client(multi_host) + class TestClient(IntegrationTest): def test_multiple_uris(self): @@ -519,107 +658,106 @@ def 
test_multiple_uris(self): def test_max_idle_time_reaper_default(self): with client_knobs(kill_cursor_frequency=0.1): - # Assert reaper doesn't remove sockets when maxIdleTimeMS not set - client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector) - with server._pool.get_socket() as sock_info: + # Assert reaper doesn't remove connections when maxIdleTimeMS is not set + client = self.rs_or_single_client() + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: pass - self.assertEqual(1, len(server._pool.sockets)) - self.assertTrue(sock_info in server._pool.sockets) - client.close() + self.assertEqual(1, len(server._pool.conns)) + self.assertIn(conn, server._pool.conns) def test_max_idle_time_reaper_removes_stale_minPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper removes idle socket and replaces it with a new one - client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) - server = client._get_topology().select_server(readable_server_selector) - with server._pool.get_socket() as sock_info: + client = self.rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: pass # When the reaper runs at the same time as the get_socket, two - # sockets could be created and checked into the pool. - self.assertGreaterEqual(len(server._pool.sockets), 1) - wait_until(lambda: sock_info not in server._pool.sockets, "remove stale socket") - wait_until(lambda: 1 <= len(server._pool.sockets), "replace stale socket") - client.close() + # connections could be created and checked into the pool. + self.assertGreaterEqual(len(server._pool.conns), 1) + wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + wait_until(lambda: len(server._pool.conns) >= 1, "replace stale socket") def test_max_idle_time_reaper_does_not_exceed_maxPoolSize(self): with client_knobs(kill_cursor_frequency=0.1): - # Assert reaper respects maxPoolSize when adding new sockets. - client = rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) - server = client._get_topology().select_server(readable_server_selector) - with server._pool.get_socket() as sock_info: + # Assert reaper respects maxPoolSize when adding new connections. + client = self.rs_or_single_client(maxIdleTimeMS=500, minPoolSize=1, maxPoolSize=1) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: pass # When the reaper runs at the same time as the get_socket, - # maxPoolSize=1 should prevent two sockets from being created. + # maxPoolSize=1 should prevent two connections from being created.
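The three pool knobs these reaper tests exercise are ordinary constructor options; a sketch of how they surface through the public options object (the host is a placeholder and the values mirror the test). The maxPoolSize assertions continue right below.

# Sketch: pool-size and idle-time options as seen through public options.
from pymongo import MongoClient

client = MongoClient(
    "mongodb://example.invalid:27017",
    maxIdleTimeMS=500,  # idle connections older than 0.5s are reaped
    minPoolSize=1,      # the background task keeps at least one connection
    maxPoolSize=1,      # ...and the reaper may never exceed one
    connect=False,
)
opts = client.options.pool_options
assert opts.max_idle_time_seconds == 0.5
assert (opts.min_pool_size, opts.max_pool_size) == (1, 1)
client.close()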
+ self.assertEqual(1, len(server._pool.conns)) + wait_until(lambda: conn not in server._pool.conns, "remove stale socket") + wait_until(lambda: len(server._pool.conns) == 1, "replace stale socket") def test_max_idle_time_reaper_removes_stale(self): with client_knobs(kill_cursor_frequency=0.1): # Assert reaper has removed idle socket and NOT replaced it - client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(readable_server_selector) - with server._pool.get_socket() as sock_info_one: + client = self.rs_or_single_client(maxIdleTimeMS=500) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn_one: pass - # Assert that the pool does not close sockets prematurely. + # Assert that the pool does not close connections prematurely. time.sleep(0.300) - with server._pool.get_socket() as sock_info_two: + with server._pool.checkout() as conn_two: pass - self.assertIs(sock_info_one, sock_info_two) + self.assertIs(conn_one, conn_two) wait_until( - lambda: 0 == len(server._pool.sockets), + lambda: len(server._pool.conns) == 0, "stale socket reaped and new one NOT added to the pool", ) - client.close() def test_min_pool_size(self): with client_knobs(kill_cursor_frequency=0.1): - client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector) - self.assertEqual(0, len(server._pool.sockets)) + client = self.rs_or_single_client() + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + self.assertEqual(0, len(server._pool.conns)) # Assert that pool started up at minPoolSize - client = rs_or_single_client(minPoolSize=10) - server = client._get_topology().select_server(readable_server_selector) - wait_until(lambda: 10 == len(server._pool.sockets), "pool initialized with 10 sockets") + client = self.rs_or_single_client(minPoolSize=10) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + wait_until( + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", + ) # Assert that if a socket is closed, a new one takes its place - with server._pool.get_socket() as sock_info: - sock_info.close_socket(None) + with server._pool.checkout() as conn: + conn.close_conn(None) wait_until( - lambda: 10 == len(server._pool.sockets), + lambda: len(server._pool.conns) == 10, "a closed socket gets replaced from the pool", ) - self.assertFalse(sock_info in server._pool.sockets) + self.assertNotIn(conn, server._pool.conns) def test_max_idle_time_checkout(self): # Use high frequency to test _get_socket_no_auth. with client_knobs(kill_cursor_frequency=99999999): - client = rs_or_single_client(maxIdleTimeMS=500) - server = client._get_topology().select_server(readable_server_selector) - with server._pool.get_socket() as sock_info: + client = self.rs_or_single_client(maxIdleTimeMS=500) + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: pass - self.assertEqual(1, len(server._pool.sockets)) + self.assertEqual(1, len(server._pool.conns)) time.sleep(1) # Sleep so that the socket becomes stale. 
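The stale-then-replaced behavior checked here can also be observed without reaching into server._pool; a sketch using the public CMAP listener API (the listener class name is illustrative, and a reachable server is assumed once the client is actually used):

# Sketch: observe idle reaping via CMAP events instead of pool internals.
from pymongo import MongoClient, monitoring

class IdleWatcher(monitoring.ConnectionPoolListener):
    def connection_closed(self, event):
        # reason is "idle" when maxIdleTimeMS expires a connection.
        print("closed:", event.connection_id, event.reason)

    # Remaining hooks required by the listener interface.
    def pool_created(self, event): pass
    def pool_ready(self, event): pass
    def pool_cleared(self, event): pass
    def pool_closed(self, event): pass
    def connection_created(self, event): pass
    def connection_ready(self, event): pass
    def connection_check_out_started(self, event): pass
    def connection_check_out_failed(self, event): pass
    def connection_checked_out(self, event): pass
    def connection_checked_in(self, event): pass

client = MongoClient(maxIdleTimeMS=500, event_listeners=[IdleWatcher()])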
- with server._pool.get_socket() as new_sock_info: - self.assertNotEqual(sock_info, new_sock_info) - self.assertEqual(1, len(server._pool.sockets)) - self.assertFalse(sock_info in server._pool.sockets) - self.assertTrue(new_sock_info in server._pool.sockets) + with server._pool.checkout() as new_con: + self.assertNotEqual(conn, new_con) + self.assertEqual(1, len(server._pool.conns)) + self.assertNotIn(conn, server._pool.conns) + self.assertIn(new_con, server._pool.conns) - # Test that sockets are reused if maxIdleTimeMS is not set. - client = rs_or_single_client() - server = client._get_topology().select_server(readable_server_selector) - with server._pool.get_socket() as sock_info: + # Test that connections are reused if maxIdleTimeMS is not set. + client = self.rs_or_single_client() + server = (client._get_topology()).select_server(readable_server_selector, _Op.TEST) + with server._pool.checkout() as conn: pass - self.assertEqual(1, len(server._pool.sockets)) + self.assertEqual(1, len(server._pool.conns)) time.sleep(1) - with server._pool.get_socket() as new_sock_info: - self.assertEqual(sock_info, new_sock_info) - self.assertEqual(1, len(server._pool.sockets)) + with server._pool.checkout() as new_con: + self.assertEqual(conn, new_con) + self.assertEqual(1, len(server._pool.conns)) def test_constants(self): """This test uses MongoClient explicitly to make sure that host and @@ -635,40 +773,40 @@ def test_constants(self): MongoClient.HOST = "somedomainthatdoesntexist.org" MongoClient.PORT = 123456789 with self.assertRaises(AutoReconnect): - connected(MongoClient(serverSelectionTimeoutMS=10, **kwargs)) + c = self.simple_client(serverSelectionTimeoutMS=10, **kwargs) + connected(c) + c = self.simple_client(host, port, **kwargs) # Override the defaults. No error. - connected(MongoClient(host, port, **kwargs)) + connected(c) # Set good defaults. MongoClient.HOST = host MongoClient.PORT = port # No error. - connected(MongoClient(**kwargs)) + c = self.simple_client(**kwargs) + connected(c) def test_init_disconnected(self): host, port = client_context.host, client_context.port - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) # is_primary causes client to block until connected self.assertIsInstance(c.is_primary, bool) - - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertIsInstance(c.is_mongos, bool) - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertIsInstance(c.options.pool_options.max_pool_size, int) self.assertIsInstance(c.nodes, frozenset) - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertEqual(c.codec_options, CodecOptions()) - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertFalse(c.primary) self.assertFalse(c.secondaries) - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertIsInstance(c.topology_description, TopologyDescription) self.assertEqual(c.topology_description, c._topology._description) - self.assertIsNone(c.address) # PYTHON-2981 - c.admin.command("ping") # connect if client_context.is_rs: # The primary's host and port are from the replica set config. 
self.assertIsNotNone(c.address) @@ -676,47 +814,104 @@ def test_init_disconnected(self): self.assertEqual(c.address, (host, port)) bad_host = "somedomainthatdoesntexist.org" - c = MongoClient(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) - self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) + c = self.simple_client(bad_host, port, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + with self.assertRaises(ConnectionFailure): + c.pymongo_test.test.find_one() def test_init_disconnected_with_auth(self): uri = "mongodb://user:pass@somedomainthatdoesntexist" - c = MongoClient(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) - self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one) + c = self.simple_client(uri, connectTimeoutMS=1, serverSelectionTimeoutMS=10) + with self.assertRaises(ConnectionFailure): + c.pymongo_test.test.find_one() + + @client_context.require_replica_set + @client_context.require_no_load_balancer + @client_context.require_tls + def test_init_disconnected_with_srv(self): + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # nodes returns an empty set if not connected + self.assertEqual(c.nodes, frozenset()) + # topology_description returns the initial seed description if not connected + topology_description = c.topology_description + self.assertEqual(topology_description.topology_type, TOPOLOGY_TYPE.Unknown) + self.assertEqual( + { + ("test1.test.build.10gen.cc", None): ServerDescription( + ("test1.test.build.10gen.cc", None) + ) + }, + topology_description.server_descriptions(), + ) + + # address causes client to block until connected + self.assertIsNotNone(c.address) + # Initial seed topology and connected topology have the same ID + self.assertEqual( + c._topology._topology_id, topology_description._topology_settings._topology_id + ) + c.close() + + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # primary causes client to block until connected + c.primary + self.assertIsNotNone(c._topology) + c.close() + + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # secondaries causes client to block until connected + c.secondaries + self.assertIsNotNone(c._topology) + c.close() + + c = self.rs_or_single_client( + "mongodb+srv://test1.test.build.10gen.cc", connect=False, tlsInsecure=True + ) + # arbiters causes client to block until connected + c.arbiters + self.assertIsNotNone(c._topology) def test_equality(self): - seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] - c = rs_or_single_client(seed, connect=False) - self.addCleanup(c.close) + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = self.rs_or_single_client(seed, connect=False) self.assertEqual(client_context.client, c) # Explicitly test inequality self.assertFalse(client_context.client != c) - c = rs_or_single_client("invalid.com", connect=False) - self.addCleanup(c.close) + c = self.rs_or_single_client("invalid.com", connect=False) self.assertNotEqual(client_context.client, c) self.assertTrue(client_context.client != c) + + c1 = self.simple_client("a", connect=False) + c2 = self.simple_client("b", connect=False) + # Seeds differ: - self.assertNotEqual(MongoClient("a", connect=False), MongoClient("b", connect=False)) + self.assertNotEqual(c1, c2) + + c1 = self.simple_client(["a", "b", "c"], connect=False) + c2 = self.simple_client(["c", "a", "b"], 
connect=False) + # Same seeds but out of order still compares equal: - self.assertEqual( - MongoClient(["a", "b", "c"], connect=False), MongoClient(["c", "a", "b"], connect=False) - ) + self.assertEqual(c1, c2) def test_hashable(self): - seed = "%s:%s" % list(self.client._topology_settings.seeds)[0] - c = rs_or_single_client(seed, connect=False) - self.addCleanup(c.close) + seed = "{}:{}".format(*list(self.client._topology_settings.seeds)[0]) + c = self.rs_or_single_client(seed, connect=False) self.assertIn(c, {client_context.client}) - c = rs_or_single_client("invalid.com", connect=False) - self.addCleanup(c.close) + c = self.rs_or_single_client("invalid.com", connect=False) self.assertNotIn(c, {client_context.client}) def test_host_w_port(self): with self.assertRaises(ValueError): + host = client_context.host connected( MongoClient( - "%s:1234567" % (client_context.host,), + f"{host}:1234567", connectTimeoutMS=1, serverSelectionTimeoutMS=10, ) @@ -724,7 +919,7 @@ def test_host_w_port(self): def test_repr(self): # Used to test 'eval' below. - import bson # noqa: F401 + import bson client = MongoClient( # type: ignore[type-var] "mongodb://localhost:27017,localhost:27018/?replicaSet=replset" @@ -741,9 +936,10 @@ def test_repr(self): self.assertIn("w=1", the_repr) self.assertIn("wtimeoutms=100", the_repr) - self.assertEqual(eval(the_repr), client) + with eval(the_repr) as client_two: + self.assertEqual(client_two, client) - client = MongoClient( + client = self.simple_client( "localhost:27017,localhost:27018", replicaSet="replset", connectTimeoutMS=12345, @@ -761,28 +957,39 @@ def test_repr(self): self.assertIn("w=1", the_repr) self.assertIn("wtimeoutms=100", the_repr) - self.assertEqual(eval(the_repr), client) + with eval(the_repr) as client_two: + self.assertEqual(client_two, client) + + def test_repr_srv_host(self): + client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/", connect=False) + # before srv resolution + self.assertIn("host='mongodb+srv://test1.test.build.10gen.cc'", repr(client)) + client._connect() + # after srv resolution + self.assertIn("host=['localhost.test.build.10gen.cc:", repr(client)) + client.close() def test_getters(self): wait_until(lambda: client_context.nodes == self.client.nodes, "find all nodes") def test_list_databases(self): - cmd_docs = self.client.admin.command("listDatabases")["databases"] + cmd_docs = (self.client.admin.command("listDatabases"))["databases"] cursor = self.client.list_databases() self.assertIsInstance(cursor, CommandCursor) - helper_docs = list(cursor) - self.assertTrue(len(helper_docs) > 0) - self.assertEqual(helper_docs, cmd_docs) - for doc in helper_docs: - self.assertIs(type(doc), dict) - client = rs_or_single_client(document_class=SON) - self.addCleanup(client.close) + helper_docs = cursor.to_list() + self.assertGreater(len(helper_docs), 0) + self.assertEqual(len(helper_docs), len(cmd_docs)) + # PYTHON-3529 Some fields may change between calls, just compare names. 
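As background for the name-only comparison that follows, list_databases is a thin wrapper over the listDatabases command and accepts the same server-side filter; a minimal sketch, assuming a reachable local mongod:

# Sketch: the list_databases helper and its server-side filter.
from pymongo import MongoClient

client = MongoClient()  # assumes mongod on localhost:27017
for db_info in client.list_databases(filter={"name": "admin"}):
    print(db_info["name"], db_info.get("sizeOnDisk"))
print(client.list_database_names())  # names only, no sizes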
+ for helper_doc, cmd_doc in zip(helper_docs, cmd_docs): + self.assertIs(type(helper_doc), dict) + self.assertEqual(helper_doc.keys(), cmd_doc.keys()) + client = self.rs_or_single_client(document_class=SON) for doc in client.list_databases(): self.assertIs(type(doc), dict) self.client.pymongo_test.test.insert_one({}) cursor = self.client.list_databases(filter={"name": "admin"}) - docs = list(cursor) + docs = cursor.to_list() self.assertEqual(1, len(docs)) self.assertEqual(docs[0]["name"], "admin") @@ -793,17 +1000,19 @@ def test_list_databases(self): def test_list_database_names(self): self.client.pymongo_test.test.insert_one({"dummy": "object"}) self.client.pymongo_test_mike.test.insert_one({"dummy": "object"}) - cmd_docs = self.client.admin.command("listDatabases")["databases"] + cmd_docs = (self.client.admin.command("listDatabases"))["databases"] cmd_names = [doc["name"] for doc in cmd_docs] db_names = self.client.list_database_names() - self.assertTrue("pymongo_test" in db_names) - self.assertTrue("pymongo_test_mike" in db_names) + self.assertIn("pymongo_test", db_names) + self.assertIn("pymongo_test_mike", db_names) self.assertEqual(db_names, cmd_names) def test_drop_database(self): - self.assertRaises(TypeError, self.client.drop_database, 5) - self.assertRaises(TypeError, self.client.drop_database, None) + with self.assertRaises(TypeError): + self.client.drop_database(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + self.client.drop_database(None) # type: ignore[arg-type] self.client.pymongo_test.test.insert_one({"dummy": "object"}) self.client.pymongo_test2.test.insert_one({"dummy": "object"}) @@ -813,7 +1022,7 @@ def test_drop_database(self): self.client.drop_database("pymongo_test") if client_context.is_rs: - wc_client = rs_or_single_client(w=len(client_context.nodes) + 1) + wc_client = self.rs_or_single_client(w=len(client_context.nodes) + 1) with self.assertRaises(WriteConcernError): wc_client.drop_database("pymongo_test2") @@ -823,16 +1032,17 @@ def test_drop_database(self): self.assertNotIn("pymongo_test2", dbs) def test_close(self): - test_client = rs_or_single_client() + test_client = self.rs_or_single_client() coll = test_client.pymongo_test.bar test_client.close() - self.assertRaises(InvalidOperation, coll.count_documents, {}) + with self.assertRaises(InvalidOperation): + coll.count_documents({}) def test_close_kills_cursors(self): if sys.platform.startswith("java"): # We can't figure out how to make this test reliable with Jython. raise SkipTest("Can't test with Jython") - test_client = rs_or_single_client() + test_client = self.rs_or_single_client() # Kill any cursors possibly queued up by previous tests. gc.collect() test_client._process_periodic_tasks() @@ -859,13 +1069,13 @@ def test_close_kills_cursors(self): self.assertTrue(test_client._topology._opened) test_client.close() self.assertFalse(test_client._topology._opened) - test_client = rs_or_single_client() + test_client = self.rs_or_single_client() # The killCursors task should not need to re-open the topology. test_client._process_periodic_tasks() self.assertTrue(test_client._topology._opened) def test_close_stops_kill_cursors_thread(self): - client = rs_client() + client = self.rs_client() client.test.test.find_one() self.assertFalse(client._kill_cursors_executor._stopped) @@ -874,53 +1084,58 @@ def test_close_stops_kill_cursors_thread(self): self.assertTrue(client._kill_cursors_executor._stopped) # Reusing the closed client should raise an InvalidOperation error. 
- self.assertRaises(InvalidOperation, client.admin.command, "ping") + with self.assertRaises(InvalidOperation): + client.admin.command("ping") # Thread is still stopped. self.assertTrue(client._kill_cursors_executor._stopped) def test_uri_connect_option(self): # Ensure that topology is not opened if connect=False. - client = rs_client(connect=False) + client = self.rs_client(connect=False) self.assertFalse(client._topology._opened) # Ensure kill cursors thread has not been started. - kc_thread = client._kill_cursors_executor._thread - self.assertFalse(kc_thread and kc_thread.is_alive()) - + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertFalse(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertFalse(kc_task and not kc_task.done()) # Using the client should open topology and start the thread. client.admin.command("ping") self.assertTrue(client._topology._opened) - kc_thread = client._kill_cursors_executor._thread - self.assertTrue(kc_thread and kc_thread.is_alive()) - - # Tear down. - client.close() + if _IS_SYNC: + kc_thread = client._kill_cursors_executor._thread + self.assertTrue(kc_thread and kc_thread.is_alive()) + else: + kc_task = client._kill_cursors_executor._task + self.assertTrue(kc_task and not kc_task.done()) def test_close_does_not_open_servers(self): - client = rs_client(connect=False) + client = self.rs_client(connect=False) topology = client._topology self.assertEqual(topology._servers, {}) client.close() self.assertEqual(topology._servers, {}) def test_close_closes_sockets(self): - client = rs_client() - self.addCleanup(client.close) + client = self.rs_client() client.test.test.find_one() topology = client._topology client.close() for server in topology._servers.values(): - self.assertFalse(server._pool.sockets) + self.assertFalse(server._pool.conns) self.assertTrue(server._monitor._executor._stopped) self.assertTrue(server._monitor._rtt_monitor._executor._stopped) - self.assertFalse(server._monitor._pool.sockets) - self.assertFalse(server._monitor._rtt_monitor._pool.sockets) + self.assertFalse(server._monitor._pool.conns) + self.assertFalse(server._monitor._rtt_monitor._pool.conns) def test_bad_uri(self): with self.assertRaises(InvalidURI): MongoClient("http://localhost") @client_context.require_auth + @client_context.require_no_fips def test_auth_from_uri(self): host, port = client_context.host, client_context.port client_context.create_user("admin", "admin", "pass") @@ -930,39 +1145,42 @@ def test_auth_from_uri(self): client_context.create_user("pymongo_test", "user", "pass", roles=["userAdmin", "readWrite"]) with self.assertRaises(OperationFailure): - connected(rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port))) + connected(self.rs_or_single_client_noauth("mongodb://a:b@%s:%d" % (host, port))) # No error. - connected(rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port))) + connected(self.rs_or_single_client_noauth("mongodb://admin:pass@%s:%d" % (host, port))) # Wrong database. uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port) with self.assertRaises(OperationFailure): - connected(rs_or_single_client_noauth(uri)) + connected(self.rs_or_single_client_noauth(uri)) # No error. connected( - rs_or_single_client_noauth("mongodb://user:pass@%s:%d/pymongo_test" % (host, port)) + self.rs_or_single_client_noauth("mongodb://user:pass@%s:%d/pymongo_test" % (host, port)) ) # Auth with lazy connection. 
- rs_or_single_client_noauth( - "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False + ( + self.rs_or_single_client_noauth( + "mongodb://user:pass@%s:%d/pymongo_test" % (host, port), connect=False + ) ).pymongo_test.test.find_one() # Wrong password. - bad_client = rs_or_single_client_noauth( + bad_client = self.rs_or_single_client_noauth( "mongodb://user:wrong@%s:%d/pymongo_test" % (host, port), connect=False ) - self.assertRaises(OperationFailure, bad_client.pymongo_test.test.find_one) + with self.assertRaises(OperationFailure): + bad_client.pymongo_test.test.find_one() @client_context.require_auth def test_username_and_password(self): client_context.create_user("admin", "ad min", "pa/ss") self.addCleanup(client_context.drop_user, "admin", "ad min") - c = rs_or_single_client_noauth(username="ad min", password="pa/ss") + c = self.rs_or_single_client_noauth(username="ad min", password="pa/ss") # Username and password aren't in strings that will likely be logged. self.assertNotIn("ad min", repr(c)) @@ -974,12 +1192,14 @@ def test_username_and_password(self): c.server_info() with self.assertRaises(OperationFailure): - rs_or_single_client_noauth(username="ad min", password="foo").server_info() + (self.rs_or_single_client_noauth(username="ad min", password="foo")).server_info() @client_context.require_auth + @client_context.require_no_fips def test_lazy_auth_raises_operation_failure(self): - lazy_client = rs_or_single_client_noauth( - "mongodb://user:wrong@%s/pymongo_test" % (client_context.host,), connect=False + host = client_context.host + lazy_client = self.rs_or_single_client_noauth( + f"mongodb://user:wrong@{host}/pymongo_test", connect=False ) assertRaisesExactly(OperationFailure, lazy_client.test.collection.find_one) @@ -996,20 +1216,19 @@ def test_unix_socket(self): uri = "mongodb://%s" % encoded_socket # Confirm we can do operations via the socket. - client = rs_or_single_client(uri) - self.addCleanup(client.close) + client = self.rs_or_single_client(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() - self.assertTrue("pymongo_test" in dbs) + self.assertIn("pymongo_test", dbs) - self.assertTrue(mongodb_socket in repr(client)) + self.assertIn(mongodb_socket, repr(client)) # Confirm it fails with a missing socket. 
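The URI in this test hand-encodes the socket path; in application code the encoding is usually derived with quote_plus. A minimal sketch (the path is the stock mongod default and may differ locally); the missing-socket failure case continues below.

# Sketch: percent-encode a Unix domain socket path for use as a host.
from urllib.parse import quote_plus
from pymongo import MongoClient

socket_path = "/tmp/mongodb-27017.sock"  # default mongod socket; adjust locally
uri = "mongodb://" + quote_plus(socket_path)  # mongodb://%2Ftmp%2Fmongodb-27017.sock
client = MongoClient(uri, connect=False)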
- self.assertRaises( - ConnectionFailure, - connected, - MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100), - ) + with self.assertRaises(ConnectionFailure): + c = self.simple_client( + "mongodb://%2Ftmp%2Fnon-existent.sock", serverSelectionTimeoutMS=100 + ) + connected(c) def test_document_class(self): c = self.client @@ -1017,50 +1236,55 @@ def test_document_class(self): db.test.insert_one({"x": 1}) self.assertEqual(dict, c.codec_options.document_class) - self.assertTrue(isinstance(db.test.find_one(), dict)) - self.assertFalse(isinstance(db.test.find_one(), SON)) + self.assertIsInstance(db.test.find_one(), dict) + self.assertNotIsInstance(db.test.find_one(), SON) + + c = self.rs_or_single_client(document_class=SON) - c = rs_or_single_client(document_class=SON) - self.addCleanup(c.close) db = c.pymongo_test self.assertEqual(SON, c.codec_options.document_class) - self.assertTrue(isinstance(db.test.find_one(), SON)) + self.assertIsInstance(db.test.find_one(), SON) def test_timeouts(self): - client = rs_or_single_client( + client = self.rs_or_single_client( connectTimeoutMS=10500, socketTimeoutMS=10500, maxIdleTimeMS=10500, serverSelectionTimeoutMS=10500, ) - self.assertEqual(10.5, get_pool(client).opts.connect_timeout) - self.assertEqual(10.5, get_pool(client).opts.socket_timeout) - self.assertEqual(10.5, get_pool(client).opts.max_idle_time_seconds) + self.assertEqual(10.5, (get_pool(client)).opts.connect_timeout) + self.assertEqual(10.5, (get_pool(client)).opts.socket_timeout) + self.assertEqual(10.5, (get_pool(client)).opts.max_idle_time_seconds) self.assertEqual(10.5, client.options.pool_options.max_idle_time_seconds) self.assertEqual(10.5, client.options.server_selection_timeout) def test_socket_timeout_ms_validation(self): - c = rs_or_single_client(socketTimeoutMS=10 * 1000) - self.assertEqual(10, get_pool(c).opts.socket_timeout) + c = self.rs_or_single_client(socketTimeoutMS=10 * 1000) + self.assertEqual(10, (get_pool(c)).opts.socket_timeout) - c = connected(rs_or_single_client(socketTimeoutMS=None)) - self.assertEqual(None, get_pool(c).opts.socket_timeout) + c = connected(self.rs_or_single_client(socketTimeoutMS=None)) + self.assertEqual(None, (get_pool(c)).opts.socket_timeout) - c = connected(rs_or_single_client(socketTimeoutMS=0)) - self.assertEqual(None, get_pool(c).opts.socket_timeout) + c = connected(self.rs_or_single_client(socketTimeoutMS=0)) + self.assertEqual(None, (get_pool(c)).opts.socket_timeout) - self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=-1) + with self.assertRaises(ValueError): + with self.rs_or_single_client(socketTimeoutMS=-1): + pass - self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS=1e10) + with self.assertRaises(ValueError): + with self.rs_or_single_client(socketTimeoutMS=1e10): + pass - self.assertRaises(ValueError, rs_or_single_client, socketTimeoutMS="foo") + with self.assertRaises(ValueError): + with self.rs_or_single_client(socketTimeoutMS="foo"): + pass def test_socket_timeout(self): no_timeout = self.client timeout_sec = 1 - timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) - self.addCleanup(timeout.close) + timeout = self.rs_or_single_client(socketTimeoutMS=1000 * timeout_sec) no_timeout.pymongo_test.drop_collection("test") no_timeout.pymongo_test.test.insert_one({"x": 1}) @@ -1073,13 +1297,16 @@ def get_x(db): return doc["x"] self.assertEqual(1, get_x(no_timeout.pymongo_test)) - self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test) + with 
self.assertRaises(NetworkTimeout): + get_x(timeout.pymongo_test) def test_server_selection_timeout(self): client = MongoClient(serverSelectionTimeoutMS=100, connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + client.close() client = MongoClient(serverSelectionTimeoutMS=0, connect=False) + self.assertAlmostEqual(0, client.options.server_selection_timeout) self.assertRaises(ValueError, MongoClient, serverSelectionTimeoutMS="foo", connect=False) @@ -1087,47 +1314,59 @@ def test_server_selection_timeout(self): self.assertRaises( ConfigurationError, MongoClient, serverSelectionTimeoutMS=None, connect=False ) + client.close() client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=100", connect=False) self.assertAlmostEqual(0.1, client.options.server_selection_timeout) + client.close() client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=0", connect=False) self.assertAlmostEqual(0, client.options.server_selection_timeout) + client.close() # Test invalid timeout in URI ignored and set to default. client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=-1", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) + client.close() client = MongoClient("mongodb://localhost/?serverSelectionTimeoutMS=", connect=False) self.assertAlmostEqual(30, client.options.server_selection_timeout) def test_waitQueueTimeoutMS(self): - client = rs_or_single_client(waitQueueTimeoutMS=2000) - self.assertEqual(get_pool(client).opts.wait_queue_timeout, 2) + listener = CMAPListener() + client = self.rs_or_single_client( + waitQueueTimeoutMS=10, maxPoolSize=1, event_listeners=[listener] + ) + pool = get_pool(client) + self.assertEqual(pool.opts.wait_queue_timeout, 0.01) + with pool.checkout(): + with self.assertRaises(WaitQueueTimeoutError): + client.test.command("ping") + self.assertFalse(listener.events_by_type(monitoring.PoolClearedEvent)) def test_socketKeepAlive(self): pool = get_pool(self.client) - with pool.get_socket() as sock_info: - keepalive = sock_info.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) + with pool.checkout() as conn: + keepalive = conn.conn.sock.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE) self.assertTrue(keepalive) @no_type_check def test_tz_aware(self): self.assertRaises(ValueError, MongoClient, tz_aware="foo") - aware = rs_or_single_client(tz_aware=True) + aware = self.rs_or_single_client(tz_aware=True) self.addCleanup(aware.close) naive = self.client aware.pymongo_test.drop_collection("test") - now = datetime.datetime.utcnow() + now = datetime.datetime.now(tz=datetime.timezone.utc) aware.pymongo_test.test.insert_one({"x": now}) - self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo) - self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo) + self.assertEqual(None, (naive.pymongo_test.test.find_one())["x"].tzinfo) + self.assertEqual(utc, (aware.pymongo_test.test.find_one())["x"].tzinfo) self.assertEqual( - aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None), - naive.pymongo_test.test.find_one()["x"], + (aware.pymongo_test.test.find_one())["x"].replace(tzinfo=None), + (naive.pymongo_test.test.find_one())["x"], ) @client_context.require_ipv6 @@ -1137,7 +1376,7 @@ def test_ipv6(self): raise SkipTest("Need the ipaddress module to test with SSL") if client_context.auth_enabled: - auth_str = "%s:%s@" % (db_user, db_pwd) + auth_str = f"{db_user}:{db_pwd}@" else: auth_str = "" @@ -1145,34 +1384,36 @@ def test_ipv6(self): if client_context.is_rs: uri 
+= "/?replicaSet=" + (client_context.replica_set_name or "") - client = rs_or_single_client_noauth(uri) - self.addCleanup(client.close) + client = self.rs_or_single_client_noauth(uri) client.pymongo_test.test.insert_one({"dummy": "object"}) client.pymongo_test_bernie.test.insert_one({"dummy": "object"}) dbs = client.list_database_names() - self.assertTrue("pymongo_test" in dbs) - self.assertTrue("pymongo_test_bernie" in dbs) + self.assertIn("pymongo_test", dbs) + self.assertIn("pymongo_test_bernie", dbs) def test_contextlib(self): - client = rs_or_single_client() + client = self.rs_or_single_client() client.pymongo_test.drop_collection("test") client.pymongo_test.test.insert_one({"foo": "bar"}) # The socket used for the previous commands has been returned to the # pool - self.assertEqual(1, len(get_pool(client).sockets)) + self.assertEqual(1, len((get_pool(client)).conns)) - with contextlib.closing(client): - self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) - with self.assertRaises(InvalidOperation): - client.pymongo_test.test.find_one() - client = rs_or_single_client() - with client as client: - self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"]) - with self.assertRaises(InvalidOperation): - client.pymongo_test.test.find_one() + # contextlib async support was added in Python 3.10 + if _IS_SYNC or sys.version_info >= (3, 10): + with contextlib.closing(client): + self.assertEqual("bar", (client.pymongo_test.test.find_one())["foo"]) + with self.assertRaises(InvalidOperation): + client.pymongo_test.test.find_one() + client = self.rs_or_single_client() + with client as client: + self.assertEqual("bar", (client.pymongo_test.test.find_one())["foo"]) + with self.assertRaises(InvalidOperation): + client.pymongo_test.test.find_one() + @client_context.require_sync def test_interrupt_signal(self): if sys.platform.startswith("java"): # We can't figure out how to raise an exception on a thread that's @@ -1200,7 +1441,7 @@ def test_interrupt_signal(self): # main thread while find() is in-progress: On Windows, SIGALRM is # unavailable so we use a second thread. In our Evergreen setup on # Linux, the thread technique causes an error in the test at - # sock.recv(): TypeError: 'int' object is not callable + # conn.recv(): TypeError: 'int' object is not callable # We don't know what causes this, so we hack around it. if sys.platform == "win32": @@ -1223,7 +1464,7 @@ def sigalarm(num, frame): raised = False try: # Will be interrupted by a KeyboardInterrupt. - next(db.foo.find({"$where": where})) + next(db.foo.find({"$where": where})) # type: ignore[call-overload] except KeyboardInterrupt: raised = True @@ -1234,7 +1475,7 @@ def sigalarm(num, frame): # Raises AssertionError due to PYTHON-294 -- Mongo's response to # the previous find() is still waiting to be read on the socket, # so the request id's don't match. - self.assertEqual({"_id": 1}, next(db.foo.find())) + self.assertEqual({"_id": 1}, next(db.foo.find())) # type: ignore[call-overload] finally: if old_signal_handler: signal.signal(signal.SIGALRM, old_signal_handler) @@ -1244,20 +1485,20 @@ def test_operation_failure(self): # response to getLastError. PYTHON-395. We need a new client here # to avoid race conditions caused by replica set failover or idle # socket reaping. 
- client = single_client() - self.addCleanup(client.close) + client = self.single_client() client.pymongo_test.test.find_one() pool = get_pool(client) - socket_count = len(pool.sockets) + socket_count = len(pool.conns) self.assertGreaterEqual(socket_count, 1) - old_sock_info = next(iter(pool.sockets)) + old_conn = next(iter(pool.conns)) client.pymongo_test.test.drop() client.pymongo_test.test.insert_one({"_id": "foo"}) - self.assertRaises(OperationFailure, client.pymongo_test.test.insert_one, {"_id": "foo"}) + with self.assertRaises(OperationFailure): + client.pymongo_test.test.insert_one({"_id": "foo"}) - self.assertEqual(socket_count, len(pool.sockets)) - new_sock_info = next(iter(pool.sockets)) - self.assertEqual(old_sock_info, new_sock_info) + self.assertEqual(socket_count, len(pool.conns)) + new_con = next(iter(pool.conns)) + self.assertEqual(old_conn, new_con) def test_lazy_connect_w0(self): # Ensure that connect-on-demand works when the first operation is @@ -1268,33 +1509,35 @@ def test_lazy_connect_w0(self): client_context.client.drop_database("test_lazy_connect_w0") self.addCleanup(client_context.client.drop_database, "test_lazy_connect_w0") - client = rs_or_single_client(connect=False, w=0) - self.addCleanup(client.close) + client = self.rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.insert_one({}) - wait_until( - lambda: client.test_lazy_connect_w0.test.count_documents({}) == 1, "find one document" - ) - client = rs_or_single_client(connect=False, w=0) - self.addCleanup(client.close) + def predicate(): + return client.test_lazy_connect_w0.test.count_documents({}) == 1 + + wait_until(predicate, "find one document") + + client = self.rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.update_one({}, {"$set": {"x": 1}}) - wait_until( - lambda: client.test_lazy_connect_w0.test.find_one().get("x") == 1, "update one document" - ) - client = rs_or_single_client(connect=False, w=0) - self.addCleanup(client.close) + def predicate(): + return (client.test_lazy_connect_w0.test.find_one()).get("x") == 1 + + wait_until(predicate, "update one document") + + client = self.rs_or_single_client(connect=False, w=0) client.test_lazy_connect_w0.test.delete_one({}) - wait_until( - lambda: client.test_lazy_connect_w0.test.count_documents({}) == 0, "delete one document" - ) + + def predicate(): + return client.test_lazy_connect_w0.test.count_documents({}) == 0 + + wait_until(predicate, "delete one document") @client_context.require_no_mongos def test_exhaust_network_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. - client = rs_or_single_client(maxPoolSize=1, retryReads=False) - self.addCleanup(client.close) + client = self.rs_or_single_client(maxPoolSize=1, retryReads=False) collection = client.pymongo_test.test pool = get_pool(client) pool._check_interval_seconds = None # Never check. @@ -1303,13 +1546,13 @@ def test_exhaust_network_error(self): connected(client) # Cause a network error. - sock_info = one(pool.sockets) - sock_info.sock.close() + conn = one(pool.conns) + conn.conn.close() cursor = collection.find(cursor_type=CursorType.EXHAUST) with self.assertRaises(ConnectionFailure): next(cursor) - self.assertTrue(sock_info.closed) + self.assertTrue(conn.closed) # The semaphore was decremented despite the error. 
self.assertEqual(0, pool.requests) @@ -1320,24 +1563,26 @@ def test_auth_network_error(self): # when authenticating a new socket with cached credentials. # Get a client with one socket so we detect if it's leaked. - c = connected(rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False)) + c = connected( + self.rs_or_single_client(maxPoolSize=1, waitQueueTimeoutMS=1, retryReads=False) + ) # Cause a network error on the actual socket. pool = get_pool(c) - socket_info = one(pool.sockets) - socket_info.sock.close() + conn = one(pool.conns) + conn.conn.close() - # SocketInfo.authenticate logs, but gets a socket.error. Should be + # Connection.authenticate logs, but gets a socket.error. Should be # reraised as AutoReconnect. - self.assertRaises(AutoReconnect, c.test.collection.find_one) + with self.assertRaises(AutoReconnect): + c.test.collection.find_one() # No semaphore leak, the pool is allowed to make a new socket. c.test.collection.find_one() @client_context.require_no_replica_set def test_connect_to_standalone_using_replica_set_name(self): - client = single_client(replicaSet="anything", serverSelectionTimeoutMS=100) - + client = self.single_client(replicaSet="anything", serverSelectionTimeoutMS=100) with self.assertRaises(AutoReconnect): client.test.test.find_one() @@ -1347,7 +1592,7 @@ def test_stale_getmore(self): # the topology before the getMore message is sent. Test that # MongoClient._run_operation_with_response handles the error. with self.assertRaises(AutoReconnect): - client = rs_client(connect=False, serverSelectionTimeoutMS=100) + client = self.rs_client(connect=False, serverSelectionTimeoutMS=100) client._run_operation( operation=message._GetMore( "pymongo_test", @@ -1395,7 +1640,7 @@ def init(self, *args): client_context.host, client_context.port, ) - client = single_client(uri, event_listeners=[listener]) + self.single_client(uri, event_listeners=[listener]) wait_until( lambda: len(listener.results) >= 2, "record two ServerHeartbeatStartedEvents" ) @@ -1404,7 +1649,6 @@ def init(self, *args): # closer to 0.5 sec with heartbeatFrequencyMS configured. 
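Outside of tests, heartbeat timing is observable through the public SDAM listener API rather than by patching ServerHeartbeatStartedEvent.__init__; a sketch (the class name is illustrative; 500 is the smallest heartbeatFrequencyMS PyMongo accepts). The delta assertion continues below.

# Sketch: record heartbeat start times with a ServerHeartbeatListener.
import time
from pymongo import MongoClient, monitoring

class HeartbeatTimer(monitoring.ServerHeartbeatListener):
    def __init__(self):
        self.times = []

    def started(self, event):
        self.times.append(time.monotonic())

    def succeeded(self, event): pass
    def failed(self, event): pass

timer = HeartbeatTimer()
client = MongoClient(heartbeatFrequencyMS=500, event_listeners=[timer])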
self.assertAlmostEqual(heartbeat_times[1] - heartbeat_times[0], 0.5, delta=2) - client.close() finally: ServerHeartbeatStartedEvent.__init__ = old_init # type: ignore @@ -1417,35 +1661,35 @@ def test_small_heartbeat_frequency_ms(self): def test_compression(self): def compression_settings(client): - pool_options = client._MongoClient__options.pool_options + pool_options = client.options.pool_options return pool_options._compression_settings uri = "mongodb://localhost:27017/?compressors=zlib" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=4" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, 4) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-1" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=foobar" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=foobar,zlib" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) @@ -1453,56 +1697,56 @@ def compression_settings(client): # According to the connection string spec, unsupported values # just raise a warning and are ignored. 
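A standalone illustration of that warn-and-ignore rule (offline; that the drop is reported via a Python warning, and its exact text, are assumptions rather than a documented contract); the out-of-range zlib level cases continue below.

# Sketch: unknown compressor names are dropped with a warning, not an error.
import warnings
from pymongo import MongoClient

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    MongoClient("mongodb://localhost:27017/?compressors=foobar,zlib", connect=False)
print([str(w.message) for w in caught])  # expect a mention of "foobar"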
uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=10" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) uri = "mongodb://localhost:27017/?compressors=zlib&zlibCompressionLevel=-2" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zlib"]) self.assertEqual(opts.zlib_compression_level, -1) - if not _HAVE_SNAPPY: + if not _have_snappy(): uri = "mongodb://localhost:27017/?compressors=snappy" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) else: uri = "mongodb://localhost:27017/?compressors=snappy" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["snappy"]) uri = "mongodb://localhost:27017/?compressors=snappy,zlib" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["snappy", "zlib"]) - if not _HAVE_ZSTD: + if not _have_zstd(): uri = "mongodb://localhost:27017/?compressors=zstd" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, []) else: uri = "mongodb://localhost:27017/?compressors=zstd" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zstd"]) uri = "mongodb://localhost:27017/?compressors=zstd,zlib" - client = MongoClient(uri, connect=False) + client = self.simple_client(uri, connect=False) opts = compression_settings(client) self.assertEqual(opts.compressors, ["zstd", "zlib"]) options = client_context.default_client_options if "compressors" in options and "zlib" in options["compressors"]: for level in range(-1, 10): - client = single_client(zlibcompressionlevel=level) + client = self.single_client(zlibcompressionlevel=level) # No error client.pymongo_test.test.find_one() + @client_context.require_sync def test_reset_during_update_pool(self): - client = rs_or_single_client(minPoolSize=10) - self.addCleanup(client.close) + client = self.rs_or_single_client(minPoolSize=10) client.admin.command("ping") pool = get_pool(client) generation = pool.gen.get_overall() @@ -1510,20 +1754,23 @@ def test_reset_during_update_pool(self): # Continuously reset the pool. 
class ResetPoolThread(threading.Thread): def __init__(self, pool): - super(ResetPoolThread, self).__init__() + super().__init__() self.running = True self.pool = pool def stop(self): self.running = False - def run(self): + def _run(self): while self.running: exc = AutoReconnect("mock pool error") ctx = _ErrorContext(exc, 0, pool.gen.get_overall(), False, None) client._topology.handle_error(pool.address, ctx) time.sleep(0.001) + def run(self): + self._run() + t = ResetPoolThread(pool) t.start() @@ -1542,11 +1789,9 @@ def run(self): def test_background_connections_do_not_hold_locks(self): min_pool_size = 10 - client = rs_or_single_client( + client = self.rs_or_single_client( serverSelectionTimeoutMS=3000, minPoolSize=min_pool_size, connect=False ) - self.addCleanup(client.close) - # Create a single connection in the pool. client.admin.command("ping") @@ -1563,7 +1808,7 @@ def stall_connect(*args, **kwargs): self.addCleanup(delattr, pool, "connect") # Wait for the background thread to start creating connections - wait_until(lambda: len(pool.sockets) > 1, "start creating connections") + wait_until(lambda: len(pool.conns) > 1, "start creating connections") # Assert that application operations do not block. for _ in range(10): @@ -1573,24 +1818,45 @@ def stall_connect(*args, **kwargs): # Each ping command should not take more than 2 seconds self.assertLess(total, 2) + def test_background_connections_log_on_error(self): + with self.assertLogs("pymongo.client", level="ERROR") as cm: + client = self.rs_or_single_client(minPoolSize=1) + # Create a single connection in the pool. + client.admin.command("ping") + + # Cause new connections to fail. + pool = get_pool(client) + + def fail_connect(*args, **kwargs): + raise Exception("failed to connect") + + pool.connect = fail_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + pool.reset_without_pause() + + wait_until( + lambda: "failed to connect" in "".join(cm.output), "start creating connections" + ) + self.assertIn("MongoClient background task encountered an error", "".join(cm.output)) + @client_context.require_replica_set def test_direct_connection(self): # direct_connection=True should result in Single topology. - client = rs_or_single_client(directConnection=True) + client = self.rs_or_single_client(directConnection=True) client.admin.command("ping") self.assertEqual(len(client.nodes), 1) self.assertEqual(client._topology_settings.get_topology_type(), TOPOLOGY_TYPE.Single) - client.close() # direct_connection=False should result in RS topology. - client = rs_or_single_client(directConnection=False) + client = self.rs_or_single_client(directConnection=False) client.admin.command("ping") self.assertGreaterEqual(len(client.nodes), 1) self.assertIn( client._topology_settings.get_topology_type(), [TOPOLOGY_TYPE.ReplicaSetNoPrimary, TOPOLOGY_TYPE.ReplicaSetWithPrimary], ) - client.close() # directConnection=True, should error with multiple hosts as a list. 
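directConnection is an ordinary constructor option; a compact offline sketch of both modes (the host is a placeholder). The multiple-hosts error case continues below.

# Sketch: directConnection pins the client to one server (Single topology)
# instead of discovering the rest of the replica set.
from pymongo import MongoClient

direct = MongoClient("mongodb://example.invalid:27017", directConnection=True, connect=False)
auto = MongoClient("mongodb://example.invalid:27017", directConnection=False, connect=False)
# Several seeds combined with directConnection=True is a ConfigurationError.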
with self.assertRaises(ConfigurationError): @@ -1609,36 +1875,36 @@ def server_description_count(): return i gc.collect() - with client_knobs(min_heartbeat_interval=0.003): - client = MongoClient( - "invalid:27017", heartbeatFrequencyMS=3, serverSelectionTimeoutMS=150 + with client_knobs(min_heartbeat_interval=0.002): + client = self.simple_client( + "invalid:27017", heartbeatFrequencyMS=2, serverSelectionTimeoutMS=200 ) initial_count = server_description_count() - self.addCleanup(client.close) with self.assertRaises(ServerSelectionTimeoutError): client.test.test.find_one() gc.collect() final_count = server_description_count() + client.close() # If a bug like PYTHON-2433 is reintroduced then too many # ServerDescriptions will be kept alive and this test will fail: - # AssertionError: 19 != 46 within 15 delta (27 difference) - self.assertAlmostEqual(initial_count, final_count, delta=15) + # AssertionError: 11 != 47 within 20 delta (36 difference) + self.assertAlmostEqual(initial_count, final_count, delta=30) @client_context.require_failCommand_fail_point def test_network_error_message(self): - client = single_client(retryReads=False) - self.addCleanup(client.close) + client = self.single_client(retryReads=False) client.admin.command("ping") # connect with self.fail_point( {"mode": {"times": 1}, "data": {"closeConnection": True, "failCommands": ["find"]}} ): - expected = "%s:%s: " % client.address + assert client.address is not None + expected = "{}:{}: ".format(*(client.address)) with self.assertRaisesRegex(AutoReconnect, expected): client.pymongo_test.test.find_one({}) @unittest.skipIf("PyPy" in sys.version, "PYTHON-2938 could fail on PyPy") def test_process_periodic_tasks(self): - client = rs_or_single_client() + client = self.rs_or_single_client() coll = client.db.collection coll.insert_many([{} for _ in range(5)]) cursor = coll.find(batch_size=2) @@ -1649,55 +1915,58 @@ def test_process_periodic_tasks(self): # Add cursor to kill cursors queue del cursor wait_until( - lambda: client._MongoClient__kill_cursors_queue, + lambda: client._kill_cursors_queue, "waited for cursor to be added to queue", ) client._process_periodic_tasks() # This must not raise or print any exceptions with self.assertRaises(InvalidOperation): coll.insert_many([{} for _ in range(5)]) - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_service_name_from_kwargs(self): client = MongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc", srvServiceName="customname", connect=False, ) + client._connect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() client = MongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc" "/?srvServiceName=shouldbeoverriden", srvServiceName="customname", connect=False, ) + client._connect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() client = MongoClient( "mongodb+srv://user:password@test22.test.build.10gen.cc/?srvServiceName=customname", connect=False, ) + client._connect() self.assertEqual(client._topology_settings.srv_service_name, "customname") + client.close() - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS-related tests need dnspython to be installed") def test_srv_max_hosts_kwarg(self): - client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/") + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/") + client._connect() self.assertGreater(len(client.topology_description.server_descriptions()), 1) - 
client = MongoClient("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + client = self.simple_client("mongodb+srv://test1.test.build.10gen.cc/", srvmaxhosts=1) + client._connect() self.assertEqual(len(client.topology_description.server_descriptions()), 1) - client = MongoClient( + client = self.simple_client( "mongodb+srv://test1.test.build.10gen.cc/?srvMaxHosts=1", srvmaxhosts=2 ) + client._connect() self.assertEqual(len(client.topology_description.server_descriptions()), 2) - @unittest.skipIf(_HAVE_DNSPYTHON, "dnspython must not be installed") - def test_srv_no_dnspython_error(self): - with self.assertRaisesRegex(ConfigurationError, 'The "dnspython" module must be'): - MongoClient("mongodb+srv://test1.test.build.10gen.cc/") - @unittest.skipIf( - client_context.load_balancer or client_context.serverless, + client_context.load_balancer, "loadBalanced clients do not run SDAM", ) @unittest.skipIf(sys.platform == "win32", "Windows does not support SIGSTOP") + @client_context.require_sync def test_sigstop_sigcont(self): test_dir = os.path.dirname(os.path.realpath(__file__)) script = os.path.join(test_dir, "sigstop_sigcont.py") @@ -1726,23 +1995,210 @@ def test_sigstop_sigcont(self): self.assertIn("TEST COMPLETED", log_output) self.assertNotIn("ServerHeartbeatFailedEvent", log_output) + def _test_handshake(self, env_vars, expected_env): + with patch.dict("os.environ", env_vars): + metadata = copy.deepcopy(_METADATA) + if has_c(): + metadata["driver"]["name"] = "PyMongo|c" + else: + metadata["driver"]["name"] = "PyMongo" + if expected_env is not None: + metadata["env"] = expected_env + + if "AWS_REGION" not in env_vars: + os.environ["AWS_REGION"] = "" + client = self.rs_or_single_client(serverSelectionTimeoutMS=10000) + client.admin.command("ping") + options = client.options + self.assertEqual(options.pool_options.metadata, metadata) + + def test_handshake_01_aws(self): + self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_REGION": "us-east-2", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "1024", + }, + {"name": "aws.lambda", "region": "us-east-2", "memory_mb": 1024}, + ) + + def test_handshake_02_azure(self): + self._test_handshake({"FUNCTIONS_WORKER_RUNTIME": "python"}, {"name": "azure.func"}) + + def test_handshake_03_gcp(self): + self._test_handshake( + { + "K_SERVICE": "servicename", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + # Extra case for FUNCTION_NAME. + self._test_handshake( + { + "FUNCTION_NAME": "funcname", + "FUNCTION_MEMORY_MB": "1024", + "FUNCTION_TIMEOUT_SEC": "60", + "FUNCTION_REGION": "us-central1", + }, + {"name": "gcp.func", "region": "us-central1", "memory_mb": 1024, "timeout_sec": 60}, + ) + + def test_handshake_04_vercel(self): + self._test_handshake( + {"VERCEL": "1", "VERCEL_REGION": "cdg1"}, {"name": "vercel", "region": "cdg1"} + ) + + def test_handshake_05_multiple(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "FUNCTIONS_WORKER_RUNTIME": "python"}, + None, + ) + # Extra cases for other combos. 
+ self._test_handshake( + {"FUNCTIONS_WORKER_RUNTIME": "python", "K_SERVICE": "servicename"}, + None, + ) + self._test_handshake({"K_SERVICE": "servicename", "VERCEL": "1"}, None) + + def test_handshake_06_region_too_long(self): + self._test_handshake( + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", "AWS_REGION": "a" * 512}, + {"name": "aws.lambda"}, + ) + + def test_handshake_07_memory_invalid_int(self): + self._test_handshake( + { + "AWS_EXECUTION_ENV": "AWS_Lambda_python3.10", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "big", + }, + {"name": "aws.lambda"}, + ) + + def test_handshake_08_invalid_aws_ec2(self): + # AWS_EXECUTION_ENV needs to start with "AWS_Lambda_". + self._test_handshake( + {"AWS_EXECUTION_ENV": "EC2"}, + None, + ) + + def test_handshake_09_container_with_provider(self): + self._test_handshake( + { + ENV_VAR_K8S: "1", + "AWS_LAMBDA_RUNTIME_API": "1", + "AWS_REGION": "us-east-1", + "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": "256", + }, + { + "container": {"orchestrator": "kubernetes"}, + "name": "aws.lambda", + "region": "us-east-1", + "memory_mb": 256, + }, + ) + + def test_dict_hints(self): + self.db.t.find(hint={"x": 1}) + + def test_dict_hints_sort(self): + result = self.db.t.find() + result.sort({"x": 1}) + + self.db.t.find(sort={"x": 1}) + + def test_dict_hints_create_index(self): + self.db.t.create_index({"x": pymongo.ASCENDING}) + + def test_legacy_java_uuid_roundtrip(self): + data = BinaryData.java_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, JAVA_LEGACY)) + + client_context.client.pymongo_test.drop_collection("java_uuid") + db = client_context.client.pymongo_test + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=JAVA_LEGACY)) + + coll.insert_many(docs) + self.assertEqual(5, coll.count_documents({})) + for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("java_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("java_uuid") + + def test_legacy_csharp_uuid_roundtrip(self): + data = BinaryData.csharp_data + docs = bson.decode_all(data, CodecOptions(SON[str, Any], False, CSHARP_LEGACY)) + + client_context.client.pymongo_test.drop_collection("csharp_uuid") + db = client_context.client.pymongo_test + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=CSHARP_LEGACY)) + + coll.insert_many(docs) + self.assertEqual(5, coll.count_documents({})) + for d in coll.find(): + self.assertEqual(d["newguid"], uuid.UUID(d["newguidstring"])) + + coll = db.get_collection("csharp_uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) + for d in coll.find(): + self.assertNotEqual(d["newguid"], d["newguidstring"]) + client_context.client.pymongo_test.drop_collection("csharp_uuid") + + def test_uri_to_uuid(self): + uri = "mongodb://foo/?uuidrepresentation=csharpLegacy" + client = self.single_client(uri, connect=False) + self.assertEqual(client.pymongo_test.test.codec_options.uuid_representation, CSHARP_LEGACY) + + def test_uuid_queries(self): + db = client_context.client.pymongo_test + coll = db.test + coll.drop() + + uu = uuid.uuid4() + coll.insert_one({"uuid": Binary(uu.bytes, 3)}) + self.assertEqual(1, coll.count_documents({})) + + # Test regular UUID queries (using subtype 4). 
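The contrast the following queries rely on, as a self-contained sketch: the same UUID encodes to different BSON Binary values under the legacy and standard representations, so a query written with one never matches data stored with the other.

import uuid
from bson.binary import Binary, UuidRepresentation

uu = uuid.uuid4()
legacy = Binary(uu.bytes, 3)  # subtype 3, as inserted above
standard = Binary.from_uuid(uu, UuidRepresentation.STANDARD)  # subtype 4
assert legacy != standard  # subtypes differ, so the server treats them as distinct values
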
+ coll = db.get_collection( + "test", CodecOptions(uuid_representation=UuidRepresentation.STANDARD) + ) + self.assertEqual(0, coll.count_documents({"uuid": uu})) + coll.insert_one({"uuid": uu}) + self.assertEqual(2, coll.count_documents({})) + docs = coll.find({"uuid": uu}).to_list() + self.assertEqual(1, len(docs)) + self.assertEqual(uu, docs[0]["uuid"]) + + # Test both. + uu_legacy = Binary.from_uuid(uu, UuidRepresentation.PYTHON_LEGACY) + predicate = {"uuid": {"$in": [uu, uu_legacy]}} + self.assertEqual(2, coll.count_documents(predicate)) + docs = coll.find(predicate).to_list() + self.assertEqual(2, len(docs)) + coll.drop() + class TestExhaustCursor(IntegrationTest): """Test that clients properly handle errors from exhaust cursors.""" def setUp(self): - super(TestExhaustCursor, self).setUp() + super().setUp() if client_context.is_mongos: raise SkipTest("mongos doesn't support exhaust, SERVER-2627") def test_exhaust_query_server_error(self): # When doing an exhaust query, the socket stays checked out on success # but must be checked in on error to avoid semaphore leaks. - client = connected(rs_or_single_client(maxPoolSize=1)) + client = connected(self.rs_or_single_client(maxPoolSize=1)) collection = client.pymongo_test.test pool = get_pool(client) - sock_info = one(pool.sockets) + conn = one(pool.conns) # This will cause OperationFailure in all mongo versions since # the value for $orderby must be a document. @@ -1750,17 +2206,18 @@ def test_exhaust_query_server_error(self): SON([("$query", {}), ("$orderby", True)]), cursor_type=CursorType.EXHAUST ) - self.assertRaises(OperationFailure, cursor.next) - self.assertFalse(sock_info.closed) + with self.assertRaises(OperationFailure): + cursor.next() + self.assertFalse(conn.closed) # The socket was checked in and the semaphore was decremented. - self.assertIn(sock_info, pool.sockets) + self.assertIn(conn, pool.conns) self.assertEqual(0, pool.requests) def test_exhaust_getmore_server_error(self): # When doing a getmore on an exhaust cursor, the socket stays checked # out on success but it's checked in on error to avoid semaphore leaks. - client = rs_or_single_client(maxPoolSize=1) + client = self.rs_or_single_client(maxPoolSize=1) collection = client.pymongo_test.test collection.drop() @@ -1769,7 +2226,7 @@ def test_exhaust_getmore_server_error(self): pool = get_pool(client) pool._check_interval_seconds = None # Never check. - sock_info = one(pool.sockets) + conn = one(pool.conns) cursor = collection.find(cursor_type=CursorType.EXHAUST) @@ -1779,46 +2236,48 @@ def test_exhaust_getmore_server_error(self): # Cause a server error on getmore. def receive_message(request_id): # Discard the actual server response. - SocketInfo.receive_message(sock_info, request_id) + Connection.receive_message(conn, request_id) # responseFlags bit 1 is QueryFailure. msg = struct.pack("= _NAMESPACE_DOC_BYTES: + num_models += 1 + b_repeated = "b" * (remainder_bytes - _OPERATION_DOC_BYTES) + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + return num_models, models + + @client_context.require_version_min(8, 0, 0, -24) + def test_11_no_batch_splits_if_new_namespace_is_not_too_large(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + + num_models, models = self._setup_namespace_test_models() + models.append( + InsertOne( + namespace="db.coll", + document={"a": "b"}, + ) + ) + self.addCleanup(client.db["coll"].drop) + + # No batch splitting required. 
+ result = client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 1) + event = bulk_write_events[0] + + self.assertEqual(len(event.command["ops"]), num_models + 1) + self.assertEqual(len(event.command["nsInfo"]), 1) + self.assertEqual(event.command["nsInfo"][0]["ns"], "db.coll") + + @client_context.require_version_min(8, 0, 0, -24) + def test_11_batch_splits_if_new_namespace_is_too_large(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + + num_models, models = self._setup_namespace_test_models() + c_repeated = "c" * 200 + namespace = f"db.{c_repeated}" + models.append( + InsertOne( + namespace=namespace, + document={"a": "b"}, + ) + ) + self.addCleanup(client.db["coll"].drop) + self.addCleanup(client.db[c_repeated].drop) + + # Batch splitting required. + result = client.bulk_write(models=models) + self.assertEqual(result.inserted_count, num_models + 1) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + + self.assertEqual(len(bulk_write_events), 2) + first_event, second_event = bulk_write_events + + self.assertEqual(len(first_event.command["ops"]), num_models) + self.assertEqual(len(first_event.command["nsInfo"]), 1) + self.assertEqual(first_event.command["nsInfo"][0]["ns"], "db.coll") + + self.assertEqual(len(second_event.command["ops"]), 1) + self.assertEqual(len(second_event.command["nsInfo"]), 1) + self.assertEqual(second_event.command["nsInfo"][0]["ns"], namespace) + + @client_context.require_version_min(8, 0, 0, -24) + def test_12_returns_error_if_no_writes_can_be_added_to_ops(self): + client = self.rs_or_single_client() + + # Document too large. + b_repeated = "b" * self.max_message_size_bytes + models = [InsertOne(namespace="db.coll", document={"a": b_repeated})] + with self.assertRaises(DocumentTooLarge) as context: + client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + + # Namespace too large. 
+ c_repeated = "c" * self.max_message_size_bytes + namespace = f"db.{c_repeated}" + models = [InsertOne(namespace=namespace, document={"a": "b"})] + with self.assertRaises(DocumentTooLarge) as context: + client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + + @client_context.require_version_min(8, 0, 0, -24) + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + def test_13_returns_error_if_auto_encryption_configured(self): + opts = AutoEncryptionOpts( + key_vault_namespace="db.coll", + kms_providers={"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}}, + ) + client = self.rs_or_single_client(auto_encryption_opts=opts) + + models = [InsertOne(namespace="db.coll", document={"a": "b"})] + with self.assertRaises(InvalidOperation) as context: + client.bulk_write(models=models) + self.assertIsNone(context.exception.partial_result) + self.assertIn( + "bulk_write does not currently support automatic encryption", context.exception._message + ) + + @client_context.require_version_min(8, 0, 0, -24) + def test_upserted_result(self): + client = self.rs_or_single_client() + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + models = [] + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": "a"}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + upsert=True, + ) + ) + models.append( + UpdateOne( + namespace="db.coll", + filter={"_id": None}, + update={"$set": {"x": 1}}, + ) + ) + result = client.bulk_write(models=models, verbose_results=True) + + self.assertEqual(result.upserted_count, 2) + self.assertEqual(result.update_results[0].did_upsert, True) + self.assertEqual(result.update_results[1].did_upsert, True) + self.assertEqual(result.update_results[2].did_upsert, False) + + # Note: test 14 is optional and intentionally not implemented because we provide multiple APIs to specify explain. 
+ + @client_context.require_version_min(8, 0, 0, -24) + def test_15_unacknowledged_write_across_batches(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + + collection = client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + client.db.command({"create": "db.coll"}) + + b_repeated = "b" * (self.max_bson_object_size - 500) + models = [ + InsertOne(namespace="db.coll", document={"a": b_repeated}) + for _ in range(int(self.max_message_size_bytes / self.max_bson_object_size) + 1) + ] + + listener.reset() + + res = client.bulk_write(models, ordered=False, write_concern=WriteConcern(w=0)) + self.assertEqual(False, res.acknowledged) + + events = listener.started_events + self.assertEqual(2, len(events)) + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size), + len(events[0].command["ops"]), + ) + self.assertEqual(1, len(events[1].command["ops"])) + self.assertEqual(events[0].operation_id, events[1].operation_id) + self.assertEqual({"w": 0}, events[0].command["writeConcern"]) + self.assertEqual({"w": 0}, events[1].command["writeConcern"]) + + self.assertEqual( + int(self.max_message_size_bytes / self.max_bson_object_size) + 1, + collection.count_documents({}), + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#11-multi-batch-bulkwrites +class TestClientBulkWriteCSOT(IntegrationTest): + def setUp(self): + super().setUp() + self.max_write_batch_size = client_context.max_write_batch_size + self.max_bson_object_size = client_context.max_bson_size + self.max_message_size_bytes = client_context.max_message_size_bytes + + @client_context.require_version_min(8, 0, 0, -24) + @client_context.require_failCommand_fail_point + @flaky(reason="PYTHON-5290", max_runs=3, affects_cpython_linux=True) + def test_timeout_in_multi_batch_bulk_write(self): + if sys.platform != "linux" and "CI" in os.environ: + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows and MacOS") + _OVERHEAD = 500 + + internal_client = self.rs_or_single_client(timeoutMS=None) + + collection = internal_client.db["coll"] + self.addCleanup(collection.drop) + collection.drop() + + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["bulkWrite"], "blockConnection": True, "blockTimeMS": 1010}, + } + with self.fail_point(fail_command): + models = [] + num_models = int(self.max_message_size_bytes / self.max_bson_object_size + 1) + b_repeated = "b" * (self.max_bson_object_size - _OVERHEAD) + for _ in range(num_models): + models.append( + InsertOne( + namespace="db.coll", + document={"a": b_repeated}, + ) + ) + + listener = OvertCommandListener() + client = self.rs_or_single_client( + event_listeners=[listener], + readConcernLevel="majority", + readPreference="primary", + timeoutMS=2000, + w="majority", + ) + # Initialize the client with a larger timeout to help make the test less flaky + with pymongo.timeout(10): + client.admin.command("ping") + with self.assertRaises(ClientBulkWriteException) as context: + client.bulk_write(models=models) + self.assertIsInstance(context.exception.error, NetworkTimeout) + + bulk_write_events = [] + for event in listener.started_events: + if event.command_name == "bulkWrite": + bulk_write_events.append(event) + self.assertEqual(len(bulk_write_events), 2) diff --git a/test/test_client_context.py b/test/test_client_context.py index 9ee5b96d61..9c1b21ee78 100644 ---

a/test/test_client_context.py +++ b/test/test_client_context.py @@ -11,52 +11,46 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import os import sys sys.path[0:0] = [""] -from test import SkipTest, client_context, unittest +from test import SkipTest, UnitTest, client_context, unittest +_IS_SYNC = True -class TestClientContext(unittest.TestCase): + +class TestClientContext(UnitTest): def test_must_connect(self): - if "PYMONGO_MUST_CONNECT" not in os.environ: + if not os.environ.get("PYMONGO_MUST_CONNECT"): raise SkipTest("PYMONGO_MUST_CONNECT is not set") self.assertTrue( client_context.connected, "client context must be connected when " - "PYMONGO_MUST_CONNECT is set. Failed attempts:\n%s" - % (client_context.connection_attempt_info(),), - ) - - def test_serverless(self): - if "TEST_SERVERLESS" not in os.environ: - raise SkipTest("TEST_SERVERLESS is not set") - - self.assertTrue( - client_context.connected and client_context.serverless, - "client context must be connected to serverless when " - "TEST_SERVERLESS is set. Failed attempts:\n%s" - % (client_context.connection_attempt_info(),), + "PYMONGO_MUST_CONNECT is set. Failed attempts:\n{}".format( + client_context.connection_attempt_info() + ), ) def test_enableTestCommands_is_disabled(self): - if "PYMONGO_DISABLE_TEST_COMMANDS" not in os.environ: - raise SkipTest("PYMONGO_DISABLE_TEST_COMMANDS is not set") + if not os.environ.get("DISABLE_TEST_COMMANDS"): + raise SkipTest("DISABLE_TEST_COMMANDS is not set") self.assertFalse( client_context.test_commands_enabled, - "enableTestCommands must be disabled when PYMONGO_DISABLE_TEST_COMMANDS is set.", + "enableTestCommands must be disabled when DISABLE_TEST_COMMANDS is set.", ) - def test_setdefaultencoding_worked(self): - if "SETDEFAULTENCODING" not in os.environ: - raise SkipTest("SETDEFAULTENCODING is not set") + def test_free_threading_is_enabled(self): + if "free-threading build" not in sys.version: + raise SkipTest("this test requires the Python free-threading build") - self.assertEqual(sys.getdefaultencoding(), os.environ["SETDEFAULTENCODING"]) + # If the GIL is enabled then pymongo or one of our deps does not support free-threading. + self.assertFalse(sys._is_gil_enabled()) # type: ignore[attr-defined] if __name__ == "__main__": diff --git a/test/test_client_metadata.py b/test/test_client_metadata.py new file mode 100644 index 0000000000..a94c5aa25e --- /dev/null +++ b/test/test_client_metadata.py @@ -0,0 +1,232 @@ +# Copyright 2013-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
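This new test module exercises the handshake-metadata API; in application code the pattern looks roughly like the following sketch (the URI is a placeholder):

from pymongo import MongoClient
from pymongo.driver_info import DriverInfo

# Library metadata can be supplied up front...
client = MongoClient(
    "mongodb://localhost:27017",
    driver=DriverInfo("library", "1.2", "Library Platform"),
)
# ...and a wrapping framework can append its own later. As the tests below
# verify, only connections established afterwards send the combined metadata.
client.append_metadata(DriverInfo("framework", "2.0", "Framework Platform"))
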
+from __future__ import annotations + +import asyncio +import os +import pathlib +import time +import unittest +from test import IntegrationTest +from test.unified_format import generate_test_classes +from test.utils_shared import CMAPListener +from typing import Any, Optional + +import pytest + +from pymongo import MongoClient +from pymongo.driver_info import DriverInfo +from pymongo.monitoring import ConnectionClosedEvent + +try: + from mockupdb import MockupDB, OpMsgReply + + _HAVE_MOCKUPDB = True +except ImportError: + _HAVE_MOCKUPDB = False + +pytestmark = pytest.mark.mockupdb + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "handshake", "unified") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "handshake", "unified" + ) + +# Generate unified tests. +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) + + +def _get_handshake_driver_info(request): + assert "client" in request + return request["client"] + + +class TestClientMetadataProse(IntegrationTest): + def setUp(self): + super().setUp() + self.server = MockupDB() + self.handshake_req = None + + def respond(r): + if "ismaster" in r: + # then this is a handshake request + self.handshake_req = r + return r.reply(OpMsgReply(maxWireVersion=13)) + + self.server.autoresponds(respond) + self.server.run() + self.addCleanup(self.server.stop) + + def send_ping_and_get_metadata( + self, client: MongoClient, is_handshake: bool + ) -> tuple[str, Optional[str], Optional[str], dict[str, Any]]: + # reset if handshake request + if is_handshake: + self.handshake_req: Optional[dict] = None + + client.admin.command("ping") + metadata = _get_handshake_driver_info(self.handshake_req) + driver_metadata = metadata["driver"] + name, version, platform = ( + driver_metadata["name"], + driver_metadata["version"], + metadata["platform"], + ) + return name, version, platform, metadata + + def check_metadata_added( + self, + client: MongoClient, + add_name: str, + add_version: Optional[str], + add_platform: Optional[str], + ) -> None: + # send initial metadata + name, version, platform, metadata = self.send_ping_and_get_metadata(client, True) + # wait for connection to become idle + time.sleep(0.005) + + # add new metadata + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + new_name, new_version, new_platform, new_metadata = self.send_ping_and_get_metadata( + client, True + ) + if add_name is not None and add_name.lower() in name.lower().split("|"): + self.assertEqual(name, new_name) + self.assertEqual(version, new_version) + self.assertEqual(platform, new_platform) + else: + self.assertEqual(new_name, f"{name}|{add_name}" if add_name is not None else name) + self.assertEqual( + new_version, + f"{version}|{add_version}" if add_version is not None else version, + ) + self.assertEqual( + new_platform, + f"{platform}|{add_platform}" if add_platform is not None else platform, + ) + + metadata.pop("driver") + metadata.pop("platform") + new_metadata.pop("driver") + new_metadata.pop("platform") + self.assertEqual(metadata, new_metadata) + + def test_append_metadata(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + def test_append_metadata_platform_none(self): + client = self.rs_or_single_client( + 
"mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", "2.0", None) + + def test_append_metadata_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", None, "Framework Platform") + + def test_append_metadata_platform_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + ) + self.check_metadata_added(client, "framework", None, None) + + def test_multiple_successive_metadata_updates(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, maxIdleTimeMS=1, connect=False + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", "2.0", "Framework Platform") + + def test_multiple_successive_metadata_updates_platform_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", "2.0", None) + + def test_multiple_successive_metadata_updates_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, "Framework Platform") + + def test_multiple_successive_metadata_updates_platform_version_none(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, None) + + def test_doesnt_update_established_connections(self): + listener = CMAPListener() + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + driver=DriverInfo("library", "1.2", "Library Platform"), + event_listeners=[listener], + ) + + # send initial metadata + name, version, platform, metadata = self.send_ping_and_get_metadata(client, True) + self.assertIsNotNone(name) + self.assertIsNotNone(version) + self.assertIsNotNone(platform) + + # add data + add_name, add_version, add_platform = "framework", "2.0", "Framework Platform" + client.append_metadata(DriverInfo(add_name, add_version, add_platform)) + # check new data isn't sent + self.handshake_req: Optional[dict] = None + client.admin.command("ping") + self.assertIsNone(self.handshake_req) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 0) + + def test_duplicate_driver_name_no_op(self): + client = self.rs_or_single_client( + "mongodb://" + self.server.address_string, + maxIdleTimeMS=1, + ) + client.append_metadata(DriverInfo("library", "1.2", "Library Platform")) + self.check_metadata_added(client, "framework", None, None) + # wait for connection to become idle + time.sleep(0.005) + # add same metadata again + self.check_metadata_added(client, "Framework", None, None) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_cmap.py b/test/test_cmap.py deleted file mode 100644 index 360edef0e8..0000000000 --- a/test/test_cmap.py +++ /dev/null @@ -1,473 
+0,0 @@ -# Copyright 2019-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Execute Transactions Spec tests.""" - -import os -import sys -import time - -sys.path[0:0] = [""] - -from test import IntegrationTest, client_knobs, unittest -from test.pymongo_mocks import DummyMonitor -from test.utils import ( - CMAPListener, - TestCreator, - camel_to_snake, - client_context, - get_pool, - get_pools, - rs_or_single_client, - single_client, - single_client_noauth, - wait_until, -) -from test.utils_spec_runner import SpecRunnerThread - -from bson.objectid import ObjectId -from bson.son import SON -from pymongo.errors import ( - ConnectionFailure, - OperationFailure, - PyMongoError, - WaitQueueTimeoutError, -) -from pymongo.monitoring import ( - ConnectionCheckedInEvent, - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutFailedReason, - ConnectionCheckOutStartedEvent, - ConnectionClosedEvent, - ConnectionClosedReason, - ConnectionCreatedEvent, - ConnectionReadyEvent, - PoolClearedEvent, - PoolClosedEvent, - PoolCreatedEvent, - PoolReadyEvent, -) -from pymongo.pool import PoolState, _PoolClosedError -from pymongo.read_preferences import ReadPreference -from pymongo.topology_description import updated_topology_description - -OBJECT_TYPES = { - # Event types. - "ConnectionCheckedIn": ConnectionCheckedInEvent, - "ConnectionCheckedOut": ConnectionCheckedOutEvent, - "ConnectionCheckOutFailed": ConnectionCheckOutFailedEvent, - "ConnectionClosed": ConnectionClosedEvent, - "ConnectionCreated": ConnectionCreatedEvent, - "ConnectionReady": ConnectionReadyEvent, - "ConnectionCheckOutStarted": ConnectionCheckOutStartedEvent, - "ConnectionPoolCreated": PoolCreatedEvent, - "ConnectionPoolReady": PoolReadyEvent, - "ConnectionPoolCleared": PoolClearedEvent, - "ConnectionPoolClosed": PoolClosedEvent, - # Error types. - "PoolClosedError": _PoolClosedError, - "WaitQueueTimeoutError": WaitQueueTimeoutError, -} - - -class TestCMAP(IntegrationTest): - # Location of JSON test specifications. 
- TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "cmap") - - # Test operations: - - def start(self, op): - """Run the 'start' thread operation.""" - target = op["target"] - thread = SpecRunnerThread(target) - thread.start() - self.targets[target] = thread - - def wait(self, op): - """Run the 'wait' operation.""" - time.sleep(op["ms"] / 1000.0) - - def wait_for_thread(self, op): - """Run the 'waitForThread' operation.""" - target = op["target"] - thread = self.targets[target] - thread.stop() - thread.join() - if thread.exc: - raise thread.exc - self.assertFalse(thread.ops) - - def wait_for_event(self, op): - """Run the 'waitForEvent' operation.""" - event = OBJECT_TYPES[op["event"]] - count = op["count"] - timeout = op.get("timeout", 10000) / 1000.0 - wait_until( - lambda: self.listener.event_count(event) >= count, - "find %s %s event(s)" % (count, event), - timeout=timeout, - ) - - def check_out(self, op): - """Run the 'checkOut' operation.""" - label = op["label"] - with self.pool.get_socket() as sock_info: - # Call 'pin_cursor' so we can hold the socket. - sock_info.pin_cursor() - if label: - self.labels[label] = sock_info - else: - self.addCleanup(sock_info.close_socket, None) - - def check_in(self, op): - """Run the 'checkIn' operation.""" - label = op["connection"] - sock_info = self.labels[label] - self.pool.return_socket(sock_info) - - def ready(self, op): - """Run the 'ready' operation.""" - self.pool.ready() - - def clear(self, op): - """Run the 'clear' operation.""" - self.pool.reset() - - def close(self, op): - """Run the 'close' operation.""" - self.pool.close() - - def run_operation(self, op): - """Run a single operation in a test.""" - op_name = camel_to_snake(op["name"]) - thread = op["thread"] - meth = getattr(self, op_name) - if thread: - self.targets[thread].schedule(lambda: meth(op)) - else: - meth(op) - - def run_operations(self, ops): - """Run a test's operations.""" - for op in ops: - self._ops.append(op) - self.run_operation(op) - - def check_object(self, actual, expected): - """Assert that the actual object matches the expected object.""" - self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) - for attr, expected_val in expected.items(): - if attr == "type": - continue - c2s = camel_to_snake(attr) - actual_val = getattr(actual, c2s) - if expected_val == 42: - self.assertIsNotNone(actual_val) - else: - self.assertEqual(actual_val, expected_val) - - def check_event(self, actual, expected): - """Assert that the actual event matches the expected event.""" - self.check_object(actual, expected) - - def actual_events(self, ignore): - """Return all the non-ignored events.""" - ignore = tuple(OBJECT_TYPES[name] for name in ignore) - return [event for event in self.listener.events if not isinstance(event, ignore)] - - def check_events(self, events, ignore): - """Check the events of a test.""" - actual_events = self.actual_events(ignore) - for actual, expected in zip(actual_events, events): - self.logs.append("Checking event actual: %r vs expected: %r" % (actual, expected)) - self.check_event(actual, expected) - - if len(events) > len(actual_events): - self.fail("missing events: %r" % (events[len(actual_events) :],)) - - def check_error(self, actual, expected): - message = expected.pop("message") - self.check_object(actual, expected) - self.assertIn(message, str(actual)) - - def _set_fail_point(self, client, command_args): - cmd = SON([("configureFailPoint", "failCommand")]) - cmd.update(command_args) - client.admin.command(cmd) - - def 
set_fail_point(self, command_args): - if not client_context.supports_failCommand_fail_point: - self.skipTest("failCommand fail point must be supported") - self._set_fail_point(self.client, command_args) - - def run_scenario(self, scenario_def, test): - """Run a CMAP spec test.""" - self.logs: list = [] - self.assertEqual(scenario_def["version"], 1) - self.assertIn(scenario_def["style"], ["unit", "integration"]) - self.listener = CMAPListener() - self._ops: list = [] - - # Configure the fail point before creating the client. - if "failPoint" in test: - fp = test["failPoint"] - self.set_fail_point(fp) - self.addCleanup( - self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} - ) - - opts = test["poolOptions"].copy() - opts["event_listeners"] = [self.listener] - opts["_monitor_class"] = DummyMonitor - opts["connect"] = False - # Support backgroundThreadIntervalMS, default to 50ms. - interval = opts.pop("backgroundThreadIntervalMS", 50) - if interval < 0: - kill_cursor_frequency = 99999999 - else: - kill_cursor_frequency = interval / 1000.0 - with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): - client = single_client(**opts) - # Update the SD to a known type because the DummyMonitor will not. - # Note we cannot simply call topology.on_change because that would - # internally call pool.ready() which introduces unexpected - # PoolReadyEvents. Instead, update the initial state before - # opening the Topology. - td = client_context.client._topology.description - sd = td.server_descriptions()[(client_context.host, client_context.port)] - client._topology._description = updated_topology_description( - client._topology._description, sd - ) - # When backgroundThreadIntervalMS is negative we do not start the - # background thread to ensure it never runs. - if interval < 0: - client._topology.open() - else: - client._get_topology() - self.addCleanup(client.close) - self.pool = list(client._topology._servers.values())[0].pool - - # Map of target names to Thread objects. - self.targets: dict = dict() - # Map of label names to Connection objects - self.labels: dict = dict() - - def cleanup(): - for t in self.targets.values(): - t.stop() - for t in self.targets.values(): - t.join(5) - for conn in self.labels.values(): - conn.close_socket(None) - - self.addCleanup(cleanup) - - try: - if test["error"]: - with self.assertRaises(PyMongoError) as ctx: - self.run_operations(test["operations"]) - self.check_error(ctx.exception, test["error"]) - else: - self.run_operations(test["operations"]) - - self.check_events(test["events"], test["ignore"]) - except Exception: - # Print the events after a test failure. - print("\nFailed test: %r" % (test["description"],)) - print("Operations:") - for op in self._ops: - print(op) - print("Threads:") - print(self.targets) - print("Connections:") - print(self.labels) - print("Events:") - for event in self.listener.events: - print(event) - print("Log:") - for log in self.logs: - print(log) - raise - - POOL_OPTIONS = { - "maxPoolSize": 50, - "minPoolSize": 1, - "maxIdleTimeMS": 10000, - "waitQueueTimeoutMS": 10000, - } - - # - # Prose tests. Numbers correspond to the prose test number in the spec. 
- # - def test_1_client_connection_pool_options(self): - client = rs_or_single_client(**self.POOL_OPTIONS) - self.addCleanup(client.close) - pool_opts = get_pool(client).opts - self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) - - def test_2_all_client_pools_have_same_options(self): - client = rs_or_single_client(**self.POOL_OPTIONS) - self.addCleanup(client.close) - client.admin.command("ping") - # Discover at least one secondary. - if client_context.has_secondaries: - client.admin.command("ping", read_preference=ReadPreference.SECONDARY) - pools = get_pools(client) - pool_opts = pools[0].opts - - self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) - for pool in pools[1:]: - self.assertEqual(pool.opts, pool_opts) - - def test_3_uri_connection_pool_options(self): - opts = "&".join(["%s=%s" % (k, v) for k, v in self.POOL_OPTIONS.items()]) - uri = "mongodb://%s/?%s" % (client_context.pair, opts) - client = rs_or_single_client(uri) - self.addCleanup(client.close) - pool_opts = get_pool(client).opts - self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) - - def test_4_subscribe_to_events(self): - listener = CMAPListener() - client = single_client(event_listeners=[listener]) - self.addCleanup(client.close) - self.assertEqual(listener.event_count(PoolCreatedEvent), 1) - - # Creates a new connection. - client.admin.command("ping") - self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) - self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) - self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) - self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) - self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) - - # Uses the existing connection. - client.admin.command("ping") - self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) - self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) - self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) - - client.close() - self.assertEqual(listener.event_count(PoolClearedEvent), 1) - self.assertEqual(listener.event_count(ConnectionClosedEvent), 1) - - def test_5_check_out_fails_connection_error(self): - listener = CMAPListener() - client = single_client(event_listeners=[listener]) - self.addCleanup(client.close) - pool = get_pool(client) - - def mock_connect(*args, **kwargs): - raise ConnectionFailure("connect failed") - - pool.connect = mock_connect - # Un-patch Pool.connect to break the cyclic reference. - self.addCleanup(delattr, pool, "connect") - - # Attempt to create a new connection. - with self.assertRaisesRegex(ConnectionFailure, "connect failed"): - client.admin.command("ping") - - self.assertIsInstance(listener.events[0], PoolCreatedEvent) - self.assertIsInstance(listener.events[1], PoolReadyEvent) - self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) - self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) - self.assertIsInstance(listener.events[4], PoolClearedEvent) - - failed_event = listener.events[3] - self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) - - def test_5_check_out_fails_auth_error(self): - listener = CMAPListener() - client = single_client_noauth( - username="notauser", password="fail", event_listeners=[listener] - ) - self.addCleanup(client.close) - - # Attempt to create a new connection. 
- with self.assertRaisesRegex(OperationFailure, "failed"): - client.admin.command("ping") - - self.assertIsInstance(listener.events[0], PoolCreatedEvent) - self.assertIsInstance(listener.events[1], PoolReadyEvent) - self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) - self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) - # Error happens here. - self.assertIsInstance(listener.events[4], ConnectionClosedEvent) - self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) - self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) - - # - # Extra non-spec tests - # - def assertRepr(self, obj): - new_obj = eval(repr(obj)) - self.assertEqual(type(new_obj), type(obj)) - self.assertEqual(repr(new_obj), repr(obj)) - - def test_events_repr(self): - host = ("localhost", 27017) - self.assertRepr(ConnectionCheckedInEvent(host, 1)) - self.assertRepr(ConnectionCheckedOutEvent(host, 1)) - self.assertRepr( - ConnectionCheckOutFailedEvent(host, ConnectionCheckOutFailedReason.POOL_CLOSED) - ) - self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) - self.assertRepr(ConnectionCreatedEvent(host, 1)) - self.assertRepr(ConnectionReadyEvent(host, 1)) - self.assertRepr(ConnectionCheckOutStartedEvent(host)) - self.assertRepr(PoolCreatedEvent(host, {})) - self.assertRepr(PoolClearedEvent(host)) - self.assertRepr(PoolClearedEvent(host, service_id=ObjectId())) - self.assertRepr(PoolClosedEvent(host)) - - def test_close_leaves_pool_unpaused(self): - # Needed until we implement PYTHON-2463. This test is related to - # test_threads.TestThreads.test_client_disconnect - listener = CMAPListener() - client = single_client(event_listeners=[listener]) - client.admin.command("ping") - pool = get_pool(client) - client.close() - self.assertEqual(1, listener.event_count(PoolClearedEvent)) - self.assertEqual(PoolState.READY, pool.state) - # Checking out a connection should succeed - with pool.get_socket(): - pass - - -def create_test(scenario_def, test, name): - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -class CMAPTestCreator(TestCreator): - def tests(self, scenario_def): - """Extract the tests from a spec file. - - CMAP tests do not have a 'tests' field. The whole file represents - a single test case. - """ - return [scenario_def] - - -test_creator = CMAPTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) -test_creator.create_tests() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_code.py b/test/test_code.py index 9ff305e39a..23f0af5cef 100644 --- a/test/test_code.py +++ b/test/test_code.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -15,6 +14,7 @@ # limitations under the License. 
"""Tests for the Code wrapper.""" +from __future__ import annotations import sys @@ -46,8 +46,8 @@ def test_code(self): a_code = Code("hello world") self.assertTrue(a_code.startswith("hello")) self.assertTrue(a_code.endswith("world")) - self.assertTrue(isinstance(a_code, Code)) - self.assertFalse(isinstance(a_string, Code)) + self.assertIsInstance(a_code, Code) + self.assertNotIsInstance(a_string, Code) self.assertIsNone(a_code.scope) with_scope = Code("hello world", {"my_var": 5}) self.assertEqual({"my_var": 5}, with_scope.scope) @@ -67,7 +67,7 @@ def test_repr(self): c = Code("hello world", {"blah": 3}) self.assertEqual(repr(c), "Code('hello world', {'blah': 3})") c = Code("\x08\xFF") - self.assertEqual(repr(c), "Code(%s, None)" % (repr("\x08\xFF"),)) + self.assertEqual(repr(c), "Code({}, None)".format(repr("\x08\xFF"))) def test_equality(self): b = Code("hello") diff --git a/test/test_collation.py b/test/test_collation.py index d8410a9de4..903f24a228 100644 --- a/test/test_collation.py +++ b/test/test_collation.py @@ -13,11 +13,12 @@ # limitations under the License. """Test the collation module.""" +from __future__ import annotations import functools import warnings from test import IntegrationTest, client_context, unittest -from test.utils import EventListener, rs_or_single_client +from test.utils_shared import EventListener, OvertCommandListener from typing import Any from pymongo.collation import ( @@ -38,6 +39,8 @@ ) from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestCollationObject(unittest.TestCase): def test_constructor(self): @@ -93,31 +96,24 @@ class TestCollation(IntegrationTest): warn_context: Any collation: Collation - @classmethod @client_context.require_connection - def setUpClass(cls): - super(TestCollation, cls).setUpClass() - cls.listener = EventListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) - cls.db = cls.client.pymongo_test - cls.collation = Collation("en_US") - cls.warn_context = warnings.catch_warnings() - cls.warn_context.__enter__() - warnings.simplefilter("ignore", DeprecationWarning) - - @classmethod - def tearDownClass(cls): - cls.warn_context.__exit__() - cls.warn_context = None - cls.client.close() - super(TestCollation, cls).tearDownClass() - - def tearDown(self): - self.listener.results.clear() - super(TestCollation, self).tearDown() + def setUp(self) -> None: + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test + self.collation = Collation("en_US") + self.warn_context = warnings.catch_warnings() + self.warn_context.__enter__() + + def tearDown(self) -> None: + self.warn_context.__exit__() + self.warn_context = None + self.listener.reset() + super().tearDown() def last_command_started(self): - return self.listener.results["started"][-1].command + return self.listener.started_events[-1].command def assertCollationInLastCommand(self): self.assertEqual(self.collation.document, self.last_command_started()["collation"]) @@ -129,7 +125,7 @@ def test_create_collection(self): # Test passing collation as a dict as well. 
self.db.test.drop() - self.listener.results.clear() + self.listener.reset() self.db.create_collection("test", collation=self.collation.document) self.assertCollationInLastCommand() @@ -139,7 +135,7 @@ def test_index_model(self): def test_create_index(self): self.db.test.create_index("foo", collation=self.collation) - ci_cmd = self.listener.results["started"][0].command + ci_cmd = self.listener.started_events[0].command self.assertEqual(self.collation.document, ci_cmd["indexes"][0]["collation"]) def test_aggregate(self): @@ -154,18 +150,18 @@ def test_distinct(self): self.db.test.distinct("foo", collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find(collation=self.collation).distinct("foo") self.assertCollationInLastCommand() def test_find_command(self): self.db.test.insert_one({"is this thing on?": True}) - self.listener.results.clear() + self.listener.reset() next(self.db.test.find(collation=self.collation)) self.assertCollationInLastCommand() def test_explain_command(self): - self.listener.results.clear() + self.listener.reset() self.db.test.find(collation=self.collation).explain() # The collation should be part of the explained command. self.assertEqual( @@ -174,40 +170,40 @@ def test_explain_command(self): def test_delete(self): self.db.test.delete_one({"foo": 42}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.delete_many({"foo": 42}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["deletes"][0]["collation"]) def test_update(self): self.db.test.replace_one({"foo": 42}, {"foo": 43}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.update_one({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) - self.listener.results.clear() + self.listener.reset() self.db.test.update_many({"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation) - command = self.listener.results["started"][0].command + command = self.listener.started_events[0].command self.assertEqual(self.collation.document, command["updates"][0]["collation"]) def test_find_and(self): self.db.test.find_one_and_delete({"foo": 42}, collation=self.collation) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find_one_and_update( {"foo": 42}, {"$set": {"foo": 43}}, collation=self.collation ) self.assertCollationInLastCommand() - self.listener.results.clear() + self.listener.reset() self.db.test.find_one_and_replace({"foo": 42}, {"foo": 43}, collation=self.collation) self.assertCollationInLastCommand() @@ -229,8 +225,8 @@ def test_bulk_write(self): ] ) - delete_cmd = self.listener.results["started"][0].command - update_cmd = self.listener.results["started"][1].command + delete_cmd = 
self.listener.started_events[0].command + update_cmd = self.listener.started_events[1].command def check_ops(ops): for op in ops: diff --git a/test/test_collection.py b/test/test_collection.py index bea2ed6ca6..18be309f22 100644 --- a/test/test_collection.py +++ b/test/test_collection.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright 2009-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,29 +13,34 @@ # limitations under the License. """Test the collection module.""" +from __future__ import annotations +import asyncio import contextlib import re import sys -from codecs import utf_8_decode # type: ignore +from codecs import utf_8_decode from collections import defaultdict -from typing import Iterable, no_type_check +from test.utils import get_pool, is_mongos +from typing import Any, Iterable, no_type_check -from pymongo.database import Database +from pymongo.synchronous.database import Database sys.path[0:0] = [""] -from test import client_context, unittest -from test.test_client import IntegrationTest -from test.utils import ( +from test import ( # TODO: fix sync imports in PYTHON-4528 + IntegrationTest, + UnitTest, + client_context, + unittest, +) +from test.utils_shared import ( IMPOSSIBLE_WRITE_CONCERN, EventListener, - get_pool, - is_mongos, - rs_or_single_client, - single_client, + OvertCommandListener, wait_until, ) +from test.version import Version from bson import encode from bson.codec_options import CodecOptions @@ -46,10 +49,8 @@ from bson.regex import Regex from bson.son import SON from pymongo import ASCENDING, DESCENDING, GEO2D, GEOSPHERE, HASHED, TEXT -from pymongo.bulk import BulkWriteError -from pymongo.collection import Collection, ReturnDocument -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import CursorType +from pymongo.bulk_shared import BulkWriteError +from pymongo.cursor_shared import CursorType from pymongo.errors import ( ConfigurationError, DocumentTooLarge, @@ -62,7 +63,6 @@ WriteConcernError, ) from pymongo.message import _COMMAND_OVERHEAD, _gen_find_command -from pymongo.mongo_client import MongoClient from pymongo.operations import * from pymongo.read_concern import DEFAULT_READ_CONCERN from pymongo.read_preferences import ReadPreference @@ -72,17 +72,24 @@ InsertOneResult, UpdateResult, ) +from pymongo.synchronous.collection import Collection, ReturnDocument +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +_IS_SYNC = True -class TestCollectionNoConnect(unittest.TestCase): + +class TestCollectionNoConnect(UnitTest): """Test Collection features on a client that does not connect.""" db: Database + client: MongoClient - @classmethod - def setUpClass(cls): - cls.db = MongoClient(connect=False).pymongo_test + def setUp(self) -> None: + super().setUp() + self.client = self.simple_client(connect=False) + self.db = self.client.pymongo_test def test_collection(self): self.assertRaises(TypeError, Collection, self.db, 5) @@ -104,7 +111,7 @@ def make_col(base, name): def test_getattr(self): coll = self.db.test - self.assertTrue(isinstance(coll["_does_not_exist"], Collection)) + self.assertIsInstance(coll["_does_not_exist"], Collection) with self.assertRaises(AttributeError) as context: coll._does_not_exist @@ -125,10 +132,7 @@ def test_getattr(self): def test_iteration(self): coll = self.db.coll - if "PyPy" in sys.version: - msg = "'NoneType' object is not callable" - else: - 
msg = "'Collection' object is not iterable" + msg = "'Collection' object is not iterable" # Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in coll: # type: ignore[misc] # error: "None" not callable [misc] @@ -137,10 +141,10 @@ def test_iteration(self): self.assertEqual(coll[0].name, "coll.0") self.assertEqual(coll[{}].name, "coll.{}") # next fails - with self.assertRaisesRegex(TypeError, "'Collection' object is not iterable"): + with self.assertRaisesRegex(TypeError, msg): _ = next(coll) # .next() fails - with self.assertRaisesRegex(TypeError, "'Collection' object is not iterable"): + with self.assertRaisesRegex(TypeError, msg): _ = coll.next() # Do not implement typing.Iterable. self.assertNotIsInstance(coll, Iterable) @@ -149,20 +153,14 @@ def test_iteration(self): class TestCollection(IntegrationTest): w: int - @classmethod - def setUpClass(cls): - super(TestCollection, cls).setUpClass() - cls.w = client_context.w # type: ignore - - @classmethod - def tearDownClass(cls): - cls.db.drop_collection("test_large_limit") - def setUp(self): - self.db.test.drop() + super().setUp() + self.w = client_context.w # type: ignore def tearDown(self): self.db.test.drop() + self.db.drop_collection("test_large_limit") + super().tearDown() @contextlib.contextmanager def write_concern_collection(self): @@ -170,13 +168,15 @@ def write_concern_collection(self): with self.assertRaises(WriteConcernError): # Unsatisfiable write concern. yield Collection( - self.db, "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) + self.db, + "test", + write_concern=WriteConcern(w=len(client_context.nodes) + 1), ) else: yield self.db.test def test_equality(self): - self.assertTrue(isinstance(self.db.test, Collection)) + self.assertIsInstance(self.db.test, Collection) self.assertEqual(self.db.test, self.db["test"]) self.assertEqual(self.db.test, Collection(self.db, "test")) self.assertEqual(self.db.test.mike, self.db["test.mike"]) @@ -189,25 +189,30 @@ def test_create(self): # No Exception. 
db = client_context.client.pymongo_test db.create_test_no_wc.drop() + + def lambda_test(): + return "create_test_no_wc" not in db.list_collection_names() + + def lambda_test_2(): + return "create_test_no_wc" in db.list_collection_names() + wait_until( - lambda: "create_test_no_wc" not in db.list_collection_names(), + lambda_test, "drop create_test_no_wc collection", ) - Collection(db, name="create_test_no_wc", create=True) + db.create_collection("create_test_no_wc") wait_until( - lambda: "create_test_no_wc" in db.list_collection_names(), + lambda_test_2, "create create_test_no_wc collection", ) # SERVER-33317 if not client_context.is_mongos or not client_context.version.at_least(3, 7, 0): with self.assertRaises(OperationFailure): - Collection( - db, name="create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN, create=True - ) + db.create_collection("create-test-wc", write_concern=IMPOSSIBLE_WRITE_CONCERN) def test_drop_nonexistent_collection(self): self.db.drop_collection("test") - self.assertFalse("test" in self.db.list_collection_names()) + self.assertNotIn("test", self.db.list_collection_names()) # No exception self.db.drop_collection("test") @@ -215,8 +220,10 @@ def test_drop_nonexistent_collection(self): def test_create_indexes(self): db = self.db - self.assertRaises(TypeError, db.test.create_indexes, "foo") - self.assertRaises(TypeError, db.test.create_indexes, ["foo"]) + with self.assertRaises(TypeError): + db.test.create_indexes("foo") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.create_indexes(["foo"]) # type: ignore[list-item] self.assertRaises(TypeError, IndexModel, 5) self.assertRaises(ValueError, IndexModel, []) @@ -241,7 +248,7 @@ def test_create_indexes(self): db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) db.test.create_indexes([IndexModel("hello")]) - self.assertTrue("hello_1" in db.test.index_information()) + self.assertIn("hello_1", db.test.index_information()) db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) @@ -250,12 +257,13 @@ def test_create_indexes(self): ) info = db.test.index_information() for name in names: - self.assertTrue(name in info) + self.assertIn(name, info) db.test.drop() db.test.insert_one({"a": 1}) db.test.insert_one({"a": 1}) - self.assertRaises(DuplicateKeyError, db.test.create_indexes, [IndexModel("a", unique=True)]) + with self.assertRaises(DuplicateKeyError): + db.test.create_indexes([IndexModel("a", unique=True)]) with self.write_concern_collection() as coll: coll.create_indexes([IndexModel("hello")]) @@ -277,9 +285,10 @@ def test_create_indexes_commitQuorum(self): def test_create_index(self): db = self.db - self.assertRaises(TypeError, db.test.create_index, 5) - self.assertRaises(TypeError, db.test.create_index, {"hello": 1}) - self.assertRaises(ValueError, db.test.create_index, []) + with self.assertRaises(TypeError): + db.test.create_index(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.create_index([]) db.test.drop_indexes() db.test.insert_one({}) @@ -300,21 +309,30 @@ def test_create_index(self): db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) db.test.create_index("hello") - self.assertTrue("hello_1" in db.test.index_information()) + self.assertIn("hello_1", db.test.index_information()) db.test.drop_indexes() self.assertEqual(len(db.test.index_information()), 1) db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)]) - self.assertTrue("hello_-1_world_1" in db.test.index_information()) + 
self.assertIn("hello_-1_world_1", db.test.index_information()) + + db.test.drop_indexes() + db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], name=None) + self.assertIn("hello_-1_world_1", db.test.index_information()) db.test.drop() db.test.insert_one({"a": 1}) db.test.insert_one({"a": 1}) - self.assertRaises(DuplicateKeyError, db.test.create_index, "a", unique=True) + with self.assertRaises(DuplicateKeyError): + db.test.create_index("a", unique=True) with self.write_concern_collection() as coll: coll.create_index([("hello", DESCENDING)]) + db.test.create_index(["hello", "world"]) + db.test.create_index(["hello", ("world", DESCENDING)]) + db.test.create_index({"hello": 1}.items()) # type:ignore[arg-type] + def test_drop_index(self): db = self.db db.test.drop_indexes() @@ -326,10 +344,13 @@ def test_drop_index(self): db.test.drop_index(name) # Drop it again. - with self.assertRaises(OperationFailure): + if client_context.version < Version(8, 3, -1): + with self.assertRaises(OperationFailure): + db.test.drop_index(name) + else: db.test.drop_index(name) self.assertEqual(len(db.test.index_information()), 2) - self.assertTrue("hello_1" in db.test.index_information()) + self.assertIn("hello_1", db.test.index_information()) db.test.drop_indexes() db.test.create_index("hello") @@ -339,7 +360,7 @@ def test_drop_index(self): self.assertEqual(name, "goodbye_1") db.test.drop_index([("goodbye", ASCENDING)]) self.assertEqual(len(db.test.index_information()), 2) - self.assertTrue("hello_1" in db.test.index_information()) + self.assertIn("hello_1", db.test.index_information()) with self.write_concern_collection() as coll: coll.drop_index("hello_1") @@ -350,12 +371,14 @@ def test_index_management_max_time_ms(self): coll = self.db.test self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="alwaysOn") try: - self.assertRaises(ExecutionTimeout, coll.create_index, "foo", maxTimeMS=1) - self.assertRaises( - ExecutionTimeout, coll.create_indexes, [IndexModel("foo")], maxTimeMS=1 - ) - self.assertRaises(ExecutionTimeout, coll.drop_index, "foo", maxTimeMS=1) - self.assertRaises(ExecutionTimeout, coll.drop_indexes, maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.create_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.create_indexes([IndexModel("foo")], maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.drop_index("foo", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + coll.drop_indexes(maxTimeMS=1) finally: self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") @@ -365,19 +388,19 @@ def test_list_indexes(self): db.test.insert_one({}) # create collection def map_indexes(indexes): - return dict([(index["name"], index) for index in indexes]) + return {index["name"]: index for index in indexes} - indexes = list(db.test.list_indexes()) + indexes = (db.test.list_indexes()).to_list() self.assertEqual(len(indexes), 1) - self.assertTrue("_id_" in map_indexes(indexes)) + self.assertIn("_id_", map_indexes(indexes)) db.test.create_index("hello") - indexes = list(db.test.list_indexes()) + indexes = (db.test.list_indexes()).to_list() self.assertEqual(len(indexes), 2) self.assertEqual(map_indexes(indexes)["hello_1"]["key"], SON([("hello", ASCENDING)])) db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) - indexes = list(db.test.list_indexes()) + indexes = (db.test.list_indexes()).to_list() self.assertEqual(len(indexes), 3) index_map = map_indexes(indexes) 
self.assertEqual( @@ -386,11 +409,11 @@ def map_indexes(indexes): self.assertEqual(True, index_map["hello_-1_world_1"]["unique"]) # List indexes on a collection that does not exist. - indexes = list(db.does_not_exist.list_indexes()) + indexes = (db.does_not_exist.list_indexes()).to_list() self.assertEqual(len(indexes), 0) # List indexes on a database that does not exist. - indexes = list(self.client.db_does_not_exist.coll.list_indexes()) + indexes = (self.client.db_does_not_exist.coll.list_indexes()).to_list() self.assertEqual(len(indexes), 0) def test_index_info(self): @@ -398,26 +421,26 @@ def test_index_info(self): db = self.db db.test.drop() db.test.insert_one({}) # create collection self.assertEqual(len(db.test.index_information()), 1) - self.assertTrue("_id_" in db.test.index_information()) + self.assertIn("_id_", db.test.index_information()) db.test.create_index("hello") self.assertEqual(len(db.test.index_information()), 2) - self.assertEqual(db.test.index_information()["hello_1"]["key"], [("hello", ASCENDING)]) + self.assertEqual((db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)]) db.test.create_index([("hello", DESCENDING), ("world", ASCENDING)], unique=True) - self.assertEqual(db.test.index_information()["hello_1"]["key"], [("hello", ASCENDING)]) + self.assertEqual((db.test.index_information())["hello_1"]["key"], [("hello", ASCENDING)]) self.assertEqual(len(db.test.index_information()), 3) self.assertEqual( [("hello", DESCENDING), ("world", ASCENDING)], - db.test.index_information()["hello_-1_world_1"]["key"], + (db.test.index_information())["hello_-1_world_1"]["key"], ) - self.assertEqual(True, db.test.index_information()["hello_-1_world_1"]["unique"]) + self.assertEqual(True, (db.test.index_information())["hello_-1_world_1"]["unique"]) def test_index_geo2d(self): db = self.db db.test.drop_indexes() self.assertEqual("loc_2d", db.test.create_index([("loc", GEO2D)])) - index_info = db.test.index_information()["loc_2d"] + index_info = (db.test.index_information())["loc_2d"] self.assertEqual([("loc", "2d")], index_info["key"]) # geoSearch was deprecated in 4.4 and removed in 5.0 @@ -426,22 +449,24 @@ def test_index_geo2d(self): def test_index_haystack(self): db = self.db db.test.drop() - _id = db.test.insert_one( - {"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"} + _id = ( + db.test.insert_one({"pos": {"long": 34.2, "lat": 33.3}, "type": "restaurant"}) ).inserted_id db.test.insert_one({"pos": {"long": 34.2, "lat": 37.3}, "type": "restaurant"}) db.test.insert_one({"pos": {"long": 59.1, "lat": 87.2}, "type": "office"}) db.test.create_index([("pos", "geoHaystack"), ("type", ASCENDING)], bucketSize=1) - results = db.command( - SON( - [ - ("geoSearch", "test"), - ("near", [33, 33]), - ("maxDistance", 6), - ("search", {"type": "restaurant"}), - ("limit", 30), - ] + results = ( + db.command( + SON( + [ + ("geoSearch", "test"), + ("near", [33, 33]), + ("maxDistance", 6), + ("search", {"type": "restaurant"}), + ("limit", 30), + ] + ) ) )["results"] @@ -455,8 +480,8 @@ def test_index_text(self): db = self.db db.test.drop_indexes() self.assertEqual("t_text", db.test.create_index([("t", TEXT)])) - index_info = db.test.index_information()["t_text"] - self.assertTrue("weights" in index_info) + index_info = (db.test.index_information())["t_text"] + self.assertIn("weights", index_info) db.test.insert_many( [{"t": "spam eggs and spam"}, {"t": "spam"}, {"t": "egg sausage and bacon"}] ) @@ -467,8 +492,8 @@ def test_index_text(self): # Sort by 'score' field.
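
# The list(cursor) -> cursor.to_list() rewrites in the hunks above rely on the
# to_list() helper that recent PyMongo releases expose on Cursor and
# CommandCursor; it reads identically in synchronous and asynchronous code.
# A minimal sketch, assuming a reachable mongod on the default port:
from pymongo import MongoClient

client = MongoClient()
coll = client.test.docs
indexes = coll.list_indexes().to_list()  # equivalent to list(coll.list_indexes())
print([idx["name"] for idx in indexes])
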
cursor.sort([("score", {"$meta": "textScore"})]) - results = list(cursor) - self.assertTrue(results[0]["score"] >= results[1]["score"]) + results = cursor.to_list() + self.assertGreaterEqual(results[0]["score"], results[1]["score"]) db.test.drop_indexes() @@ -477,7 +502,7 @@ def test_index_2dsphere(self): db.test.drop_indexes() self.assertEqual("geo_2dsphere", db.test.create_index([("geo", GEOSPHERE)])) - for dummy, info in db.test.index_information().items(): + for dummy, info in (db.test.index_information()).items(): field, idx_type = info["key"][0] if field == "geo" and idx_type == "2dsphere": break @@ -496,7 +521,7 @@ def test_index_hashed(self): db.test.drop_indexes() self.assertEqual("a_hashed", db.test.create_index([("a", HASHED)])) - for dummy, info in db.test.index_information().items(): + for dummy, info in (db.test.index_information()).items(): field, idx_type = info["key"][0] if field == "a" and idx_type == "hashed": break @@ -509,7 +534,7 @@ def test_index_sparse(self): db = self.db db.test.drop_indexes() db.test.create_index([("key", ASCENDING)], sparse=True) - self.assertTrue(db.test.index_information()["key_1"]["sparse"]) + self.assertTrue((db.test.index_information())["key_1"]["sparse"]) def test_index_background(self): db = self.db @@ -517,9 +542,9 @@ def test_index_background(self): db.test.create_index([("keya", ASCENDING)]) db.test.create_index([("keyb", ASCENDING)], background=False) db.test.create_index([("keyc", ASCENDING)], background=True) - self.assertFalse("background" in db.test.index_information()["keya_1"]) - self.assertFalse(db.test.index_information()["keyb_1"]["background"]) - self.assertTrue(db.test.index_information()["keyc_1"]["background"]) + self.assertNotIn("background", (db.test.index_information())["keya_1"]) + self.assertFalse((db.test.index_information())["keyb_1"]["background"]) + self.assertTrue((db.test.index_information())["keyc_1"]["background"]) def _drop_dups_setup(self, db): db.drop_collection("test") @@ -534,10 +559,11 @@ def test_index_dont_drop_dups(self): self._drop_dups_setup(db) # There's a duplicate - def test_create(): + def _test_create(): db.test.create_index([("i", ASCENDING)], unique=True, dropDups=False) - self.assertRaises(DuplicateKeyError, test_create) + with self.assertRaises(DuplicateKeyError): + _test_create() # Duplicate wasn't dropped self.assertEqual(4, db.test.count_documents({})) @@ -571,16 +597,12 @@ def test_index_filter(self): db.drop_collection("test") # Test bad filter spec on create. 
- self.assertRaises(OperationFailure, db.test.create_index, "x", partialFilterExpression=5) - self.assertRaises( - OperationFailure, - db.test.create_index, - "x", - partialFilterExpression={"x": {"$asdasd": 3}}, - ) - self.assertRaises( - OperationFailure, db.test.create_index, "x", partialFilterExpression={"$and": 5} - ) + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression=5) + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression={"x": {"$asdasd": 3}}) + with self.assertRaises(OperationFailure): + db.test.create_index("x", partialFilterExpression={"$and": 5}) self.assertEqual( "x_1", @@ -671,7 +693,7 @@ def test_field_selection(self): doc = next(db.test.find({}, {"_id": False})) l = list(doc) - self.assertFalse("_id" in l) + self.assertNotIn("_id", l) def test_options(self): db = self.db @@ -685,10 +707,10 @@ def test_insert_one(self): db = self.db db.test.drop() - document = {"_id": 1000} + document: dict[str, Any] = {"_id": 1000} result = db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, int)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, int) self.assertEqual(document["_id"], result.inserted_id) self.assertTrue(result.acknowledged) self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) @@ -696,8 +718,8 @@ def test_insert_one(self): document = {"foo": "bar"} result = db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) self.assertEqual(document["_id"], result.inserted_id) self.assertTrue(result.acknowledged) self.assertIsNotNone(db.test.find_one({"_id": document["_id"]})) @@ -705,16 +727,20 @@ def test_insert_one(self): db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) - self.assertTrue(isinstance(result.inserted_id, ObjectId)) + self.assertIsInstance(result, InsertOneResult) + self.assertIsInstance(result.inserted_id, ObjectId) self.assertEqual(document["_id"], result.inserted_id) self.assertFalse(result.acknowledged) # The insert failed duplicate key... 
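
# Most hunks in this file rewrite the positional assertRaises(Err, fn, *args)
# form into the context-manager form; both are standard unittest, but the
# latter spells out the failing call, which also lets type checkers see it.
# A self-contained illustration:
import unittest

class AssertRaisesStyles(unittest.TestCase):
    def test_both_forms(self):
        # Legacy positional form: callable plus its arguments.
        self.assertRaises(ValueError, int, "not a number")
        # Context-manager form preferred throughout this diff.
        with self.assertRaises(ValueError):
            int("not a number")

if __name__ == "__main__":
    unittest.main()
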
- wait_until(lambda: 2 == db.test.count_documents({}), "forcing duplicate key error") + + def async_lambda(): + return db.test.count_documents({}) == 2 + + wait_until(async_lambda, "forcing duplicate key error") document = RawBSONDocument(encode({"_id": ObjectId(), "foo": "bar"})) result = db.test.insert_one(document) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(result.inserted_id, None) def test_insert_many(self): @@ -723,38 +749,38 @@ def test_insert_many(self): docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual(5, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, ObjectId)) - self.assertTrue(_id in result.inserted_ids) + self.assertIsInstance(_id, ObjectId) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [{"_id": i} for i in range(5)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual(5, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) - self.assertTrue(_id in result.inserted_ids) + self.assertIsInstance(_id, int) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, db.test.count_documents({"_id": _id})) self.assertTrue(result.acknowledged) docs = [RawBSONDocument(encode({"_id": i + 5})) for i in range(5)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) - self.assertTrue(isinstance(result.inserted_ids, list)) + self.assertIsInstance(result, InsertManyResult) + self.assertIsInstance(result.inserted_ids, list) self.assertEqual([], result.inserted_ids) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) docs: list = [{} for _ in range(5)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertFalse(result.acknowledged) self.assertEqual(20, db.test.count_documents({})) @@ -785,7 +811,7 @@ def test_insert_many_invalid(self): db.test.insert_many(1) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, "documents must be a non-empty list"): - db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) # type: ignore[arg-type] + db.test.insert_many(RawBSONDocument(encode({"_id": 2}))) def test_delete_one(self): self.db.test.drop() @@ -795,23 +821,27 @@ def test_delete_one(self): self.db.test.insert_one({"z": 1}) result = self.db.test.delete_one({"x": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(1, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(2, self.db.test.count_documents({})) result = self.db.test.delete_one({"y": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(1, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(1, self.db.test.count_documents({})) db = self.db.client.get_database(self.db.name, 
write_concern=WriteConcern(w=0)) result = db.test.delete_one({"z": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) - wait_until(lambda: 0 == db.test.count_documents({}), "delete 1 documents") + + def lambda_async(): + return db.test.count_documents({}) == 0 + + wait_until(lambda_async, "delete 1 documents") def test_delete_many(self): self.db.test.drop() @@ -822,25 +852,32 @@ def test_delete_many(self): self.db.test.insert_one({"y": 1}) result = self.db.test.delete_many({"x": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertEqual(2, result.deleted_count) self.assertTrue(result.acknowledged) self.assertEqual(0, self.db.test.count_documents({"x": 1})) db = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) result = db.test.delete_many({"y": 1}) - self.assertTrue(isinstance(result, DeleteResult)) + self.assertIsInstance(result, DeleteResult) self.assertRaises(InvalidOperation, lambda: result.deleted_count) self.assertFalse(result.acknowledged) - wait_until(lambda: 0 == db.test.count_documents({}), "delete 2 documents") + + def lambda_async(): + return db.test.count_documents({}) == 0 + + wait_until(lambda_async, "delete 2 documents") def test_command_document_too_large(self): large = "*" * (client_context.max_bson_size + _COMMAND_OVERHEAD) coll = self.db.test - self.assertRaises(DocumentTooLarge, coll.insert_one, {"data": large}) + with self.assertRaises(DocumentTooLarge): + coll.insert_one({"data": large}) # update_one and update_many are the same - self.assertRaises(DocumentTooLarge, coll.replace_one, {}, {"data": large}) - self.assertRaises(DocumentTooLarge, coll.delete_one, {"data": large}) + with self.assertRaises(DocumentTooLarge): + coll.replace_one({}, {"data": large}) + with self.assertRaises(DocumentTooLarge): + coll.delete_one({"data": large}) def test_write_large_document(self): max_size = client_context.max_bson_size @@ -849,19 +886,19 @@ def test_write_large_document(self): half_str = "x" * half_size self.assertEqual(max_size, 16777216) - self.assertRaises(OperationFailure, self.db.test.insert_one, {"foo": max_str}) - self.assertRaises( - OperationFailure, self.db.test.replace_one, {}, {"foo": max_str}, upsert=True - ) - self.assertRaises(OperationFailure, self.db.test.insert_many, [{"x": 1}, {"foo": max_str}]) + with self.assertRaises(OperationFailure): + self.db.test.insert_one({"foo": max_str}) + with self.assertRaises(OperationFailure): + self.db.test.replace_one({}, {"foo": max_str}, upsert=True) + with self.assertRaises(OperationFailure): + self.db.test.insert_many([{"x": 1}, {"foo": max_str}]) self.db.test.insert_many([{"foo": half_str}, {"foo": half_str}]) self.db.test.insert_one({"bar": "x"}) # Use w=0 here to test legacy doc size checking in all server versions unack_coll = self.db.test.with_options(write_concern=WriteConcern(w=0)) - self.assertRaises( - DocumentTooLarge, unack_coll.replace_one, {"bar": "x"}, {"bar": "x" * (max_size - 14)} - ) + with self.assertRaises(DocumentTooLarge): + unack_coll.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 14)}) self.db.test.replace_one({"bar": "x"}, {"bar": "x" * (max_size - 32)}) def test_insert_bypass_document_validation(self): @@ -871,46 +908,51 @@ def test_insert_bypass_document_validation(self): db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) 
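
# Two patterns recur in the delete hunks above: results of w=0 (unacknowledged)
# writes refuse to report counts, and polling lambdas are lifted into named
# predicate functions before being handed to the suite's wait_until helper.
# A sketch of the first pattern, assuming a reachable mongod:
from pymongo import MongoClient
from pymongo.errors import InvalidOperation
from pymongo.write_concern import WriteConcern

client = MongoClient()
db = client.get_database("test", write_concern=WriteConcern(w=0))
result = db.docs.delete_many({})
assert result.acknowledged is False
try:
    result.deleted_count  # unavailable for unacknowledged writes
except InvalidOperation:
    pass
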
# Test insert_one - self.assertRaises(OperationFailure, db.test.insert_one, {"_id": 1, "x": 100}) + with self.assertRaises(OperationFailure): + db.test.insert_one({"_id": 1, "x": 100}) result = db.test.insert_one({"_id": 1, "x": 100}, bypass_document_validation=True) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(1, result.inserted_id) result = db.test.insert_one({"_id": 2, "a": 0}) - self.assertTrue(isinstance(result, InsertOneResult)) + self.assertIsInstance(result, InsertOneResult) self.assertEqual(2, result.inserted_id) db_w0.test.insert_one({"y": 1}, bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1}), "find w:0 inserted document") + + def async_lambda(): + return db_w0.test.find_one({"y": 1}) + + wait_until(async_lambda, "find w:0 inserted document") # Test insert_many docs = [{"_id": i, "x": 100 - i} for i in range(3, 100)] - self.assertRaises(OperationFailure, db.test.insert_many, docs) + with self.assertRaises(OperationFailure): + db.test.insert_many(docs) result = db.test.insert_many(docs, bypass_document_validation=True) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertTrue(97, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) - self.assertTrue(_id in result.inserted_ids) + self.assertIsInstance(_id, int) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, db.test.count_documents({"x": doc["x"]})) self.assertTrue(result.acknowledged) docs = [{"_id": i, "a": 200 - i} for i in range(100, 200)] result = db.test.insert_many(docs) - self.assertTrue(isinstance(result, InsertManyResult)) + self.assertIsInstance(result, InsertManyResult) self.assertTrue(97, len(result.inserted_ids)) for doc in docs: _id = doc["_id"] - self.assertTrue(isinstance(_id, int)) - self.assertTrue(_id in result.inserted_ids) + self.assertIsInstance(_id, int) + self.assertIn(_id, result.inserted_ids) self.assertEqual(1, db.test.count_documents({"a": doc["a"]})) self.assertTrue(result.acknowledged) - self.assertRaises( - OperationFailure, - db_w0.test.insert_many, - [{"x": 1}, {"x": 2}], - bypass_document_validation=True, - ) + with self.assertRaises(OperationFailure): + db_w0.test.insert_many( + [{"x": 1}, {"x": 2}], + bypass_document_validation=True, + ) def test_replace_bypass_document_validation(self): db = self.db @@ -920,7 +962,8 @@ def test_replace_bypass_document_validation(self): # Test replace_one db.test.insert_one({"a": 101}) - self.assertRaises(OperationFailure, db.test.replace_one, {"a": 101}, {"y": 1}) + with self.assertRaises(OperationFailure): + db.test.replace_one({"a": 101}, {"y": 1}) self.assertEqual(0, db.test.count_documents({"y": 1})) self.assertEqual(1, db.test.count_documents({"a": 101})) db.test.replace_one({"a": 101}, {"y": 1}, bypass_document_validation=True) @@ -932,7 +975,8 @@ def test_replace_bypass_document_validation(self): self.assertEqual(1, db.test.count_documents({"a": 102})) db.test.insert_one({"y": 1}, bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.replace_one, {"y": 1}, {"x": 101}) + with self.assertRaises(OperationFailure): + db.test.replace_one({"y": 1}, {"x": 101}) self.assertEqual(0, db.test.count_documents({"x": 101})) self.assertEqual(1, db.test.count_documents({"y": 1})) db.test.replace_one({"y": 1}, {"x": 101}, bypass_document_validation=True) @@ -944,7 +988,11 @@ def 
test_replace_bypass_document_validation(self): db.test.insert_one({"y": 1}, bypass_document_validation=True) db_w0.test.replace_one({"y": 1}, {"x": 1}, bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"x": 1}), "find w:0 replaced document") + + def predicate(): + return db_w0.test.find_one({"x": 1}) + + wait_until(predicate, "find w:0 replaced document") def test_update_bypass_document_validation(self): db = self.db @@ -954,7 +1002,8 @@ def test_update_bypass_document_validation(self): db_w0 = self.db.client.get_database(self.db.name, write_concern=WriteConcern(w=0)) # Test update_one - self.assertRaises(OperationFailure, db.test.update_one, {"z": 5}, {"$inc": {"z": -10}}) + with self.assertRaises(OperationFailure): + db.test.update_one({"z": 5}, {"$inc": {"z": -10}}) self.assertEqual(0, db.test.count_documents({"z": -5})) self.assertEqual(1, db.test.count_documents({"z": 5})) db.test.update_one({"z": 5}, {"$inc": {"z": -10}}, bypass_document_validation=True) @@ -965,7 +1014,8 @@ def test_update_bypass_document_validation(self): self.assertEqual(0, db.test.count_documents({"z": -5})) db.test.insert_one({"z": -10}, bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_one, {"z": -10}, {"$inc": {"z": 1}}) + with self.assertRaises(OperationFailure): + db.test.update_one({"z": -10}, {"$inc": {"z": 1}}) self.assertEqual(0, db.test.count_documents({"z": -9})) self.assertEqual(1, db.test.count_documents({"z": -10})) db.test.update_one({"z": -10}, {"$inc": {"z": 1}}, bypass_document_validation=True) @@ -977,12 +1027,17 @@ def test_update_bypass_document_validation(self): db.test.insert_one({"y": 1, "x": 0}, bypass_document_validation=True) db_w0.test.update_one({"y": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) - wait_until(lambda: db_w0.test.find_one({"y": 1, "x": 1}), "find w:0 updated document") + + def async_lambda(): + return db_w0.test.find_one({"y": 1, "x": 1}) + + wait_until(async_lambda, "find w:0 updated document") # Test update_many db.test.insert_many([{"z": i} for i in range(3, 101)]) db.test.insert_one({"y": 0}, bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_many, {}, {"$inc": {"z": -100}}) + with self.assertRaises(OperationFailure): + db.test.update_many({}, {"$inc": {"z": -100}}) self.assertEqual(100, db.test.count_documents({"z": {"$gte": 0}})) self.assertEqual(0, db.test.count_documents({"z": {"$lt": 0}})) self.assertEqual(0, db.test.count_documents({"y": 0, "z": -100})) @@ -998,7 +1053,8 @@ def test_update_bypass_document_validation(self): self.assertEqual(50, db.test.count_documents({"z": {"$lt": 0}})) db.test.insert_many([{"z": -i} for i in range(50)], bypass_document_validation=True) - self.assertRaises(OperationFailure, db.test.update_many, {}, {"$inc": {"z": 1}}) + with self.assertRaises(OperationFailure): + db.test.update_many({}, {"$inc": {"z": 1}}) self.assertEqual(100, db.test.count_documents({"z": {"$lte": 0}})) self.assertEqual(50, db.test.count_documents({"z": {"$gt": 1}})) db.test.update_many( @@ -1015,9 +1071,11 @@ def test_update_bypass_document_validation(self): db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) db.test.insert_one({"m": 1, "x": 0}, bypass_document_validation=True) db_w0.test.update_many({"m": 1}, {"$inc": {"x": 1}}, bypass_document_validation=True) - wait_until( - lambda: db_w0.test.count_documents({"m": 1, "x": 1}) == 2, "find w:0 updated documents" - ) + + def async_lambda(): + return db_w0.test.count_documents({"m": 
1, "x": 1}) == 2 + + wait_until(async_lambda, "find w:0 updated documents") def test_bypass_document_validation_bulk_write(self): db = self.db @@ -1042,11 +1100,11 @@ def test_bypass_document_validation_bulk_write(self): # Assert that the operations would fail without bypass_doc_val for op in ops: - self.assertRaises(BulkWriteError, db.test.bulk_write, [op]) + with self.assertRaises(BulkWriteError): + db.test.bulk_write([op]) - self.assertRaises( - OperationFailure, db_w0.test.bulk_write, ops, bypass_document_validation=True - ) + with self.assertRaises(OperationFailure): + db_w0.test.bulk_write(ops, bypass_document_validation=True) def test_find_by_default_dct(self): db = self.db @@ -1062,23 +1120,23 @@ def test_find_w_fields(self): db.test.insert_one({"x": 1, "mike": "awesome", "extra thing": "abcdefghijklmnopqrstuvwxyz"}) self.assertEqual(1, db.test.count_documents({})) doc = next(db.test.find({})) - self.assertTrue("x" in doc) + self.assertIn("x", doc) doc = next(db.test.find({})) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = next(db.test.find({})) - self.assertTrue("extra thing" in doc) + self.assertIn("extra thing", doc) doc = next(db.test.find({}, ["x", "mike"])) - self.assertTrue("x" in doc) + self.assertIn("x", doc) doc = next(db.test.find({}, ["x", "mike"])) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = next(db.test.find({}, ["x", "mike"])) - self.assertFalse("extra thing" in doc) + self.assertNotIn("extra thing", doc) doc = next(db.test.find({}, ["mike"])) - self.assertFalse("x" in doc) + self.assertNotIn("x", doc) doc = next(db.test.find({}, ["mike"])) - self.assertTrue("mike" in doc) + self.assertIn("mike", doc) doc = next(db.test.find({}, ["mike"])) - self.assertFalse("extra thing" in doc) + self.assertNotIn("extra thing", doc) @no_type_check def test_fields_specifier_as_dict(self): @@ -1087,10 +1145,10 @@ def test_fields_specifier_as_dict(self): db.test.insert_one({"x": [1, 2, 3], "mike": "awesome"}) - self.assertEqual([1, 2, 3], db.test.find_one()["x"]) - self.assertEqual([2, 3], db.test.find_one(projection={"x": {"$slice": -2}})["x"]) - self.assertTrue("x" not in db.test.find_one(projection={"x": 0})) - self.assertTrue("mike" in db.test.find_one(projection={"x": 0})) + self.assertEqual([1, 2, 3], (db.test.find_one())["x"]) + self.assertEqual([2, 3], (db.test.find_one(projection={"x": {"$slice": -2}}))["x"]) + self.assertNotIn("x", db.test.find_one(projection={"x": 0})) + self.assertIn("mike", db.test.find_one(projection={"x": 0})) def test_find_w_regex(self): db = self.db @@ -1101,11 +1159,11 @@ def test_find_w_regex(self): db.test.insert_one({"x": "hello_mikey"}) db.test.insert_one({"x": "hello_test"}) - self.assertEqual(len(list(db.test.find())), 4) - self.assertEqual(len(list(db.test.find({"x": re.compile("^hello.*")}))), 4) - self.assertEqual(len(list(db.test.find({"x": re.compile("ello")}))), 4) - self.assertEqual(len(list(db.test.find({"x": re.compile("^hello$")}))), 0) - self.assertEqual(len(list(db.test.find({"x": re.compile("^hello_mi.*$")}))), 2) + self.assertEqual(len(db.test.find().to_list()), 4) + self.assertEqual(len(db.test.find({"x": re.compile("^hello.*")}).to_list()), 4) + self.assertEqual(len(db.test.find({"x": re.compile("ello")}).to_list()), 4) + self.assertEqual(len(db.test.find({"x": re.compile("^hello$")}).to_list()), 0) + self.assertEqual(len(db.test.find({"x": re.compile("^hello_mi.*$")}).to_list()), 2) def test_id_can_be_anything(self): db = self.db @@ -1113,7 +1171,7 @@ def 
test_id_can_be_anything(self): db.test.delete_many({}) auto_id = {"hello": "world"} db.test.insert_one(auto_id) - self.assertTrue(isinstance(auto_id["_id"], ObjectId)) + self.assertIsInstance(auto_id["_id"], ObjectId) numeric = {"_id": 240, "hello": "world"} db.test.insert_one(numeric) @@ -1125,7 +1183,7 @@ def test_id_can_be_anything(self): for x in db.test.find(): self.assertEqual(x["hello"], "world") - self.assertTrue("_id" in x) + self.assertIn("_id", x) def test_unique_index(self): db = self.db @@ -1204,14 +1262,15 @@ def test_write_error_text_handling(self): db.test.insert_one({"text": text}) # Should raise DuplicateKeyError, not InvalidBSON - self.assertRaises(DuplicateKeyError, db.test.insert_one, {"text": text}) + with self.assertRaises(DuplicateKeyError): + db.test.insert_one({"text": text}) - self.assertRaises( - DuplicateKeyError, db.test.replace_one, {"_id": ObjectId()}, {"text": text}, upsert=True - ) + with self.assertRaises(DuplicateKeyError): + db.test.replace_one({"_id": ObjectId()}, {"text": text}, upsert=True) # Should raise BulkWriteError, not InvalidBSON - self.assertRaises(BulkWriteError, db.test.insert_many, [{"text": text}]) + with self.assertRaises(BulkWriteError): + db.test.insert_many([{"text": text}]) def test_write_error_unicode(self): coll = self.db.test @@ -1233,19 +1292,21 @@ def test_wtimeout(self): collection.insert_one({"_id": 1}) coll = collection.with_options(write_concern=WriteConcern(w=1, wtimeout=1000)) - self.assertRaises(DuplicateKeyError, coll.insert_one, {"_id": 1}) + with self.assertRaises(DuplicateKeyError): + coll.insert_one({"_id": 1}) coll = collection.with_options(write_concern=WriteConcern(wtimeout=1000)) - self.assertRaises(DuplicateKeyError, coll.insert_one, {"_id": 1}) + with self.assertRaises(DuplicateKeyError): + coll.insert_one({"_id": 1}) def test_error_code(self): try: self.db.test.update_many({}, {"$thismodifierdoesntexist": 1}) except OperationFailure as exc: - self.assertTrue(exc.code in (9, 10147, 16840, 17009)) + self.assertIn(exc.code, (9, 10147, 16840, 17009)) # Just check that we set the error document. Fields # vary by MongoDB version. 
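
# test_error_code above inspects OperationFailure.code and .details rather than
# matching message text, since those fields vary by server version. A sketch of
# that inspection pattern, assuming a reachable mongod:
from pymongo import MongoClient
from pymongo.errors import OperationFailure

client = MongoClient()
try:
    client.test.docs.update_many({}, {"$thismodifierdoesntexist": 1})
except OperationFailure as exc:
    print(exc.code)     # numeric server error code, e.g. 9
    print(exc.details)  # full error document returned by the server
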
- self.assertTrue(exc.details is not None) + self.assertIsNotNone(exc.details) else: self.fail("OperationFailure was not raised") @@ -1262,47 +1323,49 @@ def test_index_on_subfield(self): db.test.insert_one({"hello": {"a": 4, "b": 5}}) db.test.insert_one({"hello": {"a": 7, "b": 2}}) - self.assertRaises(DuplicateKeyError, db.test.insert_one, {"hello": {"a": 4, "b": 10}}) + with self.assertRaises(DuplicateKeyError): + db.test.insert_one({"hello": {"a": 4, "b": 10}}) def test_replace_one(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, lambda: db.test.replace_one({}, {"$set": {"x": 1}})) + with self.assertRaises(ValueError): + db.test.replace_one({}, {"$set": {"x": 1}}) - id1 = db.test.insert_one({"x": 1}).inserted_id + id1 = (db.test.insert_one({"x": 1})).inserted_id result = db.test.replace_one({"x": 1}, {"y": 1}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 1})) self.assertEqual(0, db.test.count_documents({"x": 1})) - self.assertEqual(db.test.find_one(id1)["y"], 1) # type: ignore + self.assertEqual((db.test.find_one(id1))["y"], 1) # type: ignore replacement = RawBSONDocument(encode({"_id": id1, "z": 1})) result = db.test.replace_one({"y": 1}, replacement, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"z": 1})) self.assertEqual(0, db.test.count_documents({"y": 1})) - self.assertEqual(db.test.find_one(id1)["z"], 1) # type: ignore + self.assertEqual((db.test.find_one(id1))["z"], 1) # type: ignore result = db.test.replace_one({"x": 2}, {"y": 2}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) self.assertEqual(1, db.test.count_documents({"y": 2})) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.replace_one({"x": 0}, {"y": 0}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) @@ -1312,78 +1375,93 @@ def test_update_one(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, lambda: db.test.update_one({}, {"x": 1})) + with self.assertRaises(ValueError): + db.test.update_one({}, {"x": 1}) - id1 = db.test.insert_one({"x": 5}).inserted_id + id1 = (db.test.insert_one({"x": 5})).inserted_id result = db.test.update_one({}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - 
self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) - self.assertEqual(db.test.find_one(id1)["x"], 6) # type: ignore + self.assertEqual((db.test.find_one(id1))["x"], 6) # type: ignore - id2 = db.test.insert_one({"x": 1}).inserted_id + id2 = (db.test.insert_one({"x": 1})).inserted_id result = db.test.update_one({"x": 6}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) - self.assertEqual(db.test.find_one(id1)["x"], 7) # type: ignore - self.assertEqual(db.test.find_one(id2)["x"], 1) # type: ignore + self.assertEqual((db.test.find_one(id1))["x"], 7) # type: ignore + self.assertEqual((db.test.find_one(id2))["x"], 1) # type: ignore result = db.test.update_one({"x": 2}, {"$set": {"y": 1}}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) self.assertFalse(result.acknowledged) + def test_update_result(self): + db = self.db + db.drop_collection("test") + + result = db.test.update_one({"x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = db.test.update_one({"_id": None, "x": 0}, {"$inc": {"x": 1}}, upsert=True) + self.assertEqual(result.did_upsert, True) + + result = db.test.update_one({"_id": None}, {"$inc": {"x": 1}}) + self.assertEqual(result.did_upsert, False) + def test_update_many(self): db = self.db db.drop_collection("test") - self.assertRaises(ValueError, lambda: db.test.update_many({}, {"x": 1})) + with self.assertRaises(ValueError): + db.test.update_many({}, {"x": 1}) db.test.insert_one({"x": 4, "y": 3}) db.test.insert_one({"x": 5, "y": 5}) db.test.insert_one({"x": 4, "y": 4}) result = db.test.update_many({"x": 4}, {"$set": {"y": 5}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(2, result.matched_count) - self.assertTrue(result.modified_count in (None, 2)) + self.assertIn(result.modified_count, (None, 2)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) self.assertEqual(3, db.test.count_documents({"y": 5})) result = db.test.update_many({"x": 5}, {"$set": {"y": 6}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(1, result.matched_count) - self.assertTrue(result.modified_count in (None, 1)) + self.assertIn(result.modified_count, (None, 1)) self.assertIsNone(result.upserted_id) self.assertTrue(result.acknowledged) 
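
# The new test_update_result above exercises UpdateResult.did_upsert, which
# reports whether an upsert actually inserted a document. A minimal sketch,
# assuming a reachable mongod:
from pymongo import MongoClient

client = MongoClient()
coll = client.test.upsert_demo
coll.drop()
result = coll.update_one({"x": 0}, {"$inc": {"x": 1}}, upsert=True)
assert result.did_upsert        # no match, so a document was inserted
result = coll.update_one({"x": 1}, {"$inc": {"x": 1}})
assert not result.did_upsert    # matched an existing document instead
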
self.assertEqual(1, db.test.count_documents({"y": 6})) result = db.test.update_many({"x": 2}, {"$set": {"y": 1}}, True) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertEqual(0, result.matched_count) - self.assertTrue(result.modified_count in (None, 0)) - self.assertTrue(isinstance(result.upserted_id, ObjectId)) + self.assertIn(result.modified_count, (None, 0)) + self.assertIsInstance(result.upserted_id, ObjectId) self.assertTrue(result.acknowledged) db = db.client.get_database(db.name, write_concern=WriteConcern(w=0)) result = db.test.update_many({"x": 0}, {"$inc": {"x": 1}}) - self.assertTrue(isinstance(result, UpdateResult)) + self.assertIsInstance(result, UpdateResult) self.assertRaises(InvalidOperation, lambda: result.matched_count) self.assertRaises(InvalidOperation, lambda: result.modified_count) self.assertRaises(InvalidOperation, lambda: result.upserted_id) @@ -1401,28 +1479,26 @@ def test_update_check_keys(self): # I know this seems like testing the server but I'd like to be notified # by CI if the server's behavior changes here. doc = SON([("$set", {"foo.bar": "bim"}), ("hello", "world")]) - self.assertRaises( - OperationFailure, self.db.test.update_one, {"hello": "world"}, doc, upsert=True - ) + with self.assertRaises(OperationFailure): + self.db.test.update_one({"hello": "world"}, doc, upsert=True) # This is going to cause keys to be checked and raise InvalidDocument. # That's OK assuming the server's behavior in the previous assert # doesn't change. If the behavior changes checking the first key for # '$' in update won't be good enough anymore. doc = SON([("hello", "world"), ("$set", {"foo.bar": "bim"})]) - self.assertRaises( - OperationFailure, self.db.test.replace_one, {"hello": "world"}, doc, upsert=True - ) + with self.assertRaises(OperationFailure): + self.db.test.replace_one({"hello": "world"}, doc, upsert=True) # Replace with empty document - self.assertNotEqual(0, self.db.test.replace_one({"hello": "world"}, {}).matched_count) + self.assertNotEqual(0, (self.db.test.replace_one({"hello": "world"}, {})).matched_count) def test_acknowledged_delete(self): db = self.db db.drop_collection("test") db.test.insert_many([{"x": 1}, {"x": 1}]) - self.assertEqual(2, db.test.delete_many({}).deleted_count) - self.assertEqual(0, db.test.delete_many({}).deleted_count) + self.assertEqual(2, (db.test.delete_many({})).deleted_count) + self.assertEqual(0, (db.test.delete_many({})).deleted_count) @client_context.require_version_max(4, 9) def test_manual_last_error(self): @@ -1460,12 +1536,13 @@ def test_aggregate(self): db.drop_collection("test") db.test.insert_one({"foo": [1, 2]}) - self.assertRaises(TypeError, db.test.aggregate, "wow") + with self.assertRaises(TypeError): + db.test.aggregate("wow") # type: ignore[arg-type] pipeline = {"$project": {"_id": False, "foo": True}} result = db.test.aggregate([pipeline]) - self.assertTrue(isinstance(result, CommandCursor)) - self.assertEqual([{"foo": [1, 2]}], list(result)) + self.assertIsInstance(result, CommandCursor) + self.assertEqual([{"foo": [1, 2]}], result.to_list()) # Test write concern. 
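
# The aggregate tests above treat the returned CommandCursor like any other
# cursor: iterate it, or drain it with to_list(). A sketch, assuming a
# reachable mongod:
from pymongo import MongoClient

client = MongoClient()
coll = client.test.agg_demo
coll.drop()
coll.insert_one({"foo": [1, 2]})
result = coll.aggregate([{"$project": {"_id": False, "foo": True}}])
assert result.to_list() == [{"foo": [1, 2]}]
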
with self.write_concern_collection() as coll: @@ -1476,12 +1553,13 @@ def test_aggregate_raw_bson(self): db.drop_collection("test") db.test.insert_one({"foo": [1, 2]}) - self.assertRaises(TypeError, db.test.aggregate, "wow") + with self.assertRaises(TypeError): + db.test.aggregate("wow") # type: ignore[arg-type] pipeline = {"$project": {"_id": False, "foo": True}} coll = db.get_collection("test", codec_options=CodecOptions(document_class=RawBSONDocument)) result = coll.aggregate([pipeline]) - self.assertTrue(isinstance(result, CommandCursor)) + self.assertIsInstance(result, CommandCursor) first_result = next(result) self.assertIsInstance(first_result, RawBSONDocument) self.assertEqual([1, 2], list(first_result["foo"])) @@ -1490,7 +1568,7 @@ def test_aggregation_cursor_validation(self): db = self.db projection = {"$project": {"_id": "$_id"}} cursor = db.test.aggregate([projection], cursor={}) - self.assertTrue(isinstance(cursor, CommandCursor)) + self.assertIsInstance(cursor, CommandCursor) def test_aggregation_cursor(self): db = self.db @@ -1509,16 +1587,16 @@ def test_aggregation_cursor(self): # Use batchSize to ensure multiple getMore messages cursor = db.test.aggregate([{"$project": {"_id": "$_id"}}], batchSize=5) - self.assertEqual(expected_sum, sum(doc["_id"] for doc in cursor)) + self.assertEqual(expected_sum, sum(doc["_id"] for doc in cursor.to_list())) # Test that batchSize is handled properly. cursor = db.test.aggregate([], batchSize=5) - self.assertEqual(5, len(cursor._CommandCursor__data)) # type: ignore + self.assertEqual(5, len(cursor._data)) # Force a getMore - cursor._CommandCursor__data.clear() # type: ignore + cursor._data.clear() next(cursor) # batchSize - 1 - self.assertEqual(4, len(cursor._CommandCursor__data)) # type: ignore + self.assertEqual(4, len(cursor._data)) # Exhaust the cursor. There shouldn't be any errors. 
for _doc in cursor: pass @@ -1532,7 +1610,7 @@ def test_aggregation_cursor_alive(self): while True: cursor.next() n += 1 - if 3 == n: + if n == 3: self.assertFalse(cursor.alive) break @@ -1543,7 +1621,8 @@ def try_invalid_session(): with self.db.test.aggregate([], {}): # type:ignore pass - self.assertRaisesRegex(ValueError, "must be a ClientSession", try_invalid_session) + with self.assertRaisesRegex(ValueError, "must be a ClientSession"): + try_invalid_session() def test_large_limit(self): db = self.db @@ -1580,12 +1659,18 @@ def test_rename(self): db.drop_collection("test") db.drop_collection("foo") - self.assertRaises(TypeError, db.test.rename, 5) - self.assertRaises(InvalidName, db.test.rename, "") - self.assertRaises(InvalidName, db.test.rename, "te$t") - self.assertRaises(InvalidName, db.test.rename, ".test") - self.assertRaises(InvalidName, db.test.rename, "test.") - self.assertRaises(InvalidName, db.test.rename, "tes..t") + with self.assertRaises(TypeError): + db.test.rename(5) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + db.test.rename("") + with self.assertRaises(InvalidName): + db.test.rename("te$t") + with self.assertRaises(InvalidName): + db.test.rename(".test") + with self.assertRaises(InvalidName): + db.test.rename("test.") + with self.assertRaises(InvalidName): + db.test.rename("tes..t") self.assertEqual(0, db.test.count_documents({})) self.assertEqual(0, db.foo.count_documents({})) @@ -1605,7 +1690,8 @@ def test_rename(self): x += 1 db.test.insert_one({}) - self.assertRaises(OperationFailure, db.foo.rename, "test") + with self.assertRaises(OperationFailure): + db.foo.rename("test") db.foo.rename("test", dropTarget=True) with self.write_concern_collection() as coll: @@ -1616,29 +1702,29 @@ def test_find_one(self): db = self.db db.drop_collection("test") - _id = db.test.insert_one({"hello": "world", "foo": "bar"}).inserted_id + _id = (db.test.insert_one({"hello": "world", "foo": "bar"})).inserted_id - self.assertEqual("world", db.test.find_one()["hello"]) + self.assertEqual("world", (db.test.find_one())["hello"]) self.assertEqual(db.test.find_one(_id), db.test.find_one()) self.assertEqual(db.test.find_one(None), db.test.find_one()) self.assertEqual(db.test.find_one({}), db.test.find_one()) self.assertEqual(db.test.find_one({"hello": "world"}), db.test.find_one()) - self.assertTrue("hello" in db.test.find_one(projection=["hello"])) - self.assertTrue("hello" not in db.test.find_one(projection=["foo"])) + self.assertIn("hello", db.test.find_one(projection=["hello"])) + self.assertNotIn("hello", db.test.find_one(projection=["foo"])) - self.assertTrue("hello" in db.test.find_one(projection=("hello",))) - self.assertTrue("hello" not in db.test.find_one(projection=("foo",))) + self.assertIn("hello", db.test.find_one(projection=("hello",))) + self.assertNotIn("hello", db.test.find_one(projection=("foo",))) - self.assertTrue("hello" in db.test.find_one(projection=set(["hello"]))) - self.assertTrue("hello" not in db.test.find_one(projection=set(["foo"]))) + self.assertIn("hello", db.test.find_one(projection={"hello"})) + self.assertNotIn("hello", db.test.find_one(projection={"foo"})) - self.assertTrue("hello" in db.test.find_one(projection=frozenset(["hello"]))) - self.assertTrue("hello" not in db.test.find_one(projection=frozenset(["foo"]))) + self.assertIn("hello", db.test.find_one(projection=frozenset(["hello"]))) + self.assertNotIn("hello", db.test.find_one(projection=frozenset(["foo"]))) self.assertEqual(["_id"], list(db.test.find_one(projection={"_id": 
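
# test_exhaust above pins down exhaust-cursor semantics: the server streams
# every remaining batch over one dedicated connection, so EXHAUST is rejected
# on mongos and cannot be combined with limit(). A sketch, assuming a direct
# connection to a standalone or replica-set mongod:
from pymongo import MongoClient
from pymongo.cursor import CursorType

client = MongoClient()
coll = client.test.exhaust_demo
coll.drop()
coll.insert_many([{"i": i} for i in range(150)])
count = 0
for _ in coll.find(cursor_type=CursorType.EXHAUST):
    count += 1
assert count == 150
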
True}))) - self.assertTrue("hello" in list(db.test.find_one(projection={}))) - self.assertTrue("hello" in list(db.test.find_one(projection=[]))) + self.assertIn("hello", list(db.test.find_one(projection={}))) + self.assertIn("hello", list(db.test.find_one(projection=[]))) self.assertEqual(None, db.test.find_one({"hello": "foo"})) self.assertEqual(None, db.test.find_one(ObjectId())) @@ -1658,8 +1744,8 @@ def test_find_one_with_find_args(self): db.test.insert_many([{"x": i} for i in range(1, 4)]) - self.assertEqual(1, db.test.find_one()["x"]) - self.assertEqual(2, db.test.find_one(skip=1, limit=2)["x"]) + self.assertEqual(1, (db.test.find_one())["x"]) + self.assertEqual(2, (db.test.find_one(skip=1, limit=2))["x"]) def test_find_with_sort(self): db = self.db @@ -1667,9 +1753,9 @@ def test_find_with_sort(self): db.test.insert_many([{"x": 2}, {"x": 1}, {"x": 3}]) - self.assertEqual(2, db.test.find_one()["x"]) - self.assertEqual(1, db.test.find_one(sort=[("x", 1)])["x"]) - self.assertEqual(3, db.test.find_one(sort=[("x", -1)])["x"]) + self.assertEqual(2, (db.test.find_one())["x"]) + self.assertEqual(1, (db.test.find_one(sort=[("x", 1)]))["x"]) + self.assertEqual(3, (db.test.find_one(sort=[("x", -1)]))["x"]) def to_list(things): return [thing["x"] for thing in things] @@ -1678,52 +1764,58 @@ def to_list(things): self.assertEqual([1, 2, 3], to_list(db.test.find(sort=[("x", 1)]))) self.assertEqual([3, 2, 1], to_list(db.test.find(sort=[("x", -1)]))) - self.assertRaises(TypeError, db.test.find, sort=5) - self.assertRaises(TypeError, db.test.find, sort="hello") - self.assertRaises(ValueError, db.test.find, sort=["hello", 1]) + with self.assertRaises(TypeError): + db.test.find(sort=5) + with self.assertRaises(TypeError): + db.test.find(sort="hello") + with self.assertRaises(TypeError): + db.test.find(sort=["hello", 1]) # TODO doesn't actually test functionality, just that it doesn't blow up def test_cursor_timeout(self): - list(self.db.test.find(no_cursor_timeout=True)) - list(self.db.test.find(no_cursor_timeout=False)) + self.db.test.find(no_cursor_timeout=True).to_list() + self.db.test.find(no_cursor_timeout=False).to_list() def test_exhaust(self): if is_mongos(self.db.client): - self.assertRaises(InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST) + with self.assertRaises(InvalidOperation): + next(self.db.test.find(cursor_type=CursorType.EXHAUST)) return # Limit is incompatible with exhaust. - self.assertRaises( - InvalidOperation, self.db.test.find, cursor_type=CursorType.EXHAUST, limit=5 - ) + with self.assertRaises(InvalidOperation): + next(self.db.test.find(cursor_type=CursorType.EXHAUST, limit=5)) cur = self.db.test.find(cursor_type=CursorType.EXHAUST) - self.assertRaises(InvalidOperation, cur.limit, 5) + with self.assertRaises(InvalidOperation): + cur.limit(5) + cur.next() cur = self.db.test.find(limit=5) - self.assertRaises(InvalidOperation, cur.add_option, 64) + with self.assertRaises(InvalidOperation): + cur.add_option(64) cur = self.db.test.find() cur.add_option(64) - self.assertRaises(InvalidOperation, cur.limit, 5) + with self.assertRaises(InvalidOperation): + cur.limit(5) self.db.drop_collection("test") # Insert enough documents to require more than one batch self.db.test.insert_many([{"i": i} for i in range(150)]) - client = rs_or_single_client(maxPoolSize=1) - self.addCleanup(client.close) + client = self.rs_or_single_client(maxPoolSize=1) pool = get_pool(client) # Make sure the socket is returned after exhaustion. 
cur = client[self.db.name].test.find(cursor_type=CursorType.EXHAUST) next(cur) - self.assertEqual(0, len(pool.sockets)) + self.assertEqual(0, len(pool.conns)) for _ in cur: pass - self.assertEqual(1, len(pool.sockets)) + self.assertEqual(1, len(pool.conns)) # Same as previous but don't call next() for _ in client[self.db.name].test.find(cursor_type=CursorType.EXHAUST): pass - self.assertEqual(1, len(pool.sockets)) + self.assertEqual(1, len(pool.conns)) # If the Cursor instance is discarded before being completely iterated # and the socket has pending data (more_to_come=True) we have to close @@ -1736,15 +1828,15 @@ def test_exhaust(self): next(cur) else: next(cur) - self.assertEqual(0, len(pool.sockets)) - if sys.platform.startswith("java") or "PyPy" in sys.version: - # Don't wait for GC or use gc.collect(), it's unreliable. - cur.close() + self.assertEqual(0, len(pool.conns)) + # if sys.platform.startswith("java") or "PyPy" in sys.version: + # # Don't wait for GC or use gc.collect(), it's unreliable. + cur.close() cur = None # Wait until the background thread returns the socket. wait_until(lambda: pool.active_sockets == 0, "return socket") # The socket should be discarded. - self.assertEqual(0, len(pool.sockets)) + self.assertEqual(0, len(pool.conns)) def test_distinct(self): self.db.drop_collection("test") @@ -1783,7 +1875,7 @@ def test_query_on_query_field(self): self.db.test.insert_one({"bar": "foo"}) self.assertEqual(1, self.db.test.count_documents({"query": {"$ne": None}})) - self.assertEqual(1, len(list(self.db.test.find({"query": {"$ne": None}})))) + self.assertEqual(1, len(self.db.test.find({"query": {"$ne": None}}).to_list())) def test_min_query(self): self.db.drop_collection("test") @@ -1792,7 +1884,7 @@ def test_min_query(self): cursor = self.db.test.find({"$min": {"x": 2}, "$query": {}}, hint="x_1") - docs = list(cursor) + docs = cursor.to_list() self.assertEqual(1, len(docs)) self.assertEqual(2, docs[0]["x"]) @@ -1842,9 +1934,11 @@ def test_insert_many_large_batch(self): # 2 batches, 2nd insert fails, unacknowledged, ordered. unack_coll = db.collection_2.with_options(write_concern=WriteConcern(w=0)) unack_coll.insert_many(insert_second_fails) - wait_until( - lambda: 1 == db.collection_2.count_documents({}), "insert 1 document", timeout=60 - ) + + def async_lambda(): + return db.collection_2.count_documents({}) == 1 + + wait_until(async_lambda, "insert 1 document", timeout=60) db.collection_2.drop() @@ -1871,10 +1965,11 @@ def test_insert_many_large_batch(self): unack_coll = db.collection_4.with_options(write_concern=WriteConcern(w=0)) unack_coll.insert_many(insert_two_failures, ordered=False) + def async_lambda(): + return db.collection_4.count_documents({}) == 2 + # Only the first and third documents are inserted. 
- wait_until( - lambda: 2 == db.collection_4.count_documents({}), "insert 2 documents", timeout=60 - ) + wait_until(async_lambda, "insert 2 documents", timeout=60) db.collection_4.drop() @@ -1885,7 +1980,7 @@ def test_messages_with_unicode_collection_names(self): db["Employés"].replace_one({"x": 1}, {"x": 2}) db["Employés"].delete_many({}) db["Employés"].find_one() - list(db["Employés"].find()) + db["Employés"].find().to_list() def test_drop_indexes_non_existent(self): self.db.drop_collection("test") @@ -1896,7 +1991,8 @@ def test_drop_indexes_non_existent(self): def test_bad_encode(self): c = self.db.test c.drop() - self.assertRaises(InvalidDocument, c.insert_one, {"x": c}) + with self.assertRaises(InvalidDocument): + c.insert_one({"x": c}) class BadGetAttr(dict): def __getattr__(self, name): @@ -1904,7 +2000,7 @@ def __getattr__(self, name): bad = BadGetAttr([("foo", "bar")]) c.insert_one({"bad": bad}) - self.assertEqual("bar", c.find_one()["bad"]["foo"]) # type: ignore + self.assertEqual("bar", (c.find_one())["bad"]["foo"]) # type: ignore def test_array_filters_validation(self): # array_filters must be a list. @@ -1977,67 +2073,58 @@ def test_find_one_and(self): c.insert_one({"j": j, "i": 0}) sort = [("j", DESCENDING)] - self.assertEqual(4, c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort)["j"]) + self.assertEqual(4, (c.find_one_and_update({}, {"$inc": {"i": 1}}, sort=sort))["j"]) def test_find_one_and_write_concern(self): - listener = EventListener() - db = single_client(event_listeners=[listener])[self.db.name] + listener = OvertCommandListener() + db = (self.single_client(event_listeners=[listener]))[self.db.name] # non-default WriteConcern. c_w0 = db.get_collection("test", write_concern=WriteConcern(w=0)) # default WriteConcern. c_default = db.get_collection("test", write_concern=WriteConcern()) - results = listener.results # Authenticate the client and throw out auth commands from the listener. db.command("ping") - results.clear() + listener.reset() c_w0.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() c_w0.find_one_and_replace({"_id": 1}, {"foo": "bar"}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() c_w0.find_one_and_delete({"_id": 1}) - self.assertEqual({"w": 0}, results["started"][0].command["writeConcern"]) - results.clear() + self.assertEqual({"w": 0}, listener.started_events[0].command["writeConcern"]) + listener.reset() # Test write concern errors. 
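
# The find_one_and_* hunks above swap the old EventListener.results dict for
# OvertCommandListener's started_events list and reset() helper; both are
# internal test utilities, not public API. The public monitoring interface
# they wrap looks roughly like this sketch, assuming a reachable mongod:
from pymongo import MongoClient, monitoring

class StartedListener(monitoring.CommandListener):
    """Collects command-started events, mirroring what the test helper exposes."""

    def __init__(self):
        self.started_events = []

    def started(self, event):
        self.started_events.append(event)

    def succeeded(self, event):
        pass

    def failed(self, event):
        pass

    def reset(self):
        self.started_events.clear()

listener = StartedListener()
client = MongoClient(event_listeners=[listener])
client.admin.command("ping")
assert listener.started_events[-1].command_name == "ping"
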
if client_context.is_rs: c_wc_error = db.get_collection( "test", write_concern=WriteConcern(w=len(client_context.nodes) + 1) ) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_update, - {"_id": 1}, - {"$set": {"foo": "bar"}}, - ) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_replace, - {"w": 0}, - results["started"][0].command["writeConcern"], - ) - self.assertRaises( - WriteConcernError, - c_wc_error.find_one_and_delete, - {"w": 0}, - results["started"][0].command["writeConcern"], - ) - results.clear() + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_replace( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + with self.assertRaises(WriteConcernError): + c_wc_error.find_one_and_delete( + {"w": 0}, listener.started_events[0].command["writeConcern"] + ) + listener.reset() c_default.find_one_and_update({"_id": 1}, {"$set": {"foo": "bar"}}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() c_default.find_one_and_replace({"_id": 1}, {"foo": "bar"}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() c_default.find_one_and_delete({"_id": 1}) - self.assertNotIn("writeConcern", results["started"][0].command) - results.clear() + self.assertNotIn("writeConcern", listener.started_events[0].command) + listener.reset() def test_find_with_nested(self): c = self.db.test @@ -2102,9 +2189,9 @@ def test_find_regex(self): c.drop() c.insert_one({"r": re.compile(".*")}) - self.assertTrue(isinstance(c.find_one()["r"], Regex)) # type: ignore + self.assertIsInstance((c.find_one())["r"], Regex) # type: ignore for doc in c.find(): - self.assertTrue(isinstance(doc["r"], Regex)) + self.assertIsInstance(doc["r"], Regex) def test_find_command_generation(self): cmd = _gen_find_command( @@ -2119,9 +2206,7 @@ def test_find_command_generation(self): None, None, ) - self.assertEqual( - cmd.to_dict(), SON([("find", "coll"), ("$dumb", 2), ("filter", {"foo": 1})]).to_dict() - ) + self.assertEqual(cmd, {"find": "coll", "$dumb": 2, "filter": {"foo": 1}}) def test_bool(self): with self.assertRaises(NotImplementedError): @@ -2130,10 +2215,14 @@ def test_bool(self): @client_context.require_version_min(5, 0, 0) def test_helpers_with_let(self): c = self.db.test + + def afind(*args, **kwargs): + return c.find(*args, **kwargs) + helpers = [ (c.delete_many, ({}, {})), (c.delete_one, ({}, {})), - (c.find, ({})), + (afind, ({})), (c.update_many, ({}, {"$inc": {"x": 3}})), (c.update_one, ({}, {"$inc": {"x": 3}})), (c.find_one_and_delete, ({}, {})), diff --git a/test/test_collection_management.py b/test/test_collection_management.py index c5e29eda8a..063c20df8f 100644 --- a/test/test_collection_management.py +++ b/test/test_collection_management.py @@ -13,8 +13,10 @@ # limitations under the License. """Test the collection management unified spec tests.""" +from __future__ import annotations import os +import pathlib import sys sys.path[0:0] = [""] @@ -22,11 +24,18 @@ from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. 
-TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "collection_management") +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "collection_management") +else: + _TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "collection_management" + ) # Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/test_command_logging.py b/test/test_command_logging.py new file mode 100644 index 0000000000..cf865920ca --- /dev/null +++ b/test/test_command_logging.py @@ -0,0 +1,44 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_command_monitoring.py b/test/test_command_monitoring.py new file mode 100644 index 0000000000..4f5ef06f28 --- /dev/null +++ b/test/test_command_monitoring.py @@ -0,0 +1,45 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the command monitoring unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. 
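+# The synchronous tests sit in test/ next to the JSON specs; the asynchronous
+# variants of these modules run from a subpackage one level deeper and climb
+# an extra parent to reach the same spec directory.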
+if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "command_monitoring") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "command_monitoring") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_command_monitoring_legacy.py b/test/test_command_monitoring_legacy.py deleted file mode 100644 index 5d9f2fe3ee..0000000000 --- a/test/test_command_monitoring_legacy.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Run the command monitoring legacy-format spec tests.""" - -import os -import re -import sys - -sys.path[0:0] = [""] - -from test import client_context, unittest -from test.utils import EventListener, parse_read_preference, rs_or_single_client - -import pymongo -from bson import json_util -from pymongo import MongoClient -from pymongo.errors import OperationFailure -from pymongo.write_concern import WriteConcern - -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. 
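-    # e.g. "findOneAndDelete" becomes "find_one_and_delete".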
- snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -class TestAllScenarios(unittest.TestCase): - listener: EventListener - client: MongoClient - - @classmethod - @client_context.require_connection - def setUpClass(cls): - cls.listener = EventListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - def tearDownClass(cls): - cls.client.close() - - def tearDown(self): - self.listener.results.clear() - - -def format_actual_results(results): - started = results["started"] - succeeded = results["succeeded"] - failed = results["failed"] - msg = "\nStarted: %r" % (started[0].command if len(started) else None,) - msg += "\nSucceeded: %r" % (succeeded[0].reply if len(succeeded) else None,) - msg += "\nFailed: %r" % (failed[0].failure if len(failed) else None,) - return msg - - -def create_test(scenario_def, test): - def run_scenario(self): - dbname = scenario_def["database_name"] - collname = scenario_def["collection_name"] - - coll = self.client[dbname][collname] - coll.drop() - coll.insert_many(scenario_def["data"]) - self.listener.results.clear() - name = camel_to_snake(test["operation"]["name"]) - if "read_preference" in test["operation"]: - coll = coll.with_options( - read_preference=parse_read_preference(test["operation"]["read_preference"]) - ) - if "collectionOptions" in test["operation"]: - colloptions = test["operation"]["collectionOptions"] - if "writeConcern" in colloptions: - concern = colloptions["writeConcern"] - coll = coll.with_options(write_concern=WriteConcern(**concern)) - - test_args = test["operation"]["arguments"] - if "options" in test_args: - options = test_args.pop("options") - test_args.update(options) - args = {} - for arg in test_args: - args[camel_to_snake(arg)] = test_args[arg] - - if name == "count": - self.skipTest("PyMongo does not support count") - elif name == "bulk_write": - bulk_args = [] - for request in args["requests"]: - opname = request["name"] - klass = opname[0:1].upper() + opname[1:] - arg = getattr(pymongo, klass)(**request["arguments"]) - bulk_args.append(arg) - try: - coll.bulk_write(bulk_args, args.get("ordered", True)) - except OperationFailure: - pass - elif name == "find": - if "sort" in args: - args["sort"] = list(args["sort"].items()) - if "hint" in args: - args["hint"] = list(args["hint"].items()) - for arg in "skip", "limit": - if arg in args: - args[arg] = int(args[arg]) - try: - # Iterate the cursor. - tuple(coll.find(**args)) - except OperationFailure: - pass - else: - try: - getattr(coll, name)(**args) - except OperationFailure: - pass - - res = self.listener.results - for expectation in test["expectations"]: - event_type = next(iter(expectation)) - if event_type == "command_started_event": - event = res["started"][0] if len(res["started"]) else None - if event is not None: - # The tests substitute 42 for any number other than 0. - if event.command_name == "getMore" and event.command["getMore"]: - event.command["getMore"] = 42 - elif event.command_name == "killCursors": - event.command["cursors"] = [42] - elif event.command_name == "update": - # TODO: remove this once PYTHON-1744 is done. - # Add upsert and multi fields back into - # expectations. 
- updates = expectation[event_type]["command"]["updates"] - for update in updates: - update.setdefault("upsert", False) - update.setdefault("multi", False) - elif event_type == "command_succeeded_event": - event = res["succeeded"].pop(0) if len(res["succeeded"]) else None - if event is not None: - reply = event.reply - # The tests substitute 42 for any number other than 0, - # and "" for any error message. - if "writeErrors" in reply: - for doc in reply["writeErrors"]: - # Remove any new fields the server adds. The tests - # only have index, code, and errmsg. - diff = set(doc) - set(["index", "code", "errmsg"]) - for field in diff: - doc.pop(field) - doc["code"] = 42 - doc["errmsg"] = "" - elif "cursor" in reply: - if reply["cursor"]["id"]: - reply["cursor"]["id"] = 42 - elif event.command_name == "killCursors": - # Make the tests continue to pass when the killCursors - # command is actually in use. - if "cursorsKilled" in reply: - reply.pop("cursorsKilled") - reply["cursorsUnknown"] = [42] - # Found succeeded event. Pop related started event. - res["started"].pop(0) - elif event_type == "command_failed_event": - event = res["failed"].pop(0) if len(res["failed"]) else None - if event is not None: - # Found failed event. Pop related started event. - res["started"].pop(0) - else: - self.fail("Unknown event type") - - if event is None: - event_name = event_type.split("_")[1] - self.fail( - "Expected %s event for %s command. Actual " - "results:%s" - % ( - event_name, - expectation[event_type]["command_name"], - format_actual_results(res), - ) - ) - - for attr, expected in expectation[event_type].items(): - if "options" in expected: - options = expected.pop("options") - expected.update(options) - actual = getattr(event, attr) - if isinstance(expected, dict): - for key, val in expected.items(): - self.assertEqual(val, actual[key]) - else: - self.assertEqual(actual, expected) - - return run_scenario - - -def create_tests(): - for dirpath, _, filenames in os.walk(os.path.join(_TEST_PATH, "legacy")): - dirname = os.path.split(dirpath)[-1] - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - scenario_def = json_util.loads(scenario_stream.read()) - assert bool(scenario_def.get("tests")), "tests cannot be empty" - # Construct test from scenario. 
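-            # The scenario's ignore_if_* fields map onto client_context
-            # decorators that gate each generated test by server version or
-            # topology.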
- for test in scenario_def["tests"]: - new_test = create_test(scenario_def, test) - if "ignore_if_server_version_greater_than" in test: - version = test["ignore_if_server_version_greater_than"] - ver = tuple(int(elt) for elt in version.split(".")) - new_test = client_context.require_version_max(*ver)(new_test) - if "ignore_if_server_version_less_than" in test: - version = test["ignore_if_server_version_less_than"] - ver = tuple(int(elt) for elt in version.split(".")) - new_test = client_context.require_version_min(*ver)(new_test) - if "ignore_if_topology_type" in test: - types = set(test["ignore_if_topology_type"]) - if "sharded" in types: - new_test = client_context.require_no_mongos(None)(new_test) - - test_name = "test_%s_%s_%s" % ( - dirname, - os.path.splitext(filename)[0], - str(test["description"].replace(" ", "_")), - ) - new_test.__name__ = test_name - setattr(TestAllScenarios, new_test.__name__, new_test) - - -create_tests() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_command_monitoring_unified.py b/test/test_command_monitoring_unified.py deleted file mode 100644 index 46e1e4724c..0000000000 --- a/test/test_command_monitoring_unified.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2015-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Run the command monitoring unified format spec tests.""" - -import os -import sys - -sys.path[0:0] = [""] - -from test import unittest -from test.unified_format import generate_test_classes - -# Location of JSON test specifications. 
-_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "command_monitoring") - - -globals().update( - generate_test_classes( - os.path.join(_TEST_PATH, "unified"), - module=__name__, - ) -) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_comment.py b/test/test_comment.py index c83428fd70..bcab0061fa 100644 --- a/test/test_comment.py +++ b/test/test_comment.py @@ -14,55 +14,40 @@ """Test the keyword argument 'comment' in various helpers.""" +from __future__ import annotations + import inspect import sys -from typing import Any, Union sys.path[0:0] = [""] - +from inspect import iscoroutinefunction from test import IntegrationTest, client_context, unittest -from test.utils import EventListener, rs_or_single_client +from test.utils_shared import OvertCommandListener from bson.dbref import DBRef -from pymongo.command_cursor import CommandCursor from pymongo.operations import IndexModel +from pymongo.synchronous.command_cursor import CommandCursor - -class Empty(object): - def __getattr__(self, item): - try: - self.__dict__[item] - except KeyError: - return self.empty - - def empty(self, *args, **kwargs): - return Empty() +_IS_SYNC = True class TestComment(IntegrationTest): def _test_ops( - self, helpers, already_supported, listener, db=Empty(), coll=Empty() # noqa: B008 + self, + helpers, + already_supported, + listener, ): - results = listener.results for h, args in helpers: c = "testing comment with " + h.__name__ with self.subTest("collection-" + h.__name__ + "-comment"): for cc in [c, {"key": c}, ["any", 1]]: - results.clear() + listener.reset() kwargs = {"comment": cc} - if h == coll.rename: - _ = db.get_collection("temp_temp_temp").drop() - destruct_coll = db.get_collection("test_temp") - destruct_coll.insert_one({}) - maybe_cursor = destruct_coll.rename(*args, **kwargs) - destruct_coll.drop() - elif h == db.validate_collection: - coll = db.get_collection("test") - coll.insert_one({}) - maybe_cursor = db.validate_collection(*args, **kwargs) - else: - coll.create_index("a") + try: maybe_cursor = h(*args, **kwargs) + except Exception: + maybe_cursor = None self.assertIn( "comment", inspect.signature(h).parameters, @@ -70,21 +55,17 @@ def _test_ops( "signature of function %s" % (h.__name__), ) self.assertEqual( - inspect.signature(h).parameters["comment"].annotation, Union[Any, None] + inspect.signature(h).parameters["comment"].annotation, "Optional[Any]" ) if isinstance(maybe_cursor, CommandCursor): maybe_cursor.close() - tested = False - # For some reason collection.list_indexes creates two commands and the first - # one doesn't contain 'comment'. 
- for i in results["started"]: - if cc == i.command.get("comment", ""): - self.assertEqual(cc, i.command["comment"]) - tested = True - self.assertTrue(tested) - if h not in [coll.aggregate_raw_batches]: + + cmd = listener.started_events[0] + self.assertEqual(cc, cmd.command.get("comment"), msg=cmd) + + if h.__name__ != "aggregate_raw_batches": self.assertIn( - "`comment` (optional):", + ":param comment:", h.__doc__, ) if h not in already_supported: @@ -98,13 +79,13 @@ def _test_ops( h.__doc__, ) - results.clear() + listener.reset() @client_context.require_version_min(4, 7, -1) @client_context.require_replica_set def test_database_helpers(self): - listener = EventListener() - db = rs_or_single_client(event_listeners=[listener]).db + listener = OvertCommandListener() + db = (self.rs_or_single_client(event_listeners=[listener])).db helpers = [ (db.watch, []), (db.command, ["hello"]), @@ -115,13 +96,13 @@ def test_database_helpers(self): (db.dereference, [DBRef("collection", 1)]), ] already_supported = [db.command, db.list_collections, db.list_collection_names] - self._test_ops(helpers, already_supported, listener, db=db, coll=db.get_collection("test")) + self._test_ops(helpers, already_supported, listener) @client_context.require_version_min(4, 7, -1) @client_context.require_replica_set def test_client_helpers(self): - listener = EventListener() - cli = rs_or_single_client(event_listeners=[listener]) + listener = OvertCommandListener() + cli = self.rs_or_single_client(event_listeners=[listener]) helpers = [ (cli.watch, []), (cli.list_databases, []), @@ -135,8 +116,8 @@ def test_client_helpers(self): @client_context.require_version_min(4, 7, -1) def test_collection_helpers(self): - listener = EventListener() - db = rs_or_single_client(event_listeners=[listener])[self.db.name] + listener = OvertCommandListener() + db = (self.rs_or_single_client(event_listeners=[listener]))[self.db.name] coll = db.get_collection("test") helpers = [ @@ -171,7 +152,7 @@ def test_collection_helpers(self): coll.find_one_and_delete, coll.find_one_and_update, ] - self._test_ops(helpers, already_supported, listener, coll=coll, db=db) + self._test_ops(helpers, already_supported, listener) if __name__ == "__main__": diff --git a/test/test_common.py b/test/test_common.py index ff50878ea1..e69b421c9f 100644 --- a/test/test_common.py +++ b/test/test_common.py @@ -13,14 +13,14 @@ # limitations under the License. 
"""Test the pymongo common module.""" +from __future__ import annotations import sys import uuid sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest -from test.utils import connected, rs_or_single_client, single_client +from test import IntegrationTest, client_context, connected, unittest from bson.binary import PYTHON_LEGACY, STANDARD, Binary, UuidRepresentation from bson.codec_options import CodecOptions @@ -28,10 +28,7 @@ from pymongo.errors import OperationFailure from pymongo.write_concern import WriteConcern - -@client_context.require_connection -def setUpModule(): - pass +_IS_SYNC = True class TestCommon(IntegrationTest): @@ -48,12 +45,12 @@ def test_uuid_representation(self): coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) legacy_opts = coll.codec_options coll.insert_one({"uu": uu}) - self.assertEqual(uu, coll.find_one({"uu": uu})["uu"]) # type: ignore + self.assertEqual(uu, (coll.find_one({"uu": uu}))["uu"]) # type: ignore coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) self.assertEqual(STANDARD, coll.codec_options.uuid_representation) self.assertEqual(None, coll.find_one({"uu": uu})) uul = Binary.from_uuid(uu, PYTHON_LEGACY) - self.assertEqual(uul, coll.find_one({"uu": uul})["uu"]) # type: ignore + self.assertEqual(uul, (coll.find_one({"uu": uul}))["uu"]) # type: ignore # Test count_documents self.assertEqual(0, coll.count_documents({"uu": uu})) @@ -73,9 +70,9 @@ def test_uuid_representation(self): coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=STANDARD)) coll.update_one({"_id": uu}, {"$set": {"i": 2}}) coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(1, coll.find_one({"_id": uu})["i"]) # type: ignore + self.assertEqual(1, (coll.find_one({"_id": uu}))["i"]) # type: ignore coll.update_one({"_id": uu}, {"$set": {"i": 2}}) - self.assertEqual(2, coll.find_one({"_id": uu})["i"]) # type: ignore + self.assertEqual(2, (coll.find_one({"_id": uu}))["i"]) # type: ignore # Test Cursor.distinct self.assertEqual([2], coll.find({"_id": uu}).distinct("i")) @@ -85,35 +82,39 @@ def test_uuid_representation(self): # Test findAndModify self.assertEqual(None, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})) coll = self.db.get_collection("uuid", CodecOptions(uuid_representation=PYTHON_LEGACY)) - self.assertEqual(2, coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}})["i"]) - self.assertEqual(5, coll.find_one({"_id": uu})["i"]) # type: ignore + self.assertEqual(2, (coll.find_one_and_update({"_id": uu}, {"$set": {"i": 5}}))["i"]) + self.assertEqual(5, (coll.find_one({"_id": uu}))["i"]) # type: ignore # Test command self.assertEqual( 5, - self.db.command( - "findAndModify", - "uuid", - update={"$set": {"i": 6}}, - query={"_id": uu}, - codec_options=legacy_opts, + ( + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 6}}, + query={"_id": uu}, + codec_options=legacy_opts, + ) )["value"]["i"], ) self.assertEqual( 6, - self.db.command( - "findAndModify", - "uuid", - update={"$set": {"i": 7}}, - query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + ( + self.db.command( + "findAndModify", + "uuid", + update={"$set": {"i": 7}}, + query={"_id": Binary.from_uuid(uu, PYTHON_LEGACY)}, + ) )["value"]["i"], ) def test_write_concern(self): - c = rs_or_single_client(connect=False) + c = self.rs_or_single_client(connect=False) self.assertEqual(WriteConcern(), c.write_concern) - c = 
rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) + c = self.rs_or_single_client(connect=False, w=2, wTimeoutMS=1000) wc = WriteConcern(w=2, wtimeout=1000) self.assertEqual(wc, c.write_concern) @@ -133,40 +134,50 @@ def test_write_concern(self): def test_mongo_client(self): pair = client_context.pair - m = rs_or_single_client(w=0) + m = self.rs_or_single_client(w=0) coll = m.pymongo_test.write_concern_test coll.drop() doc = {"_id": ObjectId()} coll.insert_one(doc) self.assertTrue(coll.insert_one(doc)) coll = coll.with_options(write_concern=WriteConcern(w=1)) - self.assertRaises(OperationFailure, coll.insert_one, doc) + with self.assertRaises(OperationFailure): + coll.insert_one(doc) - m = rs_or_single_client() + m = self.rs_or_single_client() coll = m.pymongo_test.write_concern_test new_coll = coll.with_options(write_concern=WriteConcern(w=0)) self.assertTrue(new_coll.insert_one(doc)) - self.assertRaises(OperationFailure, coll.insert_one, doc) + with self.assertRaises(OperationFailure): + coll.insert_one(doc) - m = rs_or_single_client( - "mongodb://%s/" % (pair,), replicaSet=client_context.replica_set_name + m = self.rs_or_single_client( + f"mongodb://{pair}/", replicaSet=client_context.replica_set_name ) coll = m.pymongo_test.write_concern_test - self.assertRaises(OperationFailure, coll.insert_one, doc) - m = rs_or_single_client( - "mongodb://%s/?w=0" % (pair,), replicaSet=client_context.replica_set_name + with self.assertRaises(OperationFailure): + coll.insert_one(doc) + m = self.rs_or_single_client( + f"mongodb://{pair}/?w=0", replicaSet=client_context.replica_set_name ) coll = m.pymongo_test.write_concern_test coll.insert_one(doc) # Equality tests - direct = connected(single_client(w=0)) - direct2 = connected(single_client("mongodb://%s/?w=0" % (pair,), **self.credentials)) + direct = connected(self.single_client(w=0)) + direct2 = connected(self.single_client(f"mongodb://{pair}/?w=0", **self.credentials)) self.assertEqual(direct, direct2) self.assertFalse(direct != direct2) + def test_validate_boolean(self): + self.db.test.update_one({}, {"$set": {"total": 1}}, upsert=True) + with self.assertRaisesRegex( + TypeError, "upsert must be True or False, was: upsert={'upsert': True}" + ): + self.db.test.update_one({}, {"$set": {"total": 1}}, {"upsert": True}) # type: ignore + if __name__ == "__main__": unittest.main() diff --git a/test/test_connection_logging.py b/test/test_connection_logging.py new file mode 100644 index 0000000000..253193cc43 --- /dev/null +++ b/test/test_connection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the connection logging unified format spec tests.""" +from __future__ import annotations + +import os +import pathlib +import sys + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. 
+if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "connection_logging") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "connection_logging") + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_connection_monitoring.py b/test/test_connection_monitoring.py new file mode 100644 index 0000000000..1405824453 --- /dev/null +++ b/test/test_connection_monitoring.py @@ -0,0 +1,470 @@ +# Copyright 2019-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Execute Transactions Spec tests.""" +from __future__ import annotations + +import asyncio +import os +import sys +import time +from pathlib import Path +from test.utils import get_pool, get_pools + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, client_knobs, unittest +from test.pymongo_mocks import DummyMonitor +from test.utils_shared import ( + CMAPListener, + camel_to_snake, + wait_until, +) +from test.utils_spec_runner import SpecRunnerThread, SpecTestCreator + +from bson.objectid import ObjectId +from bson.son import SON +from pymongo.errors import ( + ConnectionFailure, + OperationFailure, + PyMongoError, + WaitQueueTimeoutError, +) +from pymongo.monitoring import ( + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionClosedReason, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.pool import PoolState, _PoolClosedError +from pymongo.topology_description import updated_topology_description + +_IS_SYNC = True + +OBJECT_TYPES = { + # Event types. + "ConnectionCheckedIn": ConnectionCheckedInEvent, + "ConnectionCheckedOut": ConnectionCheckedOutEvent, + "ConnectionCheckOutFailed": ConnectionCheckOutFailedEvent, + "ConnectionClosed": ConnectionClosedEvent, + "ConnectionCreated": ConnectionCreatedEvent, + "ConnectionReady": ConnectionReadyEvent, + "ConnectionCheckOutStarted": ConnectionCheckOutStartedEvent, + "ConnectionPoolCreated": PoolCreatedEvent, + "ConnectionPoolReady": PoolReadyEvent, + "ConnectionPoolCleared": PoolClearedEvent, + "ConnectionPoolClosed": PoolClosedEvent, + # Error types. + "PoolClosedError": _PoolClosedError, + "WaitQueueTimeoutError": WaitQueueTimeoutError, +} + + +class TestCMAP(IntegrationTest): + # Location of JSON test specifications. 
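+    # TEST_PATH is a class attribute so that CMAPSpecTestCreator, invoked at
+    # the bottom of this module, can read it when generating the spec tests.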
+ if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "connection_monitoring") + else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "connection_monitoring") + + # Test operations: + + def start(self, op): + """Run the 'start' thread operation.""" + target = op["target"] + thread = SpecRunnerThread(target) + thread.start() + self.targets[target] = thread + + def wait(self, op): + """Run the 'wait' operation.""" + time.sleep(op["ms"] / 1000.0) + + def wait_for_thread(self, op): + """Run the 'waitForThread' operation.""" + target = op["target"] + thread = self.targets[target] + thread.stop() + thread.join() + if thread.exc: + raise thread.exc + self.assertFalse(thread.ops) + + def wait_for_event(self, op): + """Run the 'waitForEvent' operation.""" + event = OBJECT_TYPES[op["event"]] + count = op["count"] + timeout = op.get("timeout", 10000) / 1000.0 + wait_until( + lambda: self.listener.event_count(event) >= count, + f"find {count} {event} event(s)", + timeout=timeout, + ) + + def check_out(self, op): + """Run the 'checkOut' operation.""" + label = op["label"] + with self.pool.checkout() as conn: + # Call 'pin_cursor' so we can hold the socket. + conn.pin_cursor() + if label: + self.labels[label] = conn + else: + self.addCleanup(conn.close_conn, None) + + def check_in(self, op): + """Run the 'checkIn' operation.""" + label = op["connection"] + conn = self.labels[label] + self.pool.checkin(conn) + + def ready(self, op): + """Run the 'ready' operation.""" + self.pool.ready() + + def clear(self, op): + """Run the 'clear' operation.""" + if "interruptInUseConnections" in op: + self.pool.reset(interrupt_connections=op["interruptInUseConnections"]) + else: + self.pool.reset() + + def close(self, op): + """Run the 'close' operation.""" + self.pool.close() + + def run_operation(self, op): + """Run a single operation in a test.""" + op_name = camel_to_snake(op["name"]) + thread = op["thread"] + meth = getattr(self, op_name) + if thread: + self.targets[thread].schedule(lambda: meth(op)) + else: + meth(op) + + def run_operations(self, ops): + """Run a test's operations.""" + for op in ops: + self._ops.append(op) + self.run_operation(op) + + def check_object(self, actual, expected): + """Assert that the actual object matches the expected object.""" + self.assertEqual(type(actual), OBJECT_TYPES[expected["type"]]) + for attr, expected_val in expected.items(): + if attr == "type": + continue + c2s = camel_to_snake(attr) + if c2s == "interrupt_in_use_connections": + c2s = "interrupt_connections" + actual_val = getattr(actual, c2s) + if expected_val == 42: + self.assertIsNotNone(actual_val) + else: + self.assertEqual(actual_val, expected_val) + + def check_event(self, actual, expected): + """Assert that the actual event matches the expected event.""" + self.check_object(actual, expected) + + def actual_events(self, ignore): + """Return all the non-ignored events.""" + ignore = tuple(OBJECT_TYPES[name] for name in ignore) + return [event for event in self.listener.events if not isinstance(event, ignore)] + + def check_events(self, events, ignore): + """Check the events of a test.""" + actual_events = self.actual_events(ignore) + for actual, expected in zip(actual_events, events): + self.logs.append(f"Checking event actual: {actual!r} vs expected: {expected!r}") + self.check_event(actual, expected) + + if len(events) > len(actual_events): + self.fail(f"missing events: {events[len(actual_events) :]!r}") + + def check_error(self, actual, expected): + message = 
expected.pop("message") + self.check_object(actual, expected) + self.assertIn(message, str(actual)) + + def set_fail_point(self, command_args): + if not client_context.supports_failCommand_fail_point: + self.skipTest("failCommand fail point must be supported") + self.configure_fail_point(self.client, command_args) + + def run_scenario(self, scenario_def, test): + """Run a CMAP spec test.""" + self.logs: list = [] + self.assertEqual(scenario_def["version"], 1) + self.assertIn(scenario_def["style"], ["unit", "integration"]) + self.listener = CMAPListener() + self._ops: list = [] + + # Configure the fail point before creating the client. + if "failPoint" in test: + fp = test["failPoint"] + self.set_fail_point(fp) + self.addCleanup( + self.set_fail_point, {"configureFailPoint": fp["configureFailPoint"], "mode": "off"} + ) + + opts = test["poolOptions"].copy() + opts["event_listeners"] = [self.listener] + opts["_monitor_class"] = DummyMonitor + opts["connect"] = False + # Support backgroundThreadIntervalMS, default to 50ms. + interval = opts.pop("backgroundThreadIntervalMS", 50) + if interval < 0: + kill_cursor_frequency = 99999999 + else: + kill_cursor_frequency = interval / 1000.0 + with client_knobs(kill_cursor_frequency=kill_cursor_frequency, min_heartbeat_interval=0.05): + client = self.single_client(**opts) + # Update the SD to a known type because the DummyMonitor will not. + # Note we cannot simply call topology.on_change because that would + # internally call pool.ready() which introduces unexpected + # PoolReadyEvents. Instead, update the initial state before + # opening the Topology. + td = client_context.client._topology.description + sd = td.server_descriptions()[(client_context.host, client_context.port)] + client._topology._description = updated_topology_description( + client._topology._description, sd + ) + # When backgroundThreadIntervalMS is negative we do not start the + # background thread to ensure it never runs. + if interval < 0: + client._topology.open() + else: + client._get_topology() + self.pool = list(client._topology._servers.values())[0].pool + + # Map of target names to Thread objects. + self.targets: dict = {} + # Map of label names to Connection objects + self.labels: dict = {} + + def cleanup(): + for t in self.targets.values(): + t.stop() + for t in self.targets.values(): + t.join(5) + for conn in self.labels.values(): + conn.close_conn(None) + + self.addCleanup(cleanup) + + try: + if test["error"]: + with self.assertRaises(PyMongoError) as ctx: + self.run_operations(test["operations"]) + self.check_error(ctx.exception, test["error"]) + else: + self.run_operations(test["operations"]) + + self.check_events(test["events"], test["ignore"]) + except Exception: + # Print the events after a test failure. + print("\nFailed test: {!r}".format(test["description"])) + print("Operations:") + for op in self._ops: + print(op) + print("Threads:") + print(self.targets) + print("Connections:") + print(self.labels) + print("Events:") + for event in self.listener.events: + print(event) + print("Log:") + for log in self.logs: + print(log) + raise + + POOL_OPTIONS = { + "maxPoolSize": 50, + "minPoolSize": 1, + "maxIdleTimeMS": 10000, + "waitQueueTimeoutMS": 10000, + } + + # + # Prose tests. Numbers correspond to the prose test number in the spec. 
+ # + def test_1_client_connection_pool_options(self): + client = self.rs_or_single_client(**self.POOL_OPTIONS) + pool_opts = (get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + def test_2_all_client_pools_have_same_options(self): + client = self.rs_or_single_client(**self.POOL_OPTIONS) + client.admin.command("ping") + # Discover at least one secondary. + if client_context.has_secondaries: + client.admin.command("ping", read_preference=ReadPreference.SECONDARY) + pools = get_pools(client) + pool_opts = pools[0].opts + + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + for pool in pools[1:]: + self.assertEqual(pool.opts, pool_opts) + + def test_3_uri_connection_pool_options(self): + opts = "&".join([f"{k}={v}" for k, v in self.POOL_OPTIONS.items()]) + uri = f"mongodb://{client_context.pair}/?{opts}" + client = self.rs_or_single_client(uri) + pool_opts = (get_pool(client)).opts + self.assertEqual(pool_opts.non_default_options, self.POOL_OPTIONS) + + def test_4_subscribe_to_events(self): + listener = CMAPListener() + client = self.single_client(event_listeners=[listener]) + self.assertEqual(listener.event_count(PoolCreatedEvent), 1) + + # Creates a new connection. + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 1) + self.assertEqual(listener.event_count(ConnectionCreatedEvent), 1) + self.assertEqual(listener.event_count(ConnectionReadyEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 1) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 1) + + # Uses the existing connection. + client.admin.command("ping") + self.assertEqual(listener.event_count(ConnectionCheckOutStartedEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedOutEvent), 2) + self.assertEqual(listener.event_count(ConnectionCheckedInEvent), 2) + + client.close() + self.assertEqual(listener.event_count(PoolClosedEvent), 1) + self.assertEqual(listener.event_count(ConnectionClosedEvent), 1) + + def test_5_check_out_fails_connection_error(self): + listener = CMAPListener() + client = self.single_client(event_listeners=[listener]) + pool = get_pool(client) + + def mock_connect(*args, **kwargs): + raise ConnectionFailure("connect failed") + + pool.connect = mock_connect + # Un-patch Pool.connect to break the cyclic reference. + self.addCleanup(delattr, pool, "connect") + + # Attempt to create a new connection. + with self.assertRaisesRegex(ConnectionFailure, "connect failed"): + client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCheckOutFailedEvent) + self.assertIsInstance(listener.events[4], PoolClearedEvent) + + failed_event = listener.events[3] + self.assertEqual(failed_event.reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + @client_context.require_no_fips + def test_5_check_out_fails_auth_error(self): + listener = CMAPListener() + client = self.single_client_noauth( + username="notauser", password="fail", event_listeners=[listener] + ) + + # Attempt to create a new connection. 
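+        # Auth runs during the connection handshake, so the failed check out
+        # still creates a connection; it should then be closed before the
+        # check out itself fails with CONN_ERROR.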
+ with self.assertRaisesRegex(OperationFailure, "failed"): + client.admin.command("ping") + + self.assertIsInstance(listener.events[0], PoolCreatedEvent) + self.assertIsInstance(listener.events[1], PoolReadyEvent) + self.assertIsInstance(listener.events[2], ConnectionCheckOutStartedEvent) + self.assertIsInstance(listener.events[3], ConnectionCreatedEvent) + # Error happens here. + self.assertIsInstance(listener.events[4], ConnectionClosedEvent) + self.assertIsInstance(listener.events[5], ConnectionCheckOutFailedEvent) + self.assertEqual(listener.events[5].reason, ConnectionCheckOutFailedReason.CONN_ERROR) + + # + # Extra non-spec tests + # + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + def test_events_repr(self): + host = ("localhost", 27017) + self.assertRepr(ConnectionCheckedInEvent(host, 1)) + self.assertRepr(ConnectionCheckedOutEvent(host, 1, time.monotonic())) + self.assertRepr( + ConnectionCheckOutFailedEvent( + host, ConnectionCheckOutFailedReason.POOL_CLOSED, time.monotonic() + ) + ) + self.assertRepr(ConnectionClosedEvent(host, 1, ConnectionClosedReason.POOL_CLOSED)) + self.assertRepr(ConnectionCreatedEvent(host, 1)) + self.assertRepr(ConnectionReadyEvent(host, 1, time.monotonic())) + self.assertRepr(ConnectionCheckOutStartedEvent(host)) + self.assertRepr(PoolCreatedEvent(host, {})) + self.assertRepr(PoolClearedEvent(host)) + self.assertRepr(PoolClearedEvent(host, service_id=ObjectId())) + self.assertRepr(PoolClosedEvent(host)) + + def test_close_leaves_pool_unpaused(self): + listener = CMAPListener() + client = self.single_client(event_listeners=[listener]) + client.admin.command("ping") + pool = get_pool(client) + client.close() + self.assertEqual(1, listener.event_count(PoolClosedEvent)) + self.assertEqual(PoolState.CLOSED, pool.state) + # Checking out a connection should fail + with self.assertRaises(_PoolClosedError): + with pool.checkout(): + pass + + +def create_test(scenario_def, test, name): + def run_scenario(self): + self.run_scenario(scenario_def, test) + + return run_scenario + + +class CMAPSpecTestCreator(SpecTestCreator): + def tests(self, scenario_def): + """Extract the tests from a spec file. + + CMAP tests do not have a 'tests' field. The whole file represents + a single test case. + """ + return [scenario_def] + + +test_creator = CMAPSpecTestCreator(create_test, TestCMAP, TestCMAP.TEST_PATH) +test_creator.create_tests() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_connections_survive_primary_stepdown_spec.py b/test/test_connections_survive_primary_stepdown_spec.py index fd9f126551..8e9a3b8e62 100644 --- a/test/test_connections_survive_primary_stepdown_spec.py +++ b/test/test_connections_survive_primary_stepdown_spec.py @@ -13,53 +13,50 @@ # limitations under the License. 
"""Test compliance with the connections survive primary step down spec.""" +from __future__ import annotations import sys +from test.utils import ensure_all_connected sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest -from test.utils import ( +from test import ( + IntegrationTest, + client_context, + unittest, +) +from test.helpers import repl_set_step_down +from test.utils_shared import ( CMAPListener, - ensure_all_connected, - repl_set_step_down, - rs_or_single_client, ) from bson import SON from pymongo import monitoring -from pymongo.collection import Collection from pymongo.errors import NotPrimaryError +from pymongo.synchronous.collection import Collection from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestConnectionsSurvivePrimaryStepDown(IntegrationTest): listener: CMAPListener coll: Collection - @classmethod @client_context.require_replica_set - def setUpClass(cls): - super(TestConnectionsSurvivePrimaryStepDown, cls).setUpClass() - cls.listener = CMAPListener() - cls.client = rs_or_single_client( - event_listeners=[cls.listener], retryWrites=False, heartbeatFrequencyMS=500 + def setUp(self): + super().setUp() + self.listener = CMAPListener() + self.client = self.rs_or_single_client( + event_listeners=[self.listener], retryWrites=False, heartbeatFrequencyMS=500 ) # Ensure connections to all servers in replica set. This is to test - # that the is_writable flag is properly updated for sockets that + # that the is_writable flag is properly updated for connections that # survive a replica set election. - ensure_all_connected(cls.client) - cls.listener.reset() - - cls.db = cls.client.get_database("step-down", write_concern=WriteConcern("majority")) - cls.coll = cls.db.get_collection("step-down", write_concern=WriteConcern("majority")) - - @classmethod - def tearDownClass(cls): - cls.client.close() - - def setUp(self): + ensure_all_connected(self.client) + self.db = self.client.get_database("step-down", write_concern=WriteConcern("majority")) + self.coll = self.db.get_collection("step-down", write_concern=WriteConcern("majority")) # Note that all ops use same write-concern as self.db (majority). self.db.drop_collection("step-down") self.db.create_collection("step-down") @@ -111,7 +108,7 @@ def run_scenario(self, error_code, retry, pool_status_checker): # Insert record and verify failure. with self.assertRaises(NotPrimaryError) as exc: self.coll.insert_one({"test": 1}) - self.assertEqual(exc.exception.details["code"], error_code) # type: ignore + self.assertEqual(exc.exception.details["code"], error_code) # type: ignore[call-overload] # Retry before CMAPListener assertion if retry_before=True. 
if retry: self.coll.insert_one({"test": 1}) @@ -125,18 +122,12 @@ def run_scenario(self, error_code, retry, pool_status_checker): def test_not_primary_keep_connection_pool(self): self.run_scenario(10107, True, self.verify_pool_not_cleared) - @client_context.require_version_min(4, 0, 0) - @client_context.require_version_max(4, 1, 0, -1) - @client_context.require_test_commands - def test_not_primary_reset_connection_pool(self): - self.run_scenario(10107, False, self.verify_pool_cleared) - - @client_context.require_version_min(4, 0, 0) + @client_context.require_version_min(4, 2, 0) @client_context.require_test_commands def test_shutdown_in_progress(self): self.run_scenario(91, False, self.verify_pool_cleared) - @client_context.require_version_min(4, 0, 0) + @client_context.require_version_min(4, 2, 0) @client_context.require_test_commands def test_interrupted_at_shutdown(self): self.run_scenario(11600, False, self.verify_pool_cleared) diff --git a/test/test_create_entities.py b/test/test_create_entities.py index ad0ac9347e..9d77a08eee 100644 --- a/test/test_create_entities.py +++ b/test/test_create_entities.py @@ -11,11 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + +import sys import unittest + +sys.path[0:0] = [""] + +from test import IntegrationTest from test.unified_format import UnifiedSpecTestMixinV1 +_IS_SYNC = True + -class TestCreateEntities(unittest.TestCase): +class TestCreateEntities(IntegrationTest): def test_store_events_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() spec = { @@ -47,6 +56,9 @@ def test_store_events_as_entities(self): self.assertGreater(len(final_entity_map["events1"]), 0) for event in final_entity_map["events1"]: self.assertIn("PoolCreatedEvent", event["name"]) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + client.close() def test_store_all_others_as_entities(self): self.scenario_runner = UnifiedSpecTestMixinV1() @@ -91,7 +103,7 @@ def test_store_all_others_as_entities(self): { "name": "insertOne", "object": "collection0", - "arguments": {"document": {"_id": 1, "x": 44}}, + "arguments": {"document": {"_id": 2, "x": 44}}, }, ], }, @@ -101,15 +113,22 @@ def test_store_all_others_as_entities(self): ], } + self.client.dat.dat.delete_many({}) self.scenario_runner.TEST_SPEC = spec self.scenario_runner.setUp() self.scenario_runner.run_scenario(spec["tests"][0]) self.scenario_runner.entity_map["client0"].close() - final_entity_map = self.scenario_runner.entity_map - for entity in ["errors", "failures"]: - self.assertIn(entity, final_entity_map) - self.assertGreaterEqual(len(final_entity_map[entity]), 0) - self.assertEqual(type(final_entity_map[entity]), list) - for entity in ["successes", "iterations"]: - self.assertIn(entity, final_entity_map) - self.assertEqual(type(final_entity_map[entity]), int) + entity_map = self.scenario_runner.entity_map + self.assertEqual(len(entity_map["errors"]), 4) + for error in entity_map["errors"]: + self.assertEqual(error["type"], "DuplicateKeyError") + self.assertEqual(entity_map["failures"], []) + self.assertEqual(entity_map["successes"], 2) + self.assertEqual(entity_map["iterations"], 5) + if self.scenario_runner.mongos_clients: + for client in self.scenario_runner.mongos_clients: + client.close() + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_crud_unified.py 
b/test/test_crud_unified.py index cc9a521b3b..1b1abf3600 100644 --- a/test/test_crud_unified.py +++ b/test/test_crud_unified.py @@ -13,8 +13,10 @@ # limitations under the License. """Test the CRUD unified spec tests.""" +from __future__ import annotations import os +import pathlib import sys sys.path[0:0] = [""] @@ -22,11 +24,16 @@ from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "unified") +if _IS_SYNC: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "crud", "unified") +else: + _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "crud", "unified") # Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__, RUN_ON_SERVERLESS=True)) +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) if __name__ == "__main__": unittest.main() diff --git a/test/test_crud_v1.py b/test/test_crud_v1.py index ca4b84c26d..af5228b2c7 100644 --- a/test/test_crud_v1.py +++ b/test/test_crud_v1.py @@ -13,25 +13,14 @@ # limitations under the License. """Test the collection module.""" +from __future__ import annotations -import os import sys sys.path[0:0] = [""] -from test import IntegrationTest, unittest -from test.utils import ( - TestCreator, - camel_to_snake, - camel_to_snake_args, - camel_to_upper_camel, - drop_collections, -) +from test import unittest -from pymongo import WriteConcern, operations -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import Cursor -from pymongo.errors import PyMongoError from pymongo.operations import ( DeleteMany, DeleteOne, @@ -40,139 +29,6 @@ UpdateMany, UpdateOne, ) -from pymongo.read_concern import ReadConcern -from pymongo.results import BulkWriteResult, _WriteResult - -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "crud", "v1") - - -class TestAllScenarios(IntegrationTest): - RUN_ON_SERVERLESS = True - - -def check_result(self, expected_result, result): - if isinstance(result, _WriteResult): - for res in expected_result: - prop = camel_to_snake(res) - msg = "%s : %r != %r" % (prop, expected_result, result) - # SPEC-869: Only BulkWriteResult has upserted_count. - if prop == "upserted_count" and not isinstance(result, BulkWriteResult): - if result.upserted_id is not None: # type: ignore - upserted_count = 1 - else: - upserted_count = 0 - self.assertEqual(upserted_count, expected_result[res], msg) - elif prop == "inserted_ids": - # BulkWriteResult does not have inserted_ids. - if isinstance(result, BulkWriteResult): - self.assertEqual(len(expected_result[res]), result.inserted_count) - else: - # InsertManyResult may be compared to [id1] from the - # crud spec or {"0": id1} from the retryable write spec. - ids = expected_result[res] - if isinstance(ids, dict): - ids = [ids[str(i)] for i in range(len(ids))] - self.assertEqual(ids, result.inserted_ids, msg) # type: ignore - elif prop == "upserted_ids": - # Convert indexes from strings to integers. 
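-                # e.g. {"0": id1, "1": id2} becomes {0: id1, 1: id2}.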
- ids = expected_result[res] - expected_ids = {} - for str_index in ids: - expected_ids[int(str_index)] = ids[str_index] - self.assertEqual(expected_ids, result.upserted_ids, msg) # type: ignore - else: - self.assertEqual(getattr(result, prop), expected_result[res], msg) - - else: - self.assertEqual(result, expected_result) - - -def run_operation(collection, test): - # Convert command from CamelCase to pymongo.collection method. - operation = camel_to_snake(test["operation"]["name"]) - cmd = getattr(collection, operation) - - # Convert arguments to snake_case and handle special cases. - arguments = test["operation"]["arguments"] - options = arguments.pop("options", {}) - for option_name in options: - arguments[camel_to_snake(option_name)] = options[option_name] - if operation == "count": - raise unittest.SkipTest("PyMongo does not support count") - if operation == "bulk_write": - # Parse each request into a bulk write model. - requests = [] - for request in arguments["requests"]: - bulk_model = camel_to_upper_camel(request["name"]) - bulk_class = getattr(operations, bulk_model) - bulk_arguments = camel_to_snake_args(request["arguments"]) - requests.append(bulk_class(**bulk_arguments)) - arguments["requests"] = requests - else: - for arg_name in list(arguments): - c2s = camel_to_snake(arg_name) - # PyMongo accepts sort as list of tuples. - if arg_name == "sort": - sort_dict = arguments[arg_name] - arguments[arg_name] = list(sort_dict.items()) - # Named "key" instead not fieldName. - if arg_name == "fieldName": - arguments["key"] = arguments.pop(arg_name) - # Aggregate uses "batchSize", while find uses batch_size. - elif arg_name == "batchSize" and operation == "aggregate": - continue - # Requires boolean returnDocument. - elif arg_name == "returnDocument": - arguments[c2s] = arguments.pop(arg_name) == "After" - else: - arguments[c2s] = arguments.pop(arg_name) - - result = cmd(**arguments) - - if isinstance(result, Cursor) or isinstance(result, CommandCursor): - return list(result) - - return result - - -def create_test(scenario_def, test, name): - def run_scenario(self): - # Cleanup state and load data (if provided). - drop_collections(self.db) - data = scenario_def.get("data") - if data: - self.db.test.with_options(write_concern=WriteConcern(w="majority")).insert_many( - scenario_def["data"] - ) - - # Run operations and check results or errors. - expected_result = test.get("outcome", {}).get("result") - expected_error = test.get("outcome", {}).get("error") - if expected_error is True: - with self.assertRaises(PyMongoError): - run_operation(self.db.test, test) - else: - result = run_operation(self.db.test, test) - if expected_result is not None: - check_result(self, expected_result, result) - - # Assert final state is expected. 
- expected_c = test["outcome"].get("collection") - if expected_c is not None: - expected_name = expected_c.get("name") - if expected_name is not None: - db_coll = self.db[expected_name] - else: - db_coll = self.db.test - db_coll = db_coll.with_options(read_concern=ReadConcern(level="local")) - self.assertEqual(list(db_coll.find()), expected_c["data"]) - - return run_scenario - - -test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) -test_creator.create_tests() class TestWriteOpsComparison(unittest.TestCase): @@ -184,15 +40,45 @@ def test_InsertOneNotEquals(self): def test_DeleteOneEquals(self): self.assertEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 42})) + self.assertEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}), DeleteOne({"foo": 42}, {"locale": "en_US"}) + ) + self.assertEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + ) def test_DeleteOneNotEquals(self): self.assertNotEqual(DeleteOne({"foo": 42}), DeleteOne({"foo": 23})) + self.assertNotEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}), DeleteOne({"foo": 42}, {"locale": "en_GB"}) + ) + self.assertNotEqual( + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteOne({"foo": 42}, {"locale": "en_US"}, {"hint": 2}), + ) def test_DeleteManyEquals(self): self.assertEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 42})) + self.assertEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}), + DeleteMany({"foo": 42}, {"locale": "en_US"}), + ) + self.assertEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + ) def test_DeleteManyNotEquals(self): self.assertNotEqual(DeleteMany({"foo": 42}), DeleteMany({"foo": 23})) + self.assertNotEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}), + DeleteMany({"foo": 42}, {"locale": "en_GB"}), + ) + self.assertNotEqual( + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 1}), + DeleteMany({"foo": 42}, {"locale": "en_US"}, {"hint": 2}), + ) def test_DeleteOneNotEqualsDeleteMany(self): self.assertNotEqual(DeleteOne({"foo": 42}), DeleteMany({"foo": 42})) diff --git a/test/test_csot.py b/test/test_csot.py index 7b82a49caf..981af1ed03 100644 --- a/test/test_csot.py +++ b/test/test_csot.py @@ -13,30 +13,38 @@ # limitations under the License. """Test the CSOT unified spec tests.""" +from __future__ import annotations import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest from test.unified_format import generate_test_classes +from test.utils import flaky import pymongo from pymongo import _csot from pymongo.errors import PyMongoError +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "csot") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "csot") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "csot") # Generate unified tests. 
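# generate_test_classes builds one TestCase subclass per JSON scenario file;
# injecting the classes into this module's namespace is what lets unittest
# discovery collect them.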
globals().update(generate_test_classes(TEST_PATH, module=__name__)) class TestCSOT(IntegrationTest): - RUN_ON_SERVERLESS = True RUN_ON_LOAD_BALANCER = True + @flaky(reason="PYTHON-3522") def test_timeout_nested(self): coll = self.db.coll self.assertEqual(_csot.get_timeout(), None) @@ -73,15 +81,15 @@ def test_timeout_nested(self): self.assertEqual(_csot.get_deadline(), float("inf")) self.assertEqual(_csot.get_rtt(), 0.0) - @client_context.require_version_min(3, 6) - @client_context.require_no_mmap - @client_context.require_no_standalone + @client_context.require_change_streams + @flaky(reason="PYTHON-3522") def test_change_stream_can_resume_after_timeouts(self): coll = self.db.test - with coll.watch(max_await_time_ms=150) as stream: + coll.insert_one({}) + with coll.watch() as stream: with pymongo.timeout(0.1): with self.assertRaises(PyMongoError) as ctx: - stream.try_next() + stream.next() self.assertTrue(ctx.exception.timeout) self.assertTrue(stream.alive) with self.assertRaises(PyMongoError) as ctx: diff --git a/test/test_cursor.py b/test/test_cursor.py index 5b4efcd391..219ca396c9 100644 --- a/test/test_cursor.py +++ b/test/test_cursor.py @@ -13,37 +13,48 @@ # limitations under the License. """Test the cursor module.""" +from __future__ import annotations + import copy import gc import itertools +import os import random import re import sys import threading import time +from typing import Any + +import pymongo sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ( +from test.utils import flaky +from test.utils_shared import ( AllowListEventListener, EventListener, OvertCommandListener, + delay, ignore_deprecations, - rs_or_single_client, + wait_until, ) from bson import decode_all from bson.code import Code -from bson.son import SON +from bson.raw_bson import RawBSONDocument from pymongo import ASCENDING, DESCENDING from pymongo.collation import Collation -from pymongo.cursor import Cursor, CursorType -from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure +from pymongo.errors import ExecutionTimeout, InvalidOperation, OperationFailure, PyMongoError +from pymongo.operations import _IndexList from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.cursor import Cursor, CursorType from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestCursor(IntegrationTest): def test_deepcopy_cursor_littered_with_regexes(self): @@ -57,97 +68,99 @@ def test_deepcopy_cursor_littered_with_regexes(self): ) cursor2 = copy.deepcopy(cursor) - self.assertEqual(cursor._Cursor__spec, cursor2._Cursor__spec) # type: ignore + self.assertEqual(cursor._spec, cursor2._spec) def test_add_remove_option(self): cursor = self.db.test.find() - self.assertEqual(0, cursor._Cursor__query_flags) + self.assertEqual(0, cursor._query_flags) cursor.add_option(2) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) - self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.add_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) - self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, 
cursor2._query_flags) cursor.add_option(128) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT).add_option(128) - self.assertEqual(162, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(162, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) - self.assertEqual(162, cursor._Cursor__query_flags) + self.assertEqual(162, cursor._query_flags) cursor.add_option(128) - self.assertEqual(162, cursor._Cursor__query_flags) + self.assertEqual(162, cursor._query_flags) cursor.remove_option(128) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) - self.assertEqual(34, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(34, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.remove_option(32) cursor2 = self.db.test.find(cursor_type=CursorType.TAILABLE) - self.assertEqual(2, cursor2._Cursor__query_flags) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(2, cursor2._query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) - self.assertEqual(2, cursor._Cursor__query_flags) + self.assertEqual(2, cursor._query_flags) cursor.remove_option(32) - self.assertEqual(2, cursor._Cursor__query_flags) + self.assertEqual(2, cursor._query_flags) # Timeout cursor = self.db.test.find(no_cursor_timeout=True) - self.assertEqual(16, cursor._Cursor__query_flags) + self.assertEqual(16, cursor._query_flags) cursor2 = self.db.test.find().add_option(16) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.remove_option(16) - self.assertEqual(0, cursor._Cursor__query_flags) + self.assertEqual(0, cursor._query_flags) # Tailable / Await data cursor = self.db.test.find(cursor_type=CursorType.TAILABLE_AWAIT) - self.assertEqual(34, cursor._Cursor__query_flags) + self.assertEqual(34, cursor._query_flags) cursor2 = self.db.test.find().add_option(34) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.remove_option(32) - self.assertEqual(2, cursor._Cursor__query_flags) + self.assertEqual(2, cursor._query_flags) # Partial cursor = self.db.test.find(allow_partial_results=True) - self.assertEqual(128, cursor._Cursor__query_flags) + self.assertEqual(128, cursor._query_flags) cursor2 = self.db.test.find().add_option(128) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) + self.assertEqual(cursor._query_flags, cursor2._query_flags) cursor.remove_option(128) - self.assertEqual(0, cursor._Cursor__query_flags) + self.assertEqual(0, cursor._query_flags) def test_add_remove_option_exhaust(self): # Exhaust - which mongos doesn't support if client_context.is_mongos: with self.assertRaises(InvalidOperation): - self.db.test.find(cursor_type=CursorType.EXHAUST) + next(self.db.test.find(cursor_type=CursorType.EXHAUST)) else: cursor = self.db.test.find(cursor_type=CursorType.EXHAUST) - self.assertEqual(64, cursor._Cursor__query_flags) + self.assertEqual(64, cursor._query_flags) cursor2 = self.db.test.find().add_option(64) - self.assertEqual(cursor._Cursor__query_flags, cursor2._Cursor__query_flags) - self.assertTrue(cursor._Cursor__exhaust) + self.assertEqual(cursor._query_flags, cursor2._query_flags) + 
self.assertTrue(cursor._exhaust) cursor.remove_option(64) - self.assertEqual(0, cursor._Cursor__query_flags) - self.assertFalse(cursor._Cursor__exhaust) + self.assertEqual(0, cursor._query_flags) + self.assertFalse(cursor._exhaust) def test_allow_disk_use(self): db = self.db db.pymongo_test.drop() coll = db.pymongo_test - self.assertRaises(TypeError, coll.find().allow_disk_use, "baz") + with self.assertRaises(TypeError): + coll.find().allow_disk_use("baz") # type: ignore[arg-type] cursor = coll.find().allow_disk_use(True) - self.assertEqual(True, cursor._Cursor__allow_disk_use) # type: ignore + self.assertEqual(True, cursor._allow_disk_use) cursor = coll.find().allow_disk_use(False) - self.assertEqual(False, cursor._Cursor__allow_disk_use) # type: ignore + self.assertEqual(False, cursor._allow_disk_use) def test_max_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.pymongo_test - self.assertRaises(TypeError, coll.find().max_time_ms, "foo") + with self.assertRaises(TypeError): + coll.find().max_time_ms("foo") # type: ignore[arg-type] coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) @@ -155,15 +168,15 @@ def test_max_time_ms(self): coll.find().max_time_ms(1) cursor = coll.find().max_time_ms(999) - self.assertEqual(999, cursor._Cursor__max_time_ms) # type: ignore + self.assertEqual(999, cursor._max_time_ms) cursor = coll.find().max_time_ms(10).max_time_ms(1000) - self.assertEqual(1000, cursor._Cursor__max_time_ms) # type: ignore + self.assertEqual(1000, cursor._max_time_ms) cursor = coll.find().max_time_ms(999) c2 = cursor.clone() - self.assertEqual(999, c2._Cursor__max_time_ms) # type: ignore - self.assertTrue("$maxTimeMS" in cursor._Cursor__query_spec()) # type: ignore - self.assertTrue("$maxTimeMS" in c2._Cursor__query_spec()) # type: ignore + self.assertEqual(999, c2._max_time_ms) + self.assertIn("$maxTimeMS", cursor._query_spec()) + self.assertIn("$maxTimeMS", c2._query_spec()) self.assertTrue(coll.find_one(max_time_ms=1000)) @@ -179,16 +192,33 @@ def test_max_time_ms(self): pass else: self.fail("ExecutionTimeout not raised") - self.assertRaises(ExecutionTimeout, coll.find_one, max_time_ms=1) + with self.assertRaises(ExecutionTimeout): + coll.find_one(max_time_ms=1) finally: client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") + def test_maxtime_ms_message(self): + db = self.db + db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + + client = self.rs_client(document_class=RawBSONDocument) + client.db.t.insert_one({"x": 1}) + with self.assertRaises(Exception) as error: + client.db.t.find_one({"$where": delay(2)}, max_time_ms=1) + + self.assertIn("(configured timeouts: connectTimeoutMS: 20000.0ms", str(error.exception)) + def test_max_await_time_ms(self): db = self.db db.pymongo_test.drop() coll = db.create_collection("pymongo_test", capped=True, size=4096) - self.assertRaises(TypeError, coll.find().max_await_time_ms, "foo") + with self.assertRaises(TypeError): + coll.find().max_await_time_ms("foo") # type: ignore[arg-type] coll.insert_one({"amalia": 1}) coll.insert_one({"amalia": 2}) @@ -197,100 +227,107 @@ def test_max_await_time_ms(self): # When cursor is not tailable_await cursor = coll.find() - self.assertEqual(None, cursor._Cursor__max_await_time_ms) + self.assertEqual(None, cursor._max_await_time_ms) cursor = coll.find().max_await_time_ms(99) - self.assertEqual(None, 
cursor._Cursor__max_await_time_ms) + self.assertEqual(None, cursor._max_await_time_ms) # If cursor is tailable_await and timeout is unset cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT) - self.assertEqual(None, cursor._Cursor__max_await_time_ms) + self.assertEqual(None, cursor._max_await_time_ms) # If cursor is tailable_await and timeout is set cursor = coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99) - self.assertEqual(99, cursor._Cursor__max_await_time_ms) + self.assertEqual(99, cursor._max_await_time_ms) cursor = ( coll.find(cursor_type=CursorType.TAILABLE_AWAIT) .max_await_time_ms(10) .max_await_time_ms(90) ) - self.assertEqual(90, cursor._Cursor__max_await_time_ms) + self.assertEqual(90, cursor._max_await_time_ms) listener = AllowListEventListener("find", "getMore") - coll = rs_or_single_client(event_listeners=[listener])[self.db.name].pymongo_test - results = listener.results + coll = (self.rs_or_single_client(event_listeners=[listener]))[self.db.name].pymongo_test - # Tailable_await defaults. - list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT)) + # Tailable_await defaults. + coll.find(cursor_type=CursorType.TAILABLE_AWAIT).to_list() # find - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + listener.reset() - # Tailable_await with max_await_time_ms set. - list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99)) + # Tailable_await with max_await_time_ms set. + coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_await_time_ms(99).to_list() # find - self.assertEqual("find", results["started"][0].command_name) - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertTrue("maxTimeMS" in results["started"][1].command) - self.assertEqual(99, results["started"][1].command["maxTimeMS"]) - results.clear() - - # Tailable_await with max_time_ms - list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertIn("maxTimeMS", listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() + + # Tailable_await with max_time_ms and make sure list() works on synchronous cursors + if _IS_SYNC: + list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99)) # type: ignore[call-overload] + else: + coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).to_list() # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS",
listener.started_events[1].command) + listener.reset() - # Tailable_await with both max_time_ms and max_await_time_ms - list(coll.find(cursor_type=CursorType.TAILABLE_AWAIT).max_time_ms(99).max_await_time_ms(99)) + # Tailable_await with both max_time_ms and max_await_time_ms + ( + coll.find(cursor_type=CursorType.TAILABLE_AWAIT) + .max_time_ms(99) + .max_await_time_ms(99) + .to_list() + ) # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertTrue("maxTimeMS" in results["started"][1].command) - self.assertEqual(99, results["started"][1].command["maxTimeMS"]) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertIn("maxTimeMS", listener.started_events[1].command) + self.assertEqual(99, listener.started_events[1].command["maxTimeMS"]) + listener.reset() # Non tailable_await with max_await_time_ms - list(coll.find(batch_size=1).max_await_time_ms(99)) + coll.find(batch_size=1).max_await_time_ms(99).to_list() # find - self.assertEqual("find", results["started"][0].command_name) - self.assertFalse("maxTimeMS" in results["started"][0].command) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[0].command) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) - results.clear() + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) + listener.reset() # Non tailable_await with max_time_ms - list(coll.find(batch_size=1).max_time_ms(99)) + coll.find(batch_size=1).max_time_ms(99).to_list() # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in results["started"][1].command) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) # Non tailable_await with both max_time_ms and max_await_time_ms - list(coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88)) + coll.find(batch_size=1).max_time_ms(99).max_await_time_ms(88).to_list() # find - self.assertEqual("find", results["started"][0].command_name) - self.assertTrue("maxTimeMS" in results["started"][0].command) - self.assertEqual(99, results["started"][0].command["maxTimeMS"]) + self.assertEqual("find", listener.started_events[0].command_name) + self.assertIn("maxTimeMS", listener.started_events[0].command) + self.assertEqual(99, listener.started_events[0].command["maxTimeMS"]) # getMore - self.assertEqual("getMore", results["started"][1].command_name) - self.assertFalse("maxTimeMS" in
results["started"][1].command) + self.assertEqual("getMore", listener.started_events[1].command_name) + self.assertNotIn("maxTimeMS", listener.started_events[1].command) @client_context.require_test_commands @client_context.require_no_mongos @@ -306,7 +343,7 @@ def test_max_time_ms_getmore(self): try: try: # Iterate up to first getmore. - list(cursor) + cursor.to_list() except ExecutionTimeout: pass else: @@ -325,41 +362,58 @@ def test_explain(self): def test_explain_with_read_concern(self): # Do not add readConcern level to explain. listener = AllowListEventListener("explain") - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) coll = client.pymongo_test.test.with_options(read_concern=ReadConcern(level="local")) self.assertTrue(coll.find().explain()) - started = listener.results["started"] + started = listener.started_events self.assertEqual(len(started), 1) self.assertNotIn("readConcern", started[0].command) + # https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.md#14-explain-helpers-allow-users-to-specify-maxtimems + def test_explain_csot(self): + # Create a MongoClient with command monitoring enabled (referred to as client). + listener = AllowListEventListener("explain") + client = self.rs_or_single_client(event_listeners=[listener]) + + # Create a collection, referred to as collection, with the namespace explain-test.collection. + # Workaround for SERVER-108463 + names = client["explain-test"].list_collection_names() + if "collection" not in names: + collection = client["explain-test"].create_collection("collection") + else: + collection = client["explain-test"]["collection"] + + # Run an explained find on collection. The find will have the query predicate { name: 'john doe' }. Specify a maxTimeMS value of 2000ms for the explain. + with pymongo.timeout(2.0): + self.assertTrue(collection.find({"name": "john doe"}).explain()) + + # Obtain the command started event for the explain. Confirm that the top-level explain command has a maxTimeMS value of 2000.
+ started = listener.started_events + self.assertEqual(len(started), 1) + assert 1500 < started[0].command["maxTimeMS"] <= 2000 + def test_hint(self): db = self.db - self.assertRaises(TypeError, db.test.find().hint, 5.5) + with self.assertRaises(TypeError): + db.test.find().hint(5.5) # type: ignore[arg-type] db.test.drop() db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) - self.assertRaises( - OperationFailure, - db.test.find({"num": 17, "foo": 17}).hint([("num", ASCENDING)]).explain, - ) - self.assertRaises( - OperationFailure, - db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, - ) + with self.assertRaises(OperationFailure): + db.test.find({"num": 17, "foo": 17}).hint([("num", ASCENDING)]).explain() + with self.assertRaises(OperationFailure): + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain() - spec = [("num", DESCENDING)] + spec: list[Any] = [("num", DESCENDING)] _ = db.test.create_index(spec) first = next(db.test.find()) self.assertEqual(0, first.get("num")) first = next(db.test.find().hint(spec)) self.assertEqual(99, first.get("num")) - self.assertRaises( - OperationFailure, - db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain, - ) + with self.assertRaises(OperationFailure): + db.test.find({"num": 17, "foo": 17}).hint([("foo", ASCENDING)]).explain() a = db.test.find({"num": 17}) a.hint(spec) @@ -367,6 +421,21 @@ def test_hint(self): break self.assertRaises(InvalidOperation, a.hint, spec) + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec: _IndexList = ["num", ("foo", DESCENDING)] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + self.assertEqual(0, first.get("foo")) + + db.test.drop() + db.test.insert_many([{"num": i, "foo": i} for i in range(100)]) + spec = ["num"] + db.test.create_index(spec) + first = next(db.test.find().hint(spec)) + self.assertEqual(0, first.get("num")) + def test_hint_by_name(self): db = self.db db.test.drop() @@ -382,10 +451,13 @@ def test_hint_by_name(self): def test_limit(self): db = self.db - self.assertRaises(TypeError, db.test.find().limit, None) - self.assertRaises(TypeError, db.test.find().limit, "hello") - self.assertRaises(TypeError, db.test.find().limit, 5.5) - self.assertTrue(db.test.find().limit(5)) + with self.assertRaises(TypeError): + db.test.find().limit(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().limit("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().limit(5.5) # type: ignore[arg-type] + self.assertTrue((db.test.find()).limit(5)) db.test.drop() db.test.insert_many([{"x": i} for i in range(100)]) @@ -424,7 +496,8 @@ def test_limit(self): a.limit(10) for _ in a: break - self.assertRaises(InvalidOperation, a.limit, 5) + with self.assertRaises(InvalidOperation): + a.limit(5) def test_max(self): db = self.db @@ -438,28 +511,31 @@ def find(max_spec, expected_index): return db.test.find().max(max_spec).hint(expected_index) cursor = find([("j", 3)], j_index) - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Tuple. cursor = find((("j", 3),), j_index) - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Compound index. 
index_keys = [("j", ASCENDING), ("k", ASCENDING)] db.test.create_index(index_keys) cursor = find([("j", 3), ("k", 3)], index_keys) - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Wrong order. cursor = find([("k", 3), ("j", 3)], index_keys) - self.assertRaises(OperationFailure, list, cursor) + with self.assertRaises(OperationFailure): + cursor.to_list() # No such index. cursor = find([("k", 3)], "k") - self.assertRaises(OperationFailure, list, cursor) - - self.assertRaises(TypeError, db.test.find().max, 10) - self.assertRaises(TypeError, db.test.find().max, {"j": 10}) + with self.assertRaises(OperationFailure): + cursor.to_list() + with self.assertRaises(TypeError): + db.test.find().max(10) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().max({"j": 10}) # type: ignore[arg-type] def test_min(self): db = self.db @@ -473,28 +549,32 @@ def find(min_spec, expected_index): return db.test.find().min(min_spec).hint(expected_index) cursor = find([("j", 3)], j_index) - self.assertEqual(len(list(cursor)), 7) + self.assertEqual(len(cursor.to_list()), 7) # Tuple. cursor = find((("j", 3),), j_index) - self.assertEqual(len(list(cursor)), 7) + self.assertEqual(len(cursor.to_list()), 7) # Compound index. index_keys = [("j", ASCENDING), ("k", ASCENDING)] db.test.create_index(index_keys) cursor = find([("j", 3), ("k", 3)], index_keys) - self.assertEqual(len(list(cursor)), 7) + self.assertEqual(len(cursor.to_list()), 7) # Wrong order. cursor = find([("k", 3), ("j", 3)], index_keys) - self.assertRaises(OperationFailure, list, cursor) + with self.assertRaises(OperationFailure): + cursor.to_list() # No such index. cursor = find([("k", 3)], "k") - self.assertRaises(OperationFailure, list, cursor) + with self.assertRaises(OperationFailure): + cursor.to_list() - self.assertRaises(TypeError, db.test.find().min, 10) - self.assertRaises(TypeError, db.test.find().min, {"j": 10}) + with self.assertRaises(TypeError): + db.test.find().min(10) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().min({"j": 10}) # type: ignore[arg-type] def test_min_max_without_hint(self): coll = self.db.test @@ -502,20 +582,24 @@ def test_min_max_without_hint(self): coll.create_index(j_index) with self.assertRaises(InvalidOperation): - list(coll.find().min([("j", 3)])) + coll.find().min([("j", 3)]).to_list() with self.assertRaises(InvalidOperation): - list(coll.find().max([("j", 3)])) + coll.find().max([("j", 3)]).to_list() def test_batch_size(self): db = self.db db.test.drop() db.test.insert_many([{"x": x} for x in range(200)]) - self.assertRaises(TypeError, db.test.find().batch_size, None) - self.assertRaises(TypeError, db.test.find().batch_size, "hello") - self.assertRaises(TypeError, db.test.find().batch_size, 5.5) - self.assertRaises(ValueError, db.test.find().batch_size, -1) - self.assertTrue(db.test.find().batch_size(5)) + with self.assertRaises(TypeError): + db.test.find().batch_size(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().batch_size("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().batch_size(5.5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().batch_size(-1) + self.assertTrue((db.test.find()).batch_size(5)) a = db.test.find() for _ in a: break @@ -527,37 +611,37 @@ def cursor_count(cursor, expected_count): count += 1 self.assertEqual(expected_count, count) - cursor_count(db.test.find().batch_size(0), 200) - 
cursor_count(db.test.find().batch_size(1), 200) - cursor_count(db.test.find().batch_size(2), 200) - cursor_count(db.test.find().batch_size(5), 200) - cursor_count(db.test.find().batch_size(100), 200) - cursor_count(db.test.find().batch_size(500), 200) - - cursor_count(db.test.find().batch_size(0).limit(1), 1) - cursor_count(db.test.find().batch_size(1).limit(1), 1) - cursor_count(db.test.find().batch_size(2).limit(1), 1) - cursor_count(db.test.find().batch_size(5).limit(1), 1) - cursor_count(db.test.find().batch_size(100).limit(1), 1) - cursor_count(db.test.find().batch_size(500).limit(1), 1) - - cursor_count(db.test.find().batch_size(0).limit(10), 10) - cursor_count(db.test.find().batch_size(1).limit(10), 10) - cursor_count(db.test.find().batch_size(2).limit(10), 10) - cursor_count(db.test.find().batch_size(5).limit(10), 10) - cursor_count(db.test.find().batch_size(100).limit(10), 10) - cursor_count(db.test.find().batch_size(500).limit(10), 10) + cursor_count((db.test.find()).batch_size(0), 200) + cursor_count((db.test.find()).batch_size(1), 200) + cursor_count((db.test.find()).batch_size(2), 200) + cursor_count((db.test.find()).batch_size(5), 200) + cursor_count((db.test.find()).batch_size(100), 200) + cursor_count((db.test.find()).batch_size(500), 200) + + cursor_count((db.test.find()).batch_size(0).limit(1), 1) + cursor_count((db.test.find()).batch_size(1).limit(1), 1) + cursor_count((db.test.find()).batch_size(2).limit(1), 1) + cursor_count((db.test.find()).batch_size(5).limit(1), 1) + cursor_count((db.test.find()).batch_size(100).limit(1), 1) + cursor_count((db.test.find()).batch_size(500).limit(1), 1) + + cursor_count((db.test.find()).batch_size(0).limit(10), 10) + cursor_count((db.test.find()).batch_size(1).limit(10), 10) + cursor_count((db.test.find()).batch_size(2).limit(10), 10) + cursor_count((db.test.find()).batch_size(5).limit(10), 10) + cursor_count((db.test.find()).batch_size(100).limit(10), 10) + cursor_count((db.test.find()).batch_size(500).limit(10), 10) cur = db.test.find().batch_size(1) next(cur) # find command batchSize should be 1 - self.assertEqual(0, len(cur._Cursor__data)) + self.assertEqual(0, len(cur._data)) next(cur) - self.assertEqual(0, len(cur._Cursor__data)) + self.assertEqual(0, len(cur._data)) next(cur) - self.assertEqual(0, len(cur._Cursor__data)) + self.assertEqual(0, len(cur._data)) next(cur) - self.assertEqual(0, len(cur._Cursor__data)) + self.assertEqual(0, len(cur._data)) def test_limit_and_batch_size(self): db = self.db @@ -566,51 +650,51 @@ def test_limit_and_batch_size(self): curs = db.test.find().limit(0).batch_size(10) next(curs) - self.assertEqual(10, curs._Cursor__retrieved) + self.assertEqual(10, curs._retrieved) curs = db.test.find(limit=0, batch_size=10) next(curs) - self.assertEqual(10, curs._Cursor__retrieved) + self.assertEqual(10, curs._retrieved) curs = db.test.find().limit(-2).batch_size(0) next(curs) - self.assertEqual(2, curs._Cursor__retrieved) + self.assertEqual(2, curs._retrieved) curs = db.test.find(limit=-2, batch_size=0) next(curs) - self.assertEqual(2, curs._Cursor__retrieved) + self.assertEqual(2, curs._retrieved) curs = db.test.find().limit(-4).batch_size(5) next(curs) - self.assertEqual(4, curs._Cursor__retrieved) + self.assertEqual(4, curs._retrieved) curs = db.test.find(limit=-4, batch_size=5) next(curs) - self.assertEqual(4, curs._Cursor__retrieved) + self.assertEqual(4, curs._retrieved) curs = db.test.find().limit(50).batch_size(500) next(curs) - self.assertEqual(50, curs._Cursor__retrieved) + self.assertEqual(50, 
curs._retrieved) curs = db.test.find(limit=50, batch_size=500) next(curs) - self.assertEqual(50, curs._Cursor__retrieved) + self.assertEqual(50, curs._retrieved) curs = db.test.find().batch_size(500) next(curs) - self.assertEqual(500, curs._Cursor__retrieved) + self.assertEqual(500, curs._retrieved) curs = db.test.find(batch_size=500) next(curs) - self.assertEqual(500, curs._Cursor__retrieved) + self.assertEqual(500, curs._retrieved) curs = db.test.find().limit(50) next(curs) - self.assertEqual(50, curs._Cursor__retrieved) + self.assertEqual(50, curs._retrieved) curs = db.test.find(limit=50) next(curs) - self.assertEqual(50, curs._Cursor__retrieved) + self.assertEqual(50, curs._retrieved) # these two might be shaky, as the default # is set by the server. as of 2.0.0-rc0, 101 @@ -618,24 +702,28 @@ def test_limit_and_batch_size(self): # for queries without ntoreturn curs = db.test.find() next(curs) - self.assertEqual(101, curs._Cursor__retrieved) + self.assertEqual(101, curs._retrieved) curs = db.test.find().limit(0).batch_size(0) next(curs) - self.assertEqual(101, curs._Cursor__retrieved) + self.assertEqual(101, curs._retrieved) curs = db.test.find(limit=0, batch_size=0) next(curs) - self.assertEqual(101, curs._Cursor__retrieved) + self.assertEqual(101, curs._retrieved) def test_skip(self): db = self.db - self.assertRaises(TypeError, db.test.find().skip, None) - self.assertRaises(TypeError, db.test.find().skip, "hello") - self.assertRaises(TypeError, db.test.find().skip, 5.5) - self.assertRaises(ValueError, db.test.find().skip, -5) - self.assertTrue(db.test.find().skip(5)) + with self.assertRaises(TypeError): + db.test.find().skip(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().skip("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().skip(5.5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().skip(-5) + self.assertTrue((db.test.find()).skip(5)) db.drop_collection("test") @@ -665,7 +753,7 @@ def test_skip(self): self.assertEqual(i["x"], 10) break - for i in db.test.find().skip(1000): + for _ in db.test.find().skip(1000): self.fail() a = db.test.find() @@ -677,10 +765,14 @@ def test_skip(self): def test_sort(self): db = self.db - self.assertRaises(TypeError, db.test.find().sort, 5) - self.assertRaises(ValueError, db.test.find().sort, []) - self.assertRaises(TypeError, db.test.find().sort, [], ASCENDING) - self.assertRaises(TypeError, db.test.find().sort, [("hello", DESCENDING)], DESCENDING) + with self.assertRaises(TypeError): + db.test.find().sort(5) # type: ignore[arg-type] + with self.assertRaises(ValueError): + db.test.find().sort([]) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().sort([], ASCENDING) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.test.find().sort([("hello", DESCENDING)], DESCENDING) # type: ignore[arg-type] db.test.drop() @@ -709,13 +801,15 @@ def test_sort(self): random.shuffle(shuffled) db.test.drop() - for (a, b) in shuffled: + for a, b in shuffled: db.test.insert_one({"a": a, "b": b}) result = [ (i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), ("a", ASCENDING)]) ] self.assertEqual(result, expected) + result = [(i["a"], i["b"]) for i in db.test.find().sort([("b", DESCENDING), "a"])] + self.assertEqual(result, expected) a = db.test.find() a.sort("x", ASCENDING) @@ -728,28 +822,31 @@ def test_where(self): db.test.drop() a = db.test.find() - self.assertRaises(TypeError, a.where, 5) - 
self.assertRaises(TypeError, a.where, None) - self.assertRaises(TypeError, a.where, {}) + with self.assertRaises(TypeError): + a.where(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + a.where(None) # type: ignore[arg-type] + with self.assertRaises(TypeError): + a.where({}) # type: ignore[arg-type] db.test.insert_many([{"x": i} for i in range(10)]) - self.assertEqual(3, len(list(db.test.find().where("this.x < 3")))) - self.assertEqual(3, len(list(db.test.find().where(Code("this.x < 3"))))) + self.assertEqual(3, len(db.test.find().where("this.x < 3").to_list())) + self.assertEqual(3, len(db.test.find().where(Code("this.x < 3")).to_list())) code_with_scope = Code("this.x < i", {"i": 3}) if client_context.version.at_least(4, 3, 3): # MongoDB 4.4 removed support for Code with scope. with self.assertRaises(OperationFailure): - list(db.test.find().where(code_with_scope)) + db.test.find().where(code_with_scope).to_list() code_with_empty_scope = Code("this.x < 3", {}) with self.assertRaises(OperationFailure): - list(db.test.find().where(code_with_empty_scope)) + db.test.find().where(code_with_empty_scope).to_list() else: - self.assertEqual(3, len(list(db.test.find().where(code_with_scope)))) + self.assertEqual(3, len(db.test.find().where(code_with_scope).to_list())) - self.assertEqual(10, len(list(db.test.find()))) + self.assertEqual(10, len(db.test.find().to_list())) self.assertEqual([0, 1, 2], [a["x"] for a in db.test.find().where("this.x < 3")]) self.assertEqual([], [a["x"] for a in db.test.find({"x": 5}).where("this.x < 3")]) self.assertEqual([5], [a["x"] for a in db.test.find({"x": 5}).where("this.x > 3")]) @@ -834,24 +931,26 @@ def test_clone(self): self.assertNotEqual(cursor, cursor.clone()) # Just test attributes - cursor = self.db.test.find( - {"x": re.compile("^hello.*")}, - projection={"_id": False}, - skip=1, - no_cursor_timeout=True, - cursor_type=CursorType.TAILABLE_AWAIT, - sort=[("x", 1)], - allow_partial_results=True, - oplog_replay=True, - batch_size=123, - collation={"locale": "en_US"}, - hint=[("_id", 1)], - max_scan=100, - max_time_ms=1000, - return_key=True, - show_record_id=True, - snapshot=True, - allow_disk_use=True, + cursor = ( + self.db.test.find( + {"x": re.compile("^hello.*")}, + projection={"_id": False}, + skip=1, + no_cursor_timeout=True, + cursor_type=CursorType.TAILABLE_AWAIT, + sort=[("x", 1)], + allow_partial_results=True, + oplog_replay=True, + batch_size=123, + collation={"locale": "en_US"}, + hint=[("_id", 1)], + max_scan=100, + max_time_ms=1000, + return_key=True, + show_record_id=True, + snapshot=True, + allow_disk_use=True, + ) ).limit(2) cursor.min([("a", 1)]).max([("b", 3)]) cursor.add_option(128) @@ -863,17 +962,20 @@ def test_clone(self): # Shallow copies can so can mutate cursor2 = copy.copy(cursor) - cursor2._Cursor__projection["cursor2"] = False - self.assertTrue("cursor2" in cursor._Cursor__projection) + cursor2._projection["cursor2"] = False + self.assertIsNotNone(cursor._projection) + self.assertIn("cursor2", cursor._projection.keys()) # Deepcopies and shouldn't mutate cursor3 = copy.deepcopy(cursor) - cursor3._Cursor__projection["cursor3"] = False - self.assertFalse("cursor3" in cursor._Cursor__projection) + cursor3._projection["cursor3"] = False + self.assertIsNotNone(cursor._projection) + self.assertNotIn("cursor3", cursor._projection.keys()) cursor4 = cursor.clone() - cursor4._Cursor__projection["cursor4"] = False - self.assertFalse("cursor4" in cursor._Cursor__projection) + cursor4._projection["cursor4"] = False + 
self.assertIsNotNone(cursor._projection) + self.assertNotIn("cursor4", cursor._projection.keys()) # Test memo when deepcopying queries query = {"hello": "world"} @@ -882,16 +984,18 @@ def test_clone(self): cursor2 = copy.deepcopy(cursor) - self.assertNotEqual(id(cursor._Cursor__spec), id(cursor2._Cursor__spec)) - self.assertEqual(id(cursor2._Cursor__spec["reflexive"]), id(cursor2._Cursor__spec)) - self.assertEqual(len(cursor2._Cursor__spec), 2) + self.assertNotEqual(id(cursor._spec), id(cursor2._spec)) + self.assertEqual(id(cursor2._spec["reflexive"]), id(cursor2._spec)) + self.assertEqual(len(cursor2._spec), 2) # Ensure hints are cloned as the correct type cursor = self.db.test.find().hint([("z", 1), ("a", 1)]) cursor2 = copy.deepcopy(cursor) - self.assertTrue(isinstance(cursor2._Cursor__hint, SON)) - self.assertEqual(cursor._Cursor__hint, cursor2._Cursor__hint) + # Internal types are now dict rather than SON by default + self.assertIsInstance(cursor2._hint, dict) + self.assertEqual(cursor._hint, cursor2._hint) + @client_context.require_sync def test_clone_empty(self): self.db.test.delete_many({}) self.db.test.insert_many([{"x": i} for i in range(1, 4)]) @@ -900,11 +1004,15 @@ def test_clone_empty(self): self.assertRaises(StopIteration, cursor.next) self.assertRaises(StopIteration, cursor2.next) + # Cursors don't support slicing + @client_context.require_sync def test_bad_getitem(self): self.assertRaises(TypeError, lambda x: self.db.test.find()[x], "hello") self.assertRaises(TypeError, lambda x: self.db.test.find()[x], 5.5) self.assertRaises(TypeError, lambda x: self.db.test.find()[x], None) + # Cursors don't support slicing + @client_context.require_sync def test_getitem_slice_index(self): self.db.drop_collection("test") self.db.test.insert_many([{"i": i} for i in range(100)]) @@ -914,51 +1022,53 @@ def test_getitem_slice_index(self): self.assertRaises(IndexError, lambda: self.db.test.find()[-1:]) self.assertRaises(IndexError, lambda: self.db.test.find()[1:2:2]) - for a, b in zip(count(0), self.db.test.find()): + for a, b in zip(count(0), self.db.test.find()): # type: ignore[call-overload] self.assertEqual(a, b["i"]) - self.assertEqual(100, len(list(self.db.test.find()[0:]))) - for a, b in zip(count(0), self.db.test.find()[0:]): + self.assertEqual(100, len(list(self.db.test.find()[0:]))) # type: ignore[call-overload] + for a, b in zip(count(0), self.db.test.find()[0:]): # type: ignore[call-overload] self.assertEqual(a, b["i"]) - self.assertEqual(80, len(list(self.db.test.find()[20:]))) - for a, b in zip(count(20), self.db.test.find()[20:]): + self.assertEqual(80, len(list(self.db.test.find()[20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[20:]): # type: ignore[call-overload] self.assertEqual(a, b["i"]) - for a, b in zip(count(99), self.db.test.find()[99:]): + for a, b in zip(count(99), self.db.test.find()[99:]): # type: ignore[call-overload] self.assertEqual(a, b["i"]) - for i in self.db.test.find()[1000:]: + for _i in self.db.test.find()[1000:]: self.fail() - self.assertEqual(5, len(list(self.db.test.find()[20:25]))) - self.assertEqual(5, len(list(self.db.test.find()[20:25]))) - for a, b in zip(count(20), self.db.test.find()[20:25]): + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) # type: ignore[call-overload] + self.assertEqual(5, len(list(self.db.test.find()[20:25]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[20:25]): # type: ignore[call-overload] self.assertEqual(a, b["i"]) - self.assertEqual(80, 
len(list(self.db.test.find()[40:45][20:]))) - for a, b in zip(count(20), self.db.test.find()[40:45][20:]): + self.assertEqual(80, len(list(self.db.test.find()[40:45][20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[40:45][20:]): # type: ignore[call-overload] self.assertEqual(a, b["i"]) - self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) - for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): + self.assertEqual(80, len(list(self.db.test.find()[40:45].limit(0).skip(20)))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find()[40:45].limit(0).skip(20)): # type: ignore[call-overload] self.assertEqual(a, b["i"]) - self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:]))) - for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): + self.assertEqual(80, len(list(self.db.test.find().limit(10).skip(40)[20:]))) # type: ignore[call-overload] + for a, b in zip(count(20), self.db.test.find().limit(10).skip(40)[20:]): # type: ignore[call-overload] self.assertEqual(a, b["i"]) - self.assertEqual(1, len(list(self.db.test.find()[:1]))) - self.assertEqual(5, len(list(self.db.test.find()[:5]))) + self.assertEqual(1, len(list(self.db.test.find()[:1]))) # type: ignore[call-overload] + self.assertEqual(5, len(list(self.db.test.find()[:5]))) # type: ignore[call-overload] - self.assertEqual(1, len(list(self.db.test.find()[99:100]))) - self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) - self.assertEqual(0, len(list(self.db.test.find()[10:10]))) - self.assertEqual(0, len(list(self.db.test.find()[:0]))) - self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)))) + self.assertEqual(1, len(list(self.db.test.find()[99:100]))) # type: ignore[call-overload] + self.assertEqual(1, len(list(self.db.test.find()[99:1000]))) # type: ignore[call-overload] + self.assertEqual(0, len(list(self.db.test.find()[10:10]))) # type: ignore[call-overload] + self.assertEqual(0, len(list(self.db.test.find()[:0]))) # type: ignore[call-overload] + self.assertEqual(80, len(list(self.db.test.find()[10:10].limit(0).skip(20)))) # type: ignore[call-overload] self.assertRaises(IndexError, lambda: self.db.test.find()[10:8]) + # Cursors don't support slicing + @client_context.require_sync def test_getitem_numeric_index(self): self.db.drop_collection("test") self.db.test.insert_many([{"i": i} for i in range(100)]) @@ -974,22 +1084,30 @@ def test_getitem_numeric_index(self): self.assertRaises(IndexError, lambda x: self.db.test.find()[x], 100) self.assertRaises(IndexError, lambda x: self.db.test.find().skip(50)[x], 50) + @client_context.require_sync + def test_iteration_with_list(self): + self.db.drop_collection("test") + self.db.test.insert_many([{"i": i} for i in range(100)]) + + cur = self.db.test.find().batch_size(10) + + self.assertEqual(100, len(list(cur))) # type: ignore[call-overload] + def test_len(self): - self.assertRaises(TypeError, len, self.db.test.find()) + with self.assertRaises(TypeError): + len(self.db.test.find()) # type: ignore[arg-type] def test_properties(self): self.assertEqual(self.db.test, self.db.test.find().collection) - def set_coll(): + with self.assertRaises(AttributeError): self.db.test.find().collection = "hello" # type: ignore - self.assertRaises(AttributeError, set_coll) - def test_get_more(self): db = self.db db.drop_collection("test") db.test.insert_many([{"i": i} for i in range(10)]) - self.assertEqual(10, 
len(list(db.test.find().batch_size(5)))) + self.assertEqual(10, len(db.test.find().batch_size(5).to_list())) def test_tailable(self): db = self.db @@ -1023,7 +1141,7 @@ def test_tailable(self): # have more than 3 documents. Just make sure # this doesn't raise... db.test.insert_many([{"x": i} for i in range(4, 7)]) - self.assertEqual(0, len(list(cursor))) + self.assertEqual(0, len(cursor.to_list())) # and that the cursor doesn't think it's still alive. self.assertFalse(cursor.alive) @@ -1031,27 +1149,30 @@ def test_tailable(self): self.assertEqual(3, db.test.count_documents({})) # __getitem__(index) - for cursor in ( - db.test.find(cursor_type=CursorType.TAILABLE), - db.test.find(cursor_type=CursorType.TAILABLE_AWAIT), - ): - self.assertEqual(4, cursor[0]["x"]) - self.assertEqual(5, cursor[1]["x"]) - self.assertEqual(6, cursor[2]["x"]) - - cursor.rewind() - self.assertEqual([4], [doc["x"] for doc in cursor[0:1]]) - cursor.rewind() - self.assertEqual([5], [doc["x"] for doc in cursor[1:2]]) - cursor.rewind() - self.assertEqual([6], [doc["x"] for doc in cursor[2:3]]) - cursor.rewind() - self.assertEqual([4, 5], [doc["x"] for doc in cursor[0:2]]) - cursor.rewind() - self.assertEqual([5, 6], [doc["x"] for doc in cursor[1:3]]) - cursor.rewind() - self.assertEqual([4, 5, 6], [doc["x"] for doc in cursor[0:3]]) - + if _IS_SYNC: + for cursor in ( + db.test.find(cursor_type=CursorType.TAILABLE), + db.test.find(cursor_type=CursorType.TAILABLE_AWAIT), + ): + self.assertEqual(4, cursor[0]["x"]) + self.assertEqual(5, cursor[1]["x"]) + self.assertEqual(6, cursor[2]["x"]) + + cursor.rewind() + self.assertEqual([4], [doc["x"] for doc in cursor[0:1]]) + cursor.rewind() + self.assertEqual([5], [doc["x"] for doc in cursor[1:2]]) + cursor.rewind() + self.assertEqual([6], [doc["x"] for doc in cursor[2:3]]) + cursor.rewind() + self.assertEqual([4, 5], [doc["x"] for doc in cursor[0:2]]) + cursor.rewind() + self.assertEqual([5, 6], [doc["x"] for doc in cursor[1:3]]) + cursor.rewind() + self.assertEqual([4, 5, 6], [doc["x"] for doc in cursor[0:3]]) + + # The Async API does not support threading + @client_context.require_sync def test_concurrent_close(self): """Ensure a tailable can be closed from another thread.""" db = self.db @@ -1062,8 +1183,12 @@ def test_concurrent_close(self): def iterate_cursor(): while cursor.alive: - for doc in cursor: - pass + try: + for _doc in cursor: + pass + except OperationFailure as e: + if e.code != 237: # CursorKilled error code + raise t = threading.Thread(target=iterate_cursor) t.start() @@ -1095,15 +1220,6 @@ def test_distinct(self): self.assertEqual(["b", "c"], distinct) - @client_context.require_version_max(4, 1, 0, -1) - def test_max_scan(self): - self.db.drop_collection("test") - self.db.test.insert_many([{} for _ in range(100)]) - - self.assertEqual(100, len(list(self.db.test.find()))) - self.assertEqual(50, len(list(self.db.test.find().max_scan(50)))) - self.assertEqual(50, len(list(self.db.test.find().max_scan(90).max_scan(50)))) - def test_with_statement(self): self.db.drop_collection("test") self.db.test.insert_many([{} for _ in range(100)]) @@ -1114,7 +1230,7 @@ def test_with_statement(self): self.assertFalse(c2.alive) with self.db.test.find() as c2: - self.assertEqual(100, len(list(c2))) + self.assertEqual(100, len(c2.to_list())) self.assertFalse(c2.alive) self.assertTrue(c1.alive) @@ -1123,7 +1239,7 @@ def test_comment(self): self.client.drop_database(self.db) self.db.command("profile", 2) # Profile ALL commands. 
try: - list(self.db.test.find().comment("foo")) + self.db.test.find().comment("foo").to_list() count = self.db.system.profile.count_documents( {"ns": "pymongo_test.test", "op": "query", "command.comment": "foo"} ) @@ -1157,7 +1273,7 @@ def test_alive(self): while True: cursor.next() n += 1 - if 3 == n: + if n == 3: self.assertFalse(cursor.alive) break @@ -1169,16 +1285,14 @@ def test_close_kills_cursor_synchronously(self): self.client._process_periodic_tasks() listener = AllowListEventListener("killCursors") - results = listener.results - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) coll = client[self.db.name].test_close_kills_cursors # Add some test data. docs_inserted = 1000 coll.insert_many([{"i": i} for i in range(docs_inserted)]) - results.clear() + listener.reset() # Close a cursor while it's still open on the server. cursor = coll.find().batch_size(10) @@ -1187,13 +1301,13 @@ def test_close_kills_cursor_synchronously(self): cursor.close() def assertCursorKilled(): - self.assertEqual(1, len(results["started"])) - self.assertEqual("killCursors", results["started"][0].command_name) - self.assertEqual(1, len(results["succeeded"])) - self.assertEqual("killCursors", results["succeeded"][0].command_name) + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) assertCursorKilled() - results.clear() + listener.reset() # Close a command cursor while it's still open on the server. cursor = coll.aggregate([], batchSize=10) @@ -1204,7 +1318,59 @@ def assertCursorKilled(): if cursor.cursor_id: assertCursorKilled() else: - self.assertEqual(0, len(results["started"])) + self.assertEqual(0, len(listener.started_events)) + + @client_context.require_failCommand_appName + def test_timeout_kills_cursor_synchronously(self): + listener = AllowListEventListener("killCursors") + client = self.rs_or_single_client(event_listeners=[listener]) + coll = client[self.db.name].test_timeout_kills_cursor + + # Add some test data. + docs_inserted = 10 + coll.insert_many([{"i": i} for i in range(docs_inserted)]) + + listener.reset() + + cursor = coll.find({}, batch_size=1) + cursor.next() + + # Mock getMore commands timing out. 
+ mock_timeout_errors = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "errorCode": 50, + "failCommands": ["getMore"], + }, + } + + with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + cursor.next() + + def assertCursorKilled(): + wait_until( + lambda: len(listener.succeeded_events), + "find successful killCursors command", + ) + + self.assertEqual(1, len(listener.started_events)) + self.assertEqual("killCursors", listener.started_events[0].command_name) + self.assertEqual(1, len(listener.succeeded_events)) + self.assertEqual("killCursors", listener.succeeded_events[0].command_name) + + assertCursorKilled() + listener.reset() + + cursor = coll.aggregate([], batchSize=1) + cursor.next() + + with self.fail_point(mock_timeout_errors): + with self.assertRaises(ExecutionTimeout): + cursor.next() + + assertCursorKilled() def test_delete_not_initialized(self): # Creating a cursor with invalid arguments will not run __init__ @@ -1214,8 +1380,7 @@ def test_delete_not_initialized(self): def test_getMore_does_not_send_readPreference(self): listener = AllowListEventListener("find", "getMore") - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) # We never send primary read preference so override the default. coll = client[self.db.name].get_collection( "test", read_preference=ReadPreference.PRIMARY_PREFERRED @@ -1225,8 +1390,8 @@ def test_getMore_does_not_send_readPreference(self): coll.insert_many([{} for _ in range(5)]) self.addCleanup(coll.drop) - list(coll.find(batch_size=3)) - started = listener.results["started"] + coll.find(batch_size=3).to_list() + started = listener.started_events self.assertEqual(2, len(started)) self.assertEqual("find", started[0].command_name) if client_context.is_rs or client_context.is_mongos: @@ -1236,6 +1401,101 @@ def test_getMore_does_not_send_readPreference(self): self.assertEqual("getMore", started[1].command_name) self.assertNotIn("$readPreference", started[1].command) + @client_context.require_replica_set + def test_to_list_tailable(self): + oplog = self.client.local.oplog.rs + last = oplog.find().sort("$natural", pymongo.DESCENDING).limit(-1).next() + ts = last["ts"] + # Set maxAwaitTimeMS=1 to speed up the test and avoid blocking on the noop writer. + c = oplog.find( + {"ts": {"$gte": ts}}, cursor_type=pymongo.CursorType.TAILABLE_AWAIT, oplog_replay=True + ).max_await_time_ms(1) + self.addCleanup(c.close) + # Wait for the change to be read. 
+ docs = [] + while not docs: + docs = c.to_list() + self.assertGreaterEqual(len(docs), 1) + + def test_to_list_empty(self): + c = self.db.does_not_exist.find() + docs = c.to_list() + self.assertEqual([], docs) + + def test_to_list_length(self): + coll = self.db.test + coll.insert_many([{} for _ in range(5)]) + self.addCleanup(coll.drop) + c = coll.find() + docs = c.to_list(3) + self.assertEqual(len(docs), 3) + + c = coll.find(batch_size=2) + docs = c.to_list(3) + self.assertEqual(len(docs), 3) + docs = c.to_list(3) + self.assertEqual(len(docs), 2) + + @flaky(reason="PYTHON-3522") + def test_to_list_csot_applied(self): + client = self.single_client(timeoutMS=500, w=1) + coll = client.pymongo.test + # Initialize the client with a larger timeout to help make test less flaky + with pymongo.timeout(10): + coll.insert_many([{} for _ in range(5)]) + cursor = coll.find({"$where": delay(1)}) + with self.assertRaises(PyMongoError) as ctx: + cursor.to_list() + self.assertTrue(ctx.exception.timeout) + + @client_context.require_change_streams + def test_command_cursor_to_list(self): + # Set maxAwaitTimeMS=1 to speed up the test. + c = self.db.test.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addCleanup(c.close) + docs = c.to_list() + self.assertGreaterEqual(len(docs), 0) + + @client_context.require_change_streams + def test_command_cursor_to_list_empty(self): + # Set maxAwaitTimeMS=1 to speed up the test. + c = self.db.does_not_exist.aggregate([{"$changeStream": {}}], maxAwaitTimeMS=1) + self.addCleanup(c.close) + docs = c.to_list() + self.assertEqual([], docs) + + @client_context.require_change_streams + def test_command_cursor_to_list_length(self): + db = self.db + db.drop_collection("test") + db.test.insert_many([{"foo": 1}, {"foo": 2}]) + + pipeline = {"$project": {"_id": False, "foo": True}} + result = db.test.aggregate([pipeline]) + self.assertEqual(len(result.to_list()), 2) + + result = db.test.aggregate([pipeline]) + self.assertEqual(len(result.to_list(1)), 1) + + @client_context.require_failCommand_blockConnection + @flaky(reason="PYTHON-3522") + def test_command_cursor_to_list_csot_applied(self): + client = self.single_client(timeoutMS=500, w=1) + coll = client.pymongo.test + # Initialize the client with a larger timeout to help make test less flaky + with pymongo.timeout(10): + coll.insert_many([{} for _ in range(5)]) + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 5}, + "data": {"failCommands": ["getMore"], "blockConnection": True, "blockTimeMS": 1000}, + } + cursor = coll.aggregate([], batchSize=1) + with self.fail_point(fail_command): + with self.assertRaises(PyMongoError) as ctx: + cursor.to_list() + self.assertTrue(ctx.exception.timeout) + class TestRawBatchCursor(IntegrationTest): def test_find_raw(self): @@ -1243,7 +1503,7 @@ def test_find_raw(self): c.drop() docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = list(c.find_raw_batches().sort("_id")) + batches = c.find_raw_batches().sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1255,19 +1515,19 @@ def test_find_raw_transaction(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) with client.start_session() as session: with session.start_transaction(): - batches = list( + batches = ( client[self.db.name].test.find_raw_batches(session=session).sort("_id") - ) - cmd = 
listener.results["started"][0] + ).to_list() + cmd = listener.started_events[0] self.assertEqual(cmd.command_name, "find") self.assertIn("$clusterTime", cmd.command) self.assertEqual(cmd.command["startTransaction"], True) self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. - last_cmd = listener.results["succeeded"][-1] + last_cmd = listener.succeeded_events[-1] self.assertEqual( last_cmd.reply["$clusterTime"]["clusterTime"], session.cluster_time["clusterTime"], @@ -1285,16 +1545,16 @@ def test_find_raw_retryable_reads(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], retryReads=True) + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) with self.fail_point( {"mode": {"times": 1}, "data": {"failCommands": ["find"], "closeConnection": True}} ): - batches = list(client[self.db.name].test.find_raw_batches().sort("_id")) + batches = client[self.db.name].test.find_raw_batches().sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results["started"]), 2) - for cmd in listener.results["started"]: + self.assertEqual(len(listener.started_events), 2) + for cmd in listener.started_events: self.assertEqual(cmd.command_name, "find") @client_context.require_version_min(5, 0, 0) @@ -1306,15 +1566,15 @@ def test_find_raw_snapshot_reads(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], retryReads=True) + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] with client.start_session(snapshot=True) as session: db.test.distinct("x", {}, session=session) - batches = list(db.test.find_raw_batches(session=session).sort("_id")) + batches = db.test.find_raw_batches(session=session).sort("_id").to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results["started"][1].command + find_cmd = listener.started_events[1].command self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) @@ -1342,7 +1602,7 @@ def test_exhaust(self): c = self.db.test c.drop() c.insert_many({"_id": i} for i in range(200)) - result = b"".join(c.find_raw_batches(cursor_type=CursorType.EXHAUST)) + result = b"".join(c.find_raw_batches(cursor_type=CursorType.EXHAUST).to_list()) self.assertEqual([{"_id": i} for i in range(200)], decode_all(result)) def test_server_error(self): @@ -1359,28 +1619,27 @@ def test_get_item(self): def test_collation(self): next(self.db.test.find_raw_batches(collation=Collation("en_US"))) - @client_context.require_no_mmap # MMAPv1 does not support read concern def test_read_concern(self): self.db.get_collection("test", write_concern=WriteConcern(w="majority")).insert_one({}) c = self.db.get_collection("test", read_concern=ReadConcern("majority")) next(c.find_raw_batches()) def test_monitoring(self): - listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() c.insert_many([{"_id": i} for i in range(10)]) - listener.results.clear() + listener.reset() cursor = c.find_raw_batches(batch_size=4) # First raw batch of 4 documents. 
next(cursor) - started = listener.results["started"][0] - succeeded = listener.results["succeeded"][0] - self.assertEqual(0, len(listener.results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("find", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("find", succeeded.command_name) @@ -1389,17 +1648,16 @@ def test_monitoring(self): # The batch is a list of one raw bytes object. self.assertEqual(len(csr["firstBatch"]), 1) - self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(0, 4)]) + self.assertEqual(decode_all(csr["firstBatch"][0]), [{"_id": i} for i in range(4)]) - listener.results.clear() + listener.reset() # Next raw batch of 4 documents. next(cursor) try: - results = listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("getMore", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getMore", succeeded.command_name) @@ -1409,20 +1667,16 @@ def test_monitoring(self): self.assertEqual(decode_all(csr["nextBatch"][0]), [{"_id": i} for i in range(4, 8)]) finally: # Finish the cursor. - tuple(cursor) + cursor.close() class TestRawBatchCommandCursor(IntegrationTest): - @classmethod - def setUpClass(cls): - super(TestRawBatchCommandCursor, cls).setUpClass() - def test_aggregate_raw(self): c = self.db.test c.drop() docs = [{"_id": i, "x": 3.0 * i} for i in range(10)] c.insert_many(docs) - batches = list(c.aggregate_raw_batches([{"$sort": {"_id": 1}}])) + batches = (c.aggregate_raw_batches([{"$sort": {"_id": 1}}])).to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) @@ -1434,21 +1688,21 @@ def test_aggregate_raw_transaction(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) with client.start_session() as session: with session.start_transaction(): - batches = list( + batches = ( client[self.db.name].test.aggregate_raw_batches( [{"$sort": {"_id": 1}}], session=session ) - ) - cmd = listener.results["started"][0] + ).to_list() + cmd = listener.started_events[0] self.assertEqual(cmd.command_name, "aggregate") self.assertIn("$clusterTime", cmd.command) self.assertEqual(cmd.command["startTransaction"], True) self.assertEqual(cmd.command["txnNumber"], 1) # Ensure we update $clusterTime from the command response. 
- last_cmd = listener.results["succeeded"][-1] + last_cmd = listener.succeeded_events[-1] self.assertEqual( last_cmd.reply["$clusterTime"]["clusterTime"], session.cluster_time["clusterTime"], @@ -1465,16 +1719,18 @@ def test_aggregate_raw_retryable_reads(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], retryReads=True) + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) with self.fail_point( {"mode": {"times": 1}, "data": {"failCommands": ["aggregate"], "closeConnection": True}} ): - batches = list(client[self.db.name].test.aggregate_raw_batches([{"$sort": {"_id": 1}}])) + batches = ( + client[self.db.name].test.aggregate_raw_batches([{"$sort": {"_id": 1}}]) + ).to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - self.assertEqual(len(listener.results["started"]), 3) - cmds = listener.results["started"] + self.assertEqual(len(listener.started_events), 3) + cmds = listener.started_events self.assertEqual(cmds[0].command_name, "aggregate") self.assertEqual(cmds[1].command_name, "aggregate") @@ -1487,15 +1743,17 @@ def test_aggregate_raw_snapshot_reads(self): c.insert_many(docs) listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener], retryReads=True) + client = self.rs_or_single_client(event_listeners=[listener], retryReads=True) db = client[self.db.name] with client.start_session(snapshot=True) as session: db.test.distinct("x", {}, session=session) - batches = list(db.test.aggregate_raw_batches([{"$sort": {"_id": 1}}], session=session)) + batches = ( + db.test.aggregate_raw_batches([{"$sort": {"_id": 1}}], session=session) + ).to_list() self.assertEqual(1, len(batches)) self.assertEqual(docs, decode_all(batches[0])) - find_cmd = listener.results["started"][1].command + find_cmd = listener.started_events[1].command self.assertEqual(find_cmd["readConcern"]["level"], "snapshot") self.assertIsNotNone(find_cmd["readConcern"]["atClusterTime"]) @@ -1507,7 +1765,7 @@ def test_server_error(self): c.insert_one({"_id": 10, "x": "not a number"}) with self.assertRaises(OperationFailure) as exc: - list( + ( self.db.test.aggregate_raw_batches( [ { @@ -1517,32 +1775,32 @@ def test_server_error(self): ], batchSize=4, ) - ) + ).to_list() # The server response was decoded, not left raw. self.assertIsInstance(exc.exception.details, dict) def test_get_item(self): with self.assertRaises(InvalidOperation): - self.db.test.aggregate_raw_batches([])[0] + (self.db.test.aggregate_raw_batches([]))[0] def test_collation(self): next(self.db.test.aggregate_raw_batches([], collation=Collation("en_US"))) def test_monitoring(self): - listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) c = client.pymongo_test.test c.drop() c.insert_many([{"_id": i} for i in range(10)]) - listener.results.clear() + listener.reset() cursor = c.aggregate_raw_batches([{"$sort": {"_id": 1}}], batchSize=4) # Start cursor, no initial batch. 
- started = listener.results["started"][0] - succeeded = listener.results["succeeded"][0] - self.assertEqual(0, len(listener.results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("aggregate", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("aggregate", succeeded.command_name) @@ -1551,15 +1809,14 @@ def test_monitoring(self): # First batch is empty. self.assertEqual(len(csr["firstBatch"]), 0) - listener.results.clear() + listener.reset() # Batches of 4 documents. n = 0 for batch in cursor: - results = listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertEqual("getMore", started.command_name) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getMore", succeeded.command_name) @@ -1570,7 +1827,29 @@ def test_monitoring(self): self.assertEqual(decode_all(batch), [{"_id": i} for i in range(n, min(n + 4, 10))]) n += 4 - listener.results.clear() + listener.reset() + + @client_context.require_version_min(5, 0, -1) + @client_context.require_no_mongos + @client_context.require_sync + def test_exhaust_cursor_db_set(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) + c = client.pymongo_test.test + c.delete_many({}) + c.insert_many([{"_id": i} for i in range(3)]) + + listener.reset() + + result = list(c.find({}, cursor_type=pymongo.CursorType.EXHAUST, batch_size=1)) + + self.assertEqual(len(result), 3) + + self.assertEqual( + listener.started_command_names(), ["find", "getMore", "getMore", "getMore"] + ) + for cmd in listener.started_events: + self.assertEqual(cmd.command["$db"], "pymongo_test") if __name__ == "__main__": diff --git a/test/test_custom_types.py b/test/test_custom_types.py index e11b5ebe00..02f3127165 100644 --- a/test/test_custom_types.py +++ b/test/test_custom_types.py @@ -13,6 +13,7 @@ # limitations under the License. 
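# A note on the codec setup used throughout the tests below: DecimalEncoder and
# DecimalDecoder now ship in bson.decimal128, and wiring any encoder/decoder
# pair into a collection goes through TypeRegistry/CodecOptions. A minimal
# sketch, not part of the patch; the collection name is illustrative:
from decimal import Decimal

from bson.codec_options import CodecOptions, TypeRegistry
from bson.decimal128 import DecimalDecoder, DecimalEncoder
from pymongo import MongoClient

_opts = CodecOptions(type_registry=TypeRegistry([DecimalEncoder(), DecimalDecoder()]))
_coll = MongoClient().pymongo_test.get_collection("prices", codec_options=_opts)
_coll.insert_one({"amount": Decimal("56.47")})  # stored on the server as Decimal128
assert (_coll.find_one())["amount"] == Decimal("56.47")  # decoded back to Decimal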
"""Test support for callbacks to encode/decode custom types.""" +from __future__ import annotations import datetime import sys @@ -22,11 +23,12 @@ from random import random from typing import Any, Tuple, Type, no_type_check +from bson.decimal128 import DecimalDecoder, DecimalEncoder +from gridfs.synchronous.grid_file import GridIn, GridOut + sys.path[0:0] = [""] -from test import client_context, unittest -from test.test_client import IntegrationTest -from test.utils import rs_client +from test import IntegrationTest, client_context, unittest from bson import ( _BUILT_IN_TYPES, @@ -50,38 +52,17 @@ from bson.errors import InvalidDocument from bson.int64 import Int64 from bson.raw_bson import RawBSONDocument -from gridfs import GridIn, GridOut -from pymongo.collection import ReturnDocument from pymongo.errors import DuplicateKeyError from pymongo.message import _CursorAddress +from pymongo.synchronous.collection import ReturnDocument +_IS_SYNC = True -class DecimalEncoder(TypeEncoder): - @property - def python_type(self): - return Decimal - - def transform_python(self, value): - return Decimal128(value) - - -class DecimalDecoder(TypeDecoder): - @property - def bson_type(self): - return Decimal128 - - def transform_bson(self, value): - return value.to_decimal() - - -class DecimalCodec(DecimalDecoder, DecimalEncoder): - pass +DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalEncoder(), DecimalDecoder()])) -DECIMAL_CODECOPTS = CodecOptions(type_registry=TypeRegistry([DecimalCodec()])) - -class UndecipherableInt64Type(object): +class UndecipherableInt64Type: def __init__(self, value): self.value = value @@ -146,7 +127,7 @@ def transform_bson(self, value): return ResumeTokenToNanDecoder -class CustomBSONTypeTests(object): +class CustomBSONTypeTests: @no_type_check def roundtrip(self, doc): bsonbytes = encode(doc, codec_options=self.codecopts) @@ -164,9 +145,9 @@ def test_encode_decode_roundtrip(self): def test_decode_all(self): documents = [] for dec in range(3): - documents.append({"average": Decimal("56.4%s" % (dec,))}) + documents.append({"average": Decimal(f"56.4{dec}")}) - bsonstream = bytes() + bsonstream = b"" for doc in documents: bsonstream += encode(doc, codec_options=self.codecopts) @@ -274,6 +255,22 @@ def fallback_encoder(value): with self.assertRaises(TypeError): encode(document, codec_options=codecopts) + def test_call_only_once_for_not_handled_big_integers(self): + called_with = [] + + def fallback_encoder(value): + called_with.append(value) + return value + + codecopts = self._get_codec_options(fallback_encoder) + document = {"a": {"b": {"c": 2 << 65}}} + + msg = "MongoDB can only handle up to 8-byte ints" + with self.assertRaises(OverflowError, msg=msg): + encode(document, codec_options=codecopts) + + self.assertEqual(called_with, [2 << 65]) + class TestBSONTypeEnDeCodecs(unittest.TestCase): def test_instantiation(self): @@ -287,7 +284,7 @@ def run_test(base, attrs, fail): else: codec() - class MyType(object): + class MyType: pass run_test( @@ -339,7 +336,6 @@ def test_type_checks(self): class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): - TypeA: Any TypeB: Any fallback_encoder_A2B: Any @@ -350,11 +346,11 @@ class TestBSONCustomTypeEncoderAndFallbackEncoderTandem(unittest.TestCase): @classmethod def setUpClass(cls): - class TypeA(object): + class TypeA: def __init__(self, x): self.value = x - class TypeB(object): + class TypeB: def __init__(self, x): self.value = x @@ -442,12 +438,12 @@ class TestTypeRegistry(unittest.TestCase): 
@classmethod def setUpClass(cls): - class MyIntType(object): + class MyIntType: def __init__(self, x): assert isinstance(x, int) self.x = x - class MyStrType(object): + class MyStrType: def __init__(self, x): assert isinstance(x, str) self.x = x @@ -541,7 +537,8 @@ def transform_bson(self, value): {MyIntEncoder.python_type: codec_instances[1].transform_python}, ) self.assertEqual( - type_registry._decoder_map, {MyIntDecoder.bson_type: codec_instances[0].transform_bson} + type_registry._decoder_map, + {MyIntDecoder.bson_type: codec_instances[0].transform_bson}, ) def test_initialize_fail(self): @@ -552,18 +549,27 @@ def test_initialize_fail(self): with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry([type("AnyType", (object,), {})()]) - err_msg = "fallback_encoder %r is not a callable" % (True,) + err_msg = f"fallback_encoder {True!r} is not a callable" with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry([], True) # type: ignore[arg-type] - err_msg = "fallback_encoder %r is not a callable" % ("hello",) + err_msg = "fallback_encoder {!r} is not a callable".format("hello") with self.assertRaisesRegex(TypeError, err_msg): TypeRegistry(fallback_encoder="hello") # type: ignore[arg-type] + def test_type_registry_codecs(self): + codec_instances = [codec() for codec in self.codecs] + type_registry = TypeRegistry(codec_instances) + self.assertEqual(type_registry.codecs, codec_instances) + + def test_type_registry_fallback(self): + type_registry = TypeRegistry(fallback_encoder=self.fallback_encoder) + self.assertEqual(type_registry.fallback_encoder, self.fallback_encoder) + def test_type_registry_repr(self): codec_instances = [codec() for codec in self.codecs] type_registry = TypeRegistry(codec_instances) - r = "TypeRegistry(type_codecs=%r, fallback_encoder=%r)" % (codec_instances, None) + r = f"TypeRegistry(type_codecs={codec_instances!r}, fallback_encoder={None!r})" self.assertEqual(r, repr(type_registry)) def test_type_registry_eq(self): @@ -617,11 +623,21 @@ class MyType(pytype): # type: ignore class TestCollectionWCustomType(IntegrationTest): def setUp(self): + super().setUp() self.db.test.drop() def tearDown(self): self.db.test.drop() + def test_overflow_int_w_custom_decoder(self): + type_registry = TypeRegistry(fallback_encoder=lambda val: str(val)) + codec_options = CodecOptions(type_registry=type_registry) + collection = self.db.get_collection("test", codec_options=codec_options) + + collection.insert_one({"_id": 1, "data": 2**520}) + ret = collection.find_one() + self.assertEqual(ret["data"], str(2**520)) + def test_command_errors_w_custom_type_decoder(self): db = self.db test_doc = {"_id": 1, "data": "a"} @@ -681,7 +697,7 @@ def test_aggregate_w_custom_type_decoder(self): ] result = test.aggregate(pipeline) - res = list(result)[0] + res = (result.to_list())[0] self.assertEqual(res["_id"], "complete") self.assertIsInstance(res["total_qty"], UndecipherableInt64Type) self.assertEqual(res["total_qty"].value, 20) @@ -729,6 +745,7 @@ def test_find_one_and__w_custom_type_decoder(self): class TestGridFileCustomType(IntegrationTest): def setUp(self): + super().setUp() self.db.drop_collection("fs.files") self.db.drop_collection("fs.chunks") @@ -738,9 +755,7 @@ def test_grid_out_custom_opts(self): db.fs, _id=5, filename="my_file", - contentType="text/html", chunkSize=1000, - aliases=["foo"], metadata={"foo": "red", "bar": "blue"}, bar=3, baz="hello", @@ -749,18 +764,16 @@ def test_grid_out_custom_opts(self): one.close() two = GridOut(db.fs, 5) + two.open() 
self.assertEqual("my_file", two.name) self.assertEqual("my_file", two.filename) self.assertEqual(5, two._id) self.assertEqual(11, two.length) - self.assertEqual("text/html", two.content_type) self.assertEqual(1000, two.chunk_size) - self.assertTrue(isinstance(two.upload_date, datetime.datetime)) - self.assertEqual(["foo"], two.aliases) + self.assertIsInstance(two.upload_date, datetime.datetime) self.assertEqual({"foo": "red", "bar": "blue"}, two.metadata) self.assertEqual(3, two.bar) - self.assertEqual(None, two.md5) for attr in [ "_id", @@ -776,10 +789,12 @@ def test_grid_out_custom_opts(self): self.assertRaises(AttributeError, setattr, two, attr, 5) -class ChangeStreamsWCustomTypesTestMixin(object): +class ChangeStreamsWCustomTypesTestMixin: @no_type_check def change_stream(self, *args, **kwargs): - return self.watched_target.watch(*args, **kwargs) + stream = self.watched_target.watch(*args, max_await_time_ms=1, **kwargs) + self.addCleanup(stream.close) + return stream @no_type_check def insert_and_check(self, change_stream, insert_doc, expected_doc): @@ -791,7 +806,7 @@ def insert_and_check(self, change_stream, insert_doc, expected_doc): def kill_change_stream_cursor(self, change_stream): # Cause a cursor not found error on the next getMore. cursor = change_stream._cursor - address = _CursorAddress(cursor.address, cursor._CommandCursor__ns) + address = _CursorAddress(cursor.address, cursor._ns) client = self.input_target.database.client client._close_cursor_now(cursor.cursor_id, address) @@ -895,12 +910,10 @@ def run_test(doc_cls): class TestCollectionChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): - @classmethod - @client_context.require_no_mmap - @client_context.require_no_standalone - def setUpClass(cls): - super(TestCollectionChangeStreamsWCustomTypes, cls).setUpClass() - cls.db.test.delete_many({}) + @client_context.require_change_streams + def setUp(self): + super().setUp() + self.db.test.delete_many({}) def tearDown(self): self.input_target.drop() @@ -914,13 +927,11 @@ def create_targets(self, *args, **kwargs): class TestDatabaseChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): - @classmethod - @client_context.require_version_min(4, 0, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone - def setUpClass(cls): - super(TestDatabaseChangeStreamsWCustomTypes, cls).setUpClass() - cls.db.test.delete_many({}) + @client_context.require_version_min(4, 2, 0) + @client_context.require_change_streams + def setUp(self): + super().setUp() + self.db.test.delete_many({}) def tearDown(self): self.input_target.drop() @@ -934,13 +945,11 @@ def create_targets(self, *args, **kwargs): class TestClusterChangeStreamsWCustomTypes(IntegrationTest, ChangeStreamsWCustomTypesTestMixin): - @classmethod - @client_context.require_version_min(4, 0, 0) - @client_context.require_no_mmap - @client_context.require_no_standalone - def setUpClass(cls): - super(TestClusterChangeStreamsWCustomTypes, cls).setUpClass() - cls.db.test.delete_many({}) + @client_context.require_version_min(4, 2, 0) + @client_context.require_change_streams + def setUp(self): + super().setUp() + self.db.test.delete_many({}) def tearDown(self): self.input_target.drop() @@ -951,8 +960,7 @@ def create_targets(self, *args, **kwargs): if codec_options: kwargs["type_registry"] = codec_options.type_registry kwargs["document_class"] = codec_options.document_class - self.watched_target = rs_client(*args, **kwargs) - self.addCleanup(self.watched_target.close) + 
self.watched_target = self.rs_client(*args, **kwargs) self.input_target = self.watched_target[self.db.name].test # Insert a record to ensure db, coll are created. self.input_target.insert_one({"data": "dummy"}) diff --git a/test/test_data_lake.py b/test/test_data_lake.py deleted file mode 100644 index fbf79994d3..0000000000 --- a/test/test_data_lake.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2020-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Test Atlas Data Lake.""" - -import os -import sys - -sys.path[0:0] = [""] - -from test import IntegrationTest, client_context, unittest -from test.crud_v2_format import TestCrudV2 -from test.utils import ( - OvertCommandListener, - TestCreator, - rs_client_noauth, - rs_or_single_client, -) - -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data_lake") - - -class TestDataLakeMustConnect(IntegrationTest): - def test_connected_to_data_lake(self): - data_lake = os.environ.get("DATA_LAKE") - if not data_lake: - self.skipTest("DATA_LAKE is not set") - - self.assertTrue( - client_context.is_data_lake, - "client context.is_data_lake must be True when DATA_LAKE is set", - ) - - -class TestDataLakeProse(IntegrationTest): - # Default test database and collection names. - TEST_DB = "test" - TEST_COLLECTION = "driverdata" - - @classmethod - @client_context.require_data_lake - def setUpClass(cls): - super(TestDataLakeProse, cls).setUpClass() - - # Test killCursors - def test_1(self): - listener = OvertCommandListener() - client = rs_or_single_client(event_listeners=[listener]) - cursor = client[self.TEST_DB][self.TEST_COLLECTION].find({}, batch_size=2) - next(cursor) - - # find command assertions - find_cmd = listener.results["succeeded"][-1] - self.assertEqual(find_cmd.command_name, "find") - cursor_id = find_cmd.reply["cursor"]["id"] - cursor_ns = find_cmd.reply["cursor"]["ns"] - - # killCursors command assertions - cursor.close() - started = listener.results["started"][-1] - self.assertEqual(started.command_name, "killCursors") - succeeded = listener.results["succeeded"][-1] - self.assertEqual(succeeded.command_name, "killCursors") - - self.assertIn(cursor_id, started.command["cursors"]) - target_ns = ".".join([started.command["$db"], started.command["killCursors"]]) - self.assertEqual(cursor_ns, target_ns) - - self.assertIn(cursor_id, succeeded.reply["cursorsKilled"]) - - # Test no auth - def test_2(self): - client = rs_client_noauth() - client.admin.command("ping") - - # Test with auth - def test_3(self): - for mechanism in ["SCRAM-SHA-1", "SCRAM-SHA-256"]: - client = rs_or_single_client(authMechanism=mechanism) - client[self.TEST_DB][self.TEST_COLLECTION].find_one() - - -class DataLakeTestSpec(TestCrudV2): - # Default test database and collection names. 
- TEST_DB = "test" - TEST_COLLECTION = "driverdata" - - @classmethod - @client_context.require_data_lake - def setUpClass(cls): - super(DataLakeTestSpec, cls).setUpClass() - - def setup_scenario(self, scenario_def): - # Spec tests MUST NOT insert data/drop collection for - # data lake testing. - pass - - -def create_test(scenario_def, test, name): - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -TestCreator(create_test, DataLakeTestSpec, _TEST_PATH).create_tests() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_database.py b/test/test_database.py index d49ac8324f..ebbf6e55c6 100644 --- a/test/test_database.py +++ b/test/test_database.py @@ -13,19 +13,21 @@ # limitations under the License. """Test the database module.""" +from __future__ import annotations import re import sys -from typing import Any, Iterable, List, Mapping +from typing import Any, Iterable, List, Mapping, Union + +from pymongo.synchronous.command_cursor import CommandCursor sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest from test.test_custom_types import DECIMAL_CODECOPTS -from test.utils import ( +from test.utils_shared import ( IMPOSSIBLE_WRITE_CONCERN, OvertCommandListener, - rs_or_single_client, wait_until, ) @@ -35,21 +37,25 @@ from bson.objectid import ObjectId from bson.regex import Regex from bson.son import SON -from pymongo import auth, helpers -from pymongo.collection import Collection -from pymongo.database import Database +from pymongo import helpers_shared from pymongo.errors import ( CollectionInvalid, ExecutionTimeout, InvalidName, + InvalidOperation, OperationFailure, WriteConcernError, ) -from pymongo.mongo_client import MongoClient from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference +from pymongo.synchronous import auth +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestDatabaseNoConnect(unittest.TestCase): """Test Database features on a client that does not connect.""" @@ -83,7 +89,7 @@ def test_get_collection(self): def test_getattr(self): db = self.client.pymongo_test - self.assertTrue(isinstance(db["_does_not_exist"], Collection)) + self.assertIsInstance(db["_does_not_exist"], Collection) with self.assertRaises(AttributeError) as context: db._does_not_exist @@ -95,10 +101,7 @@ def test_getattr(self): def test_iteration(self): db = self.client.pymongo_test - if "PyPy" in sys.version: - msg = "'NoneType' object is not callable" - else: - msg = "'Database' object is not iterable" + msg = "'Database' object is not iterable" # Iteration fails with self.assertRaisesRegex(TypeError, msg): for _ in db: # type: ignore[misc] # error: "None" not callable [misc] @@ -135,32 +138,38 @@ def test_get_coll(self): self.assertEqual(db.test.mike, db["test.mike"]) def test_repr(self): + name = "Database" self.assertEqual( repr(Database(self.client, "pymongo_test")), - "Database(%r, %s)" % (self.client, repr("pymongo_test")), + "{}({!r}, {})".format(name, self.client, repr("pymongo_test")), ) def test_create_collection(self): db = Database(self.client, "pymongo_test") db.test.insert_one({"hello": "world"}) - self.assertRaises(CollectionInvalid, db.create_collection, "test") + with self.assertRaises(CollectionInvalid): + db.create_collection("test") db.drop_collection("test") - 
self.assertRaises(TypeError, db.create_collection, 5) - self.assertRaises(TypeError, db.create_collection, None) - self.assertRaises(InvalidName, db.create_collection, "coll..ection") + with self.assertRaises(TypeError): + db.create_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.create_collection(None) # type: ignore[arg-type] + with self.assertRaises(InvalidName): + db.create_collection("coll..ection") # type: ignore[arg-type] test = db.create_collection("test") - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) test.insert_one({"hello": "world"}) - self.assertEqual(db.test.find_one()["hello"], "world") # type: ignore + self.assertEqual((db.test.find_one())["hello"], "world") db.drop_collection("test.foo") db.create_collection("test.foo") - self.assertTrue("test.foo" in db.list_collection_names()) - self.assertRaises(CollectionInvalid, db.create_collection, "test.foo") + self.assertIn("test.foo", db.list_collection_names()) + with self.assertRaises(CollectionInvalid): + db.create_collection("test.foo") def test_list_collection_names(self): db = Database(self.client, "pymongo_test") @@ -168,17 +177,17 @@ def test_list_collection_names(self): db.test.mike.insert_one({"dummy": "object"}) colls = db.list_collection_names() - self.assertTrue("test" in colls) - self.assertTrue("test.mike" in colls) + self.assertIn("test", colls) + self.assertIn("test.mike", colls) for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) db.systemcoll.test.insert_one({}) no_system_collections = db.list_collection_names( filter={"name": {"$regex": r"^(?!system\.)"}} ) for coll in no_system_collections: - self.assertTrue(not coll.startswith("system.")) + self.assertFalse(coll.startswith("system.")) self.assertIn("systemcoll.test", no_system_collections) # Force more than one batch. @@ -193,38 +202,34 @@ def test_list_collection_names(self): def test_list_collection_names_filter(self): listener = OvertCommandListener() - results = listener.results - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] db.capped.drop() db.create_collection("capped", capped=True, size=4096) db.capped.insert_one({}) db.non_capped.insert_one({}) self.addCleanup(client.drop_database, db.name) - + filter: Union[None, Mapping[str, Any]] # Should not send nameOnly. for filter in ({"options.capped": True}, {"options.capped": True, "name": "capped"}): - results.clear() + listener.reset() names = db.list_collection_names(filter=filter) self.assertEqual(names, ["capped"]) - self.assertNotIn("nameOnly", results["started"][0].command) + self.assertNotIn("nameOnly", listener.started_events[0].command) # Should send nameOnly (except on 2.6). 
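# (nameOnly asks listCollections to return just names and types, which lets
# the server skip collection-level locking, so the driver sends it whenever
# the filter touches nothing but "name".)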
- filter: Any for filter in (None, {}, {"name": {"$in": ["capped", "non_capped"]}}): - results.clear() + listener.reset() names = db.list_collection_names(filter=filter) self.assertIn("capped", names) self.assertIn("non_capped", names) - command = results["started"][0].command + command = listener.started_events[0].command self.assertIn("nameOnly", command) self.assertTrue(command["nameOnly"]) def test_check_exists(self): listener = OvertCommandListener() - results = listener.results - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) db = client[self.db.name] db.drop_collection("unique") db.create_collection("unique", check_exists=True) @@ -232,7 +237,7 @@ def test_check_exists(self): listener.reset() db.drop_collection("unique") db.create_collection("unique", check_exists=False) - self.assertTrue(len(results["started"]) > 0) + self.assertGreater(len(listener.started_events), 0) self.assertNotIn("listCollections", listener.started_command_names()) def test_list_collections(self): @@ -245,12 +250,12 @@ def test_list_collections(self): colls = [result["name"] for result in results] # All the collections present. - self.assertTrue("test" in colls) - self.assertTrue("test.mike" in colls) + self.assertIn("test", colls) + self.assertIn("test.mike", colls) # No collection containing a '$'. for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) # Duplicate check. coll_cnt: dict = {} @@ -258,25 +263,19 @@ def test_list_collections(self): try: # Found duplicate. coll_cnt[coll] += 1 - self.assertTrue(False) + self.fail("Found duplicate") except KeyError: coll_cnt[coll] = 1 coll_cnt: dict = {} - # Checking if is there any collection which don't exists. - if ( - len(set(colls) - set(["test", "test.mike"])) == 0 - or len(set(colls) - set(["test", "test.mike", "system.indexes"])) == 0 - ): - self.assertTrue(True) - else: - self.assertTrue(False) + # Check if there are any collections which don't exist. + self.assertLessEqual(set(colls), {"test", "test.mike", "system.indexes"}) - colls = db.list_collections(filter={"name": {"$regex": "^test$"}}) - self.assertEqual(1, len(list(colls))) + colls = (db.list_collections(filter={"name": {"$regex": "^test$"}})).to_list() + self.assertEqual(1, len(colls)) - colls = db.list_collections(filter={"name": {"$regex": "^test.mike$"}}) - self.assertEqual(1, len(list(colls))) + colls = (db.list_collections(filter={"name": {"$regex": "^test.mike$"}})).to_list() + self.assertEqual(1, len(colls)) db.drop_collection("test") @@ -285,12 +284,12 @@ def test_list_collections(self): colls = [result["name"] for result in results] # Checking only capped collections are present - self.assertTrue("test" in colls) - self.assertFalse("test.mike" in colls) + self.assertIn("test", colls) + self.assertNotIn("test.mike", colls) # No collection containing a '$'. for coll in colls: - self.assertTrue("$" not in coll) + self.assertNotIn("$", coll) # Duplicate check. coll_cnt = {} @@ -298,24 +297,18 @@ def test_list_collections(self): try: # Found duplicate. coll_cnt[coll] += 1 - self.assertTrue(False) + self.fail("Found duplicate") except KeyError: coll_cnt[coll] = 1 coll_cnt = {} - # Checking if is there any collection which don't exists. - if ( - len(set(colls) - set(["test"])) == 0 - or len(set(colls) - set(["test", "system.indexes"])) == 0 - ): - self.assertTrue(True) - else: - self.assertTrue(False) + # Check if there are any collections which don't exist. 
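# (assertLessEqual on sets is a subset check: every reported name must be
# one of the expected collections.)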
+ self.assertLessEqual(set(colls), {"test", "system.indexes"}) self.client.drop_database("pymongo_test") def test_list_collection_names_single_socket(self): - client = rs_or_single_client(maxPoolSize=1) + client = self.rs_or_single_client(maxPoolSize=1) client.drop_database("test_collection_names_single_socket") db = client.test_collection_names_single_socket for i in range(200): @@ -327,28 +320,30 @@ def test_list_collection_names_single_socket(self): def test_drop_collection(self): db = Database(self.client, "pymongo_test") - self.assertRaises(TypeError, db.drop_collection, 5) - self.assertRaises(TypeError, db.drop_collection, None) + with self.assertRaises(TypeError): + db.drop_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.drop_collection(None) # type: ignore[arg-type] db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) db.drop_collection("test") - self.assertFalse("test" in db.list_collection_names()) + self.assertNotIn("test", db.list_collection_names()) db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) db.drop_collection("test") - self.assertFalse("test" in db.list_collection_names()) + self.assertNotIn("test", db.list_collection_names()) db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) db.drop_collection(db.test) - self.assertFalse("test" in db.list_collection_names()) + self.assertNotIn("test", db.list_collection_names()) db.test.insert_one({"dummy": "object"}) - self.assertTrue("test" in db.list_collection_names()) + self.assertIn("test", db.list_collection_names()) db.test.drop() - self.assertFalse("test" in db.list_collection_names()) + self.assertNotIn("test", db.list_collection_names()) db.test.drop() db.drop_collection(db.test.doesnotexist) @@ -361,13 +356,17 @@ def test_drop_collection(self): def test_validate_collection(self): db = self.client.pymongo_test - self.assertRaises(TypeError, db.validate_collection, 5) - self.assertRaises(TypeError, db.validate_collection, None) + with self.assertRaises(TypeError): + db.validate_collection(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.validate_collection(None) # type: ignore[arg-type] db.test.insert_one({"dummy": "object"}) - self.assertRaises(OperationFailure, db.validate_collection, "test.doesnotexist") - self.assertRaises(OperationFailure, db.validate_collection, db.test.doesnotexist) + with self.assertRaises(OperationFailure): + db.validate_collection("test.doesnotexist") + with self.assertRaises(OperationFailure): + db.validate_collection(db.test.doesnotexist) self.assertTrue(db.validate_collection("test")) self.assertTrue(db.validate_collection(db.test)) @@ -377,13 +376,16 @@ def test_validate_collection(self): self.assertTrue(db.validate_collection(db.test, True, True)) @client_context.require_version_min(4, 3, 3) + @client_context.require_no_standalone def test_validate_collection_background(self): - db = self.client.pymongo_test + db = self.client.pymongo_test.with_options(write_concern=WriteConcern(w="majority")) db.test.insert_one({"dummy": "object"}) coll = db.test self.assertTrue(db.validate_collection(coll, background=False)) # The inMemory storage engine does not support background=True. 
if client_context.storage_engine != "inMemory": + # background=True requires the collection exist in a checkpoint. + self.client.admin.command("fsync") self.assertTrue(db.validate_collection(coll, background=True)) self.assertTrue(db.validate_collection(coll, scandata=True, background=True)) # The server does not support background=True with full=True. @@ -411,14 +413,51 @@ def test_command_with_regex(self): result = db.command("aggregate", "test", pipeline=[], cursor={}) for doc in result["cursor"]["firstBatch"]: - self.assertTrue(isinstance(doc["r"], Regex)) + self.assertIsInstance(doc["r"], Regex) + + def test_command_bulkWrite(self): + # Ensure bulk write commands can be run directly via db.command(). + if client_context.version.at_least(8, 0): + self.client.admin.command( + { + "bulkWrite": 1, + "nsInfo": [{"ns": self.db.test.full_name}], + "ops": [{"insert": 0, "document": {}}], + } + ) + self.db.command({"insert": "test", "documents": [{}]}) + self.db.command({"update": "test", "updates": [{"q": {}, "u": {"$set": {"x": 1}}}]}) + self.db.command({"delete": "test", "deletes": [{"q": {}, "limit": 1}]}) + self.db.test.drop() + + def test_cursor_command(self): + db = self.client.pymongo_test + db.test.drop() + + docs = [{"_id": i, "doc": i} for i in range(3)] + db.test.insert_many(docs) + + cursor = db.cursor_command("find", "test") + self.assertIsInstance(cursor, CommandCursor) + + result_docs = cursor.to_list() + self.assertEqual(docs, result_docs) + + def test_cursor_command_invalid(self): + with self.assertRaises(InvalidOperation): + self.db.cursor_command("usersInfo", "test") + + @client_context.require_no_fips def test_password_digest(self): - self.assertRaises(TypeError, auth._password_digest, 5) - self.assertRaises(TypeError, auth._password_digest, True) - self.assertRaises(TypeError, auth._password_digest, None) + with self.assertRaises(TypeError): + auth._password_digest(5) # type: ignore[arg-type, call-arg] + with self.assertRaises(TypeError): + auth._password_digest(True) # type: ignore[arg-type, call-arg] + with self.assertRaises(TypeError): + auth._password_digest(None) # type: ignore[arg-type, call-arg] - self.assertTrue(isinstance(auth._password_digest("mike", "password"), str)) + self.assertIsInstance(auth._password_digest("mike", "password"), str) self.assertEqual( auth._password_digest("mike", "password"), "cd7e45b3b2767dc2fa9b6b548457ed00" ) @@ -438,11 +477,11 @@ def test_id_ordering(self): db.test.insert_one(SON([("hello", "world"), ("_id", 5)])) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON) + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) ) cursor = db.test.find() for x in cursor: - for (k, v) in x.items(): + for k, _v in x.items(): self.assertEqual(k, "_id") break @@ -450,16 +489,20 @@ def test_deref(self): db = self.client.pymongo_test db.test.drop() - self.assertRaises(TypeError, db.dereference, 5) - self.assertRaises(TypeError, db.dereference, "hello") - self.assertRaises(TypeError, db.dereference, None) + with self.assertRaises(TypeError): + db.dereference(5) # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.dereference("hello") # type: ignore[arg-type] + with self.assertRaises(TypeError): + db.dereference(None) # type: ignore[arg-type] self.assertEqual(None, db.dereference(DBRef("test", ObjectId()))) - obj = {"x": True} - key = db.test.insert_one(obj).inserted_id + obj: dict[str, Any] = {"x": True} + key = (db.test.insert_one(obj)).inserted_id self.assertEqual(obj, 
db.dereference(DBRef("test", key))) self.assertEqual(obj, db.dereference(DBRef("test", key, "pymongo_test"))) - self.assertRaises(ValueError, db.dereference, DBRef("test", key, "foo")) + with self.assertRaises(ValueError): + db.dereference(DBRef("test", key, "foo")) self.assertEqual(None, db.dereference(DBRef("test", 4))) obj = {"_id": 4} @@ -472,7 +515,7 @@ def test_deref_kwargs(self): db.test.insert_one({"_id": 4, "foo": "bar"}) db = self.client.get_database( - "pymongo_test", codec_options=CodecOptions(document_class=SON) + "pymongo_test", codec_options=CodecOptions(document_class=SON[str, Any]) ) self.assertEqual( SON([("foo", "bar")]), db.dereference(DBRef("test", 4), projection={"_id": False}) @@ -484,8 +527,8 @@ def test_insert_find_one(self): db.test.drop() a_doc = SON({"hello": "world"}) - a_key = db.test.insert_one(a_doc).inserted_id - self.assertTrue(isinstance(a_doc["_id"], ObjectId)) + a_key = (db.test.insert_one(a_doc)).inserted_id + self.assertIsInstance(a_doc["_id"], ObjectId) self.assertEqual(a_doc["_id"], a_key) self.assertEqual(a_doc, db.test.find_one({"_id": a_doc["_id"]})) self.assertEqual(a_doc, db.test.find_one(a_key)) @@ -511,12 +554,12 @@ def test_long(self): db = self.client.pymongo_test db.test.drop() db.test.insert_one({"x": 9223372036854775807}) - retrieved = db.test.find_one()["x"] # type: ignore + retrieved = (db.test.find_one())["x"] self.assertEqual(Int64(9223372036854775807), retrieved) self.assertIsInstance(retrieved, Int64) db.test.delete_many({}) db.test.insert_one({"x": Int64(1)}) - retrieved = db.test.find_one()["x"] # type: ignore + retrieved = (db.test.find_one())["x"] self.assertEqual(Int64(1), retrieved) self.assertIsInstance(retrieved, Int64) @@ -558,10 +601,11 @@ def test_command_response_without_ok(self): # Sometimes (SERVER-10891) the server's response to a badly-formatted # command document will have no 'ok' field. We should raise # OperationFailure instead of KeyError. 
- self.assertRaises(OperationFailure, helpers._check_command_response, {}, None) + with self.assertRaises(OperationFailure): + helpers_shared._check_command_response({}, None) try: - helpers._check_command_response({"$err": "foo"}, None) + helpers_shared._check_command_response({"$err": "foo"}, None) except OperationFailure as e: self.assertEqual(e.args[0], "foo, full error: {'$err': 'foo'}") else: @@ -575,7 +619,7 @@ def test_mongos_response(self): } with self.assertRaises(OperationFailure) as context: - helpers._check_command_response(error_document, None) + helpers_shared._check_command_response(error_document, None) self.assertIn("inner", str(context.exception)) @@ -585,7 +629,7 @@ def test_mongos_response(self): error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {}}} with self.assertRaises(OperationFailure) as context: - helpers._check_command_response(error_document, None) + helpers_shared._check_command_response(error_document, None) self.assertIn("outer", str(context.exception)) @@ -593,7 +637,7 @@ def test_mongos_response(self): error_document = {"ok": 0, "errmsg": "outer", "raw": {"shard0/host0,host1": {"ok": 0}}} with self.assertRaises(OperationFailure) as context: - helpers._check_command_response(error_document, None) + helpers_shared._check_command_response(error_document, None) self.assertIn("outer", str(context.exception)) @@ -604,22 +648,23 @@ def test_command_max_time_ms(self): try: db = self.client.pymongo_test db.command("count", "test") - self.assertRaises(ExecutionTimeout, db.command, "count", "test", maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + db.command("count", "test", maxTimeMS=1) pipeline = [{"$project": {"name": 1, "count": 1}}] # Database command helper. db.command("aggregate", "test", pipeline=pipeline, cursor={}) - self.assertRaises( - ExecutionTimeout, - db.command, - "aggregate", - "test", - pipeline=pipeline, - cursor={}, - maxTimeMS=1, - ) + with self.assertRaises(ExecutionTimeout): + db.command( + "aggregate", + "test", + pipeline=pipeline, + cursor={}, + maxTimeMS=1, + ) # Collection helper. db.test.aggregate(pipeline=pipeline) - self.assertRaises(ExecutionTimeout, db.test.aggregate, pipeline, maxTimeMS=1) + with self.assertRaises(ExecutionTimeout): + db.test.aggregate(pipeline, maxTimeMS=1) finally: self.client.admin.command("configureFailPoint", "maxTimeAlwaysTimeOut", mode="off") @@ -659,13 +704,14 @@ def test_with_options(self): "write_concern": WriteConcern(w=1), "read_concern": ReadConcern(level="local"), } - db2 = db1.with_options(**newopts) # type: ignore[arg-type] + db2 = db1.with_options(**newopts) # type: ignore[arg-type, call-overload] for opt in newopts: self.assertEqual(getattr(db2, opt), newopts.get(opt, getattr(db1, opt))) class TestDatabaseAggregation(IntegrationTest): def setUp(self): + super().setUp() self.pipeline: List[Mapping[str, Any]] = [ {"$listLocalSessions": {}}, {"$limit": 1}, @@ -703,7 +749,10 @@ def test_database_aggregation_fake_cursor(self): with self.assertRaises(StopIteration): next(cursor) - result = wait_until(output_coll.find_one, "read unacknowledged write") + def lambda_fn(): + return output_coll.find_one() + + result = wait_until(lambda_fn, "read unacknowledged write") self.assertEqual(result["dummy"], self.result["dummy"]) def test_bool(self): diff --git a/test/test_dbref.py b/test/test_dbref.py index 281aef473f..ac2767a1ce 100644 --- a/test/test_dbref.py +++ b/test/test_dbref.py @@ -13,6 +13,7 @@ # limitations under the License. 
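# For reference, the DBRef shape these tests exercise is a plain value object;
# resolving one goes through Database.dereference, as covered in
# test_database.py above. A minimal sketch, not part of the patch; names are
# illustrative:
from bson.dbref import DBRef
from pymongo import MongoClient

_db = MongoClient().pymongo_test
_oid = _db.test.insert_one({"x": True}).inserted_id
_ref = DBRef("test", _oid)  # collection and _id, plus an optional database
assert _db.dereference(_ref) == {"_id": _oid, "x": True}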
"""Tests for the dbref module.""" +from __future__ import annotations import pickle import sys @@ -64,7 +65,7 @@ def test_repr(self): ) self.assertEqual( repr(DBRef("coll", ObjectId("1234567890abcdef12345678"))), - "DBRef(%s, ObjectId('1234567890abcdef12345678'))" % (repr("coll"),), + "DBRef({}, ObjectId('1234567890abcdef12345678'))".format(repr("coll")), ) self.assertEqual(repr(DBRef("coll", 5, foo="bar")), "DBRef('coll', 5, foo='bar')") self.assertEqual( @@ -127,7 +128,7 @@ def test_dbref_hash(self): self.assertNotEqual(hash(dbref_1a), hash(dbref_2a)) -# https://github.com/mongodb/specifications/blob/master/source/dbref.rst#test-plan +# https://github.com/mongodb/specifications/blob/master/source/dbref/dbref.md#test-plan class TestDBRefSpec(unittest.TestCase): def test_decoding_1_2_3(self): doc: Any diff --git a/test/test_decimal128.py b/test/test_decimal128.py index b46f94f594..46819dd587 100644 --- a/test/test_decimal128.py +++ b/test/test_decimal128.py @@ -13,6 +13,7 @@ # limitations under the License. """Tests for Decimal128.""" +from __future__ import annotations import pickle import sys diff --git a/test/test_default_exports.py b/test/test_default_exports.py index 42e5831646..adc3882a36 100644 --- a/test/test_default_exports.py +++ b/test/test_default_exports.py @@ -13,6 +13,8 @@ # limitations under the License. """Test the default exports of the top level packages.""" +from __future__ import annotations + import inspect import unittest @@ -24,7 +26,7 @@ GRIDFS_IGNORE = [ "ASCENDING", "DESCENDING", - "ClientSession", + "AsyncClientSession", "Collection", "ObjectId", "validate_string", @@ -33,7 +35,7 @@ "WriteConcern", ] PYMONGO_IGNORE = [] -GLOBAL_INGORE = ["TYPE_CHECKING"] +GLOBAL_INGORE = ["TYPE_CHECKING", "annotations"] class TestDefaultExports(unittest.TestCase): @@ -65,6 +67,174 @@ def test_gridfs(self): def test_bson(self): self.check_module(bson, BSON_IGNORE) + def test_pymongo_imports(self): + import pymongo + from pymongo.auth import MECHANISMS + from pymongo.auth_oidc import ( + OIDCCallback, + OIDCCallbackContext, + OIDCCallbackResult, + OIDCIdPInfo, + ) + from pymongo.change_stream import ( + ChangeStream, + ClusterChangeStream, + CollectionChangeStream, + DatabaseChangeStream, + ) + from pymongo.client_options import ClientOptions + from pymongo.client_session import ClientSession, SessionOptions, TransactionOptions + from pymongo.collation import ( + Collation, + CollationAlternate, + CollationCaseFirst, + CollationMaxVariable, + CollationStrength, + validate_collation_or_none, + ) + from pymongo.collection import Collection, ReturnDocument + from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor + from pymongo.cursor import Cursor, RawBatchCursor + from pymongo.database import Database + from pymongo.driver_info import DriverInfo + from pymongo.encryption import ( + Algorithm, + ClientEncryption, + QueryType, + RewrapManyDataKeyResult, + ) + from pymongo.encryption_options import AutoEncryptionOpts, RangeOpts + from pymongo.errors import ( + AutoReconnect, + BulkWriteError, + CollectionInvalid, + ConfigurationError, + ConnectionFailure, + CursorNotFound, + DocumentTooLarge, + DuplicateKeyError, + EncryptedCollectionError, + EncryptionError, + ExecutionTimeout, + InvalidName, + InvalidOperation, + NetworkTimeout, + NotPrimaryError, + OperationFailure, + ProtocolError, + PyMongoError, + ServerSelectionTimeoutError, + WaitQueueTimeoutError, + WriteConcernError, + WriteError, + WTimeoutError, + ) + from pymongo.event_loggers 
import ( + CommandLogger, + ConnectionPoolLogger, + HeartbeatLogger, + ServerLogger, + TopologyLogger, + ) + from pymongo.mongo_client import MongoClient + from pymongo.monitoring import ( + CommandFailedEvent, + CommandListener, + CommandStartedEvent, + CommandSucceededEvent, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutFailedReason, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionClosedReason, + ConnectionCreatedEvent, + ConnectionPoolListener, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, + ServerClosedEvent, + ServerDescriptionChangedEvent, + ServerHeartbeatFailedEvent, + ServerHeartbeatListener, + ServerHeartbeatStartedEvent, + ServerHeartbeatSucceededEvent, + ServerListener, + ServerOpeningEvent, + TopologyClosedEvent, + TopologyDescriptionChangedEvent, + TopologyEvent, + TopologyListener, + TopologyOpenedEvent, + register, + ) + from pymongo.operations import ( + DeleteMany, + DeleteOne, + IndexModel, + SearchIndexModel, + UpdateMany, + UpdateOne, + ) + from pymongo.pool import PoolOptions + from pymongo.read_concern import ReadConcern + from pymongo.read_preferences import ( + Nearest, + Primary, + PrimaryPreferred, + ReadPreference, + SecondaryPreferred, + ) + from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, + ) + from pymongo.server_api import ServerApi, ServerApiVersion + from pymongo.server_description import ServerDescription + from pymongo.topology_description import TopologyDescription + from pymongo.uri_parser import ( + parse_host, + parse_ipv6_literal_host, + parse_uri, + parse_userinfo, + split_hosts, + split_options, + validate_options, + ) + from pymongo.write_concern import WriteConcern, validate_boolean + + def test_pymongo_submodule_attributes(self): + import pymongo + + self.assertTrue(hasattr(pymongo, "uri_parser")) + self.assertTrue(pymongo.uri_parser) + self.assertTrue(pymongo.uri_parser.parse_uri) + self.assertTrue(pymongo.change_stream) + self.assertTrue(pymongo.client_session) + self.assertTrue(pymongo.collection) + self.assertTrue(pymongo.cursor) + self.assertTrue(pymongo.command_cursor) + self.assertTrue(pymongo.database) + + def test_gridfs_imports(self): + import gridfs + from gridfs.errors import CorruptGridFile, FileExists, GridFSError, NoFile + from gridfs.grid_file import ( + GridFS, + GridFSBucket, + GridIn, + GridOut, + GridOutChunkIterator, + GridOutCursor, + GridOutIterator, + ) + if __name__ == "__main__": unittest.main() diff --git a/test/test_discovery_and_monitoring.py b/test/test_discovery_and_monitoring.py index 39979c2d10..67a82996bd 100644 --- a/test/test_discovery_and_monitoring.py +++ b/test/test_discovery_and_monitoring.py @@ -13,29 +13,48 @@ # limitations under the License. 
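# The SDAM tests below observe server heartbeats through event listeners; a
# minimal standalone equivalent of the helpers they use looks like this
# (sketch only; the URI is an assumption):
from pymongo import MongoClient, monitoring

class _HeartbeatLogger(monitoring.ServerHeartbeatListener):
    def started(self, event):
        print("heartbeat started:", event.connection_id)

    def succeeded(self, event):
        print("heartbeat succeeded in", event.duration, "s")

    def failed(self, event):
        print("heartbeat failed:", event.reply)

_client = MongoClient(
    "mongodb://localhost:27017",
    event_listeners=[_HeartbeatLogger()],
    heartbeatFrequencyMS=500,
)
_client.admin.command("ping")  # force discovery so heartbeats are recorded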
"""Test the topology module.""" +from __future__ import annotations +import asyncio import os +import socketserver import sys import threading import time +from asyncio import StreamReader, StreamWriter +from pathlib import Path +from test.helpers import ConcurrentRunner +from test.utils import flaky + +from pymongo.operations import _Op +from pymongo.server_selectors import writable_server_selector +from pymongo.synchronous.pool import Connection sys.path[0:0] = [""] -from test import IntegrationTest, unittest +from test import ( + IntegrationTest, + PyMongoTestCase, + UnitTest, + client_context, + unittest, +) from test.pymongo_mocks import DummyMonitor +from test.unified_format import generate_test_classes from test.utils import ( + get_pool, +) +from test.utils_shared import ( CMAPListener, HeartbeatEventListener, - TestCreator, + HeartbeatEventsListListener, assertion_context, - client_context, - get_pool, - rs_or_single_client, + barrier_wait, + create_barrier, server_name_to_type, - single_client, wait_until, ) -from test.utils_spec_runner import SpecRunner, SpecRunnerThread +from unittest.mock import patch from bson import Timestamp, json_util from pymongo import common, monitoring @@ -47,15 +66,24 @@ OperationFailure, ) from pymongo.hello import Hello, HelloCompat -from pymongo.helpers import _check_command_response, _check_write_command_response +from pymongo.helpers_shared import _check_command_response, _check_write_command_response +from pymongo.monitoring import ServerHeartbeatFailedEvent, ServerHeartbeatStartedEvent from pymongo.server_description import SERVER_TYPE, ServerDescription -from pymongo.settings import TopologySettings -from pymongo.topology import Topology, _ErrorContext +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext +from pymongo.synchronous.uri_parser import parse_uri from pymongo.topology_description import TOPOLOGY_TYPE -from pymongo.uri_parser import parse_uri + +_IS_SYNC = True # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring") +if _IS_SYNC: + SDAM_PATH = os.path.join(Path(__file__).resolve().parent, "discovery_and_monitoring") +else: + SDAM_PATH = os.path.join( + Path(__file__).resolve().parent.parent, + "discovery_and_monitoring", + ) def create_mock_topology(uri, monitor_class=DummyMonitor): @@ -63,8 +91,8 @@ def create_mock_topology(uri, monitor_class=DummyMonitor): replica_set_name = None direct_connection = None load_balanced = None - if "replicaset" in parsed_uri["options"]: - replica_set_name = parsed_uri["options"]["replicaset"] + if "replicaSet" in parsed_uri["options"]: + replica_set_name = parsed_uri["options"]["replicaSet"] if "directConnection" in parsed_uri["options"]: direct_connection = parsed_uri["options"]["directConnection"] if "loadBalanced" in parsed_uri["options"]: @@ -96,7 +124,7 @@ def got_app_error(topology, app_error): when = app_error["when"] max_wire_version = app_error["maxWireVersion"] # XXX: We could get better test coverage by mocking the errors on the - # Pool/SocketInfo. + # Pool/Connection. 
try: if error_type == "command": _check_command_response(app_error["response"], max_wire_version) @@ -106,15 +134,15 @@ def got_app_error(topology, app_error): elif error_type == "timeout": raise NetworkTimeout("mock network timeout error") else: - raise AssertionError("unknown error type: %s" % (error_type,)) - assert False + raise AssertionError(f"unknown error type: {error_type}") + raise AssertionError except (AutoReconnect, NotPrimaryError, OperationFailure) as e: if when == "beforeHandshakeCompletes": completed_handshake = False elif when == "afterHandshakeCompletes": completed_handshake = True else: - assert False, "Unknown when field %s" % (when,) + raise AssertionError(f"Unknown when field {when}") topology.handle_error( server_address, @@ -127,7 +155,7 @@ def get_type(topology, hostname): return description.server_type -class TestAllScenarios(unittest.TestCase): +class TestAllScenarios(UnitTest): pass @@ -165,6 +193,9 @@ def check_outcome(self, topology, outcome): server_type_name(expected_server_type), server_type_name(actual_server_description.server_type), ) + expected_error = expected_server.get("error") + if expected_error: + self.assertIn(expected_error, str(actual_server_description.error)) self.assertEqual(expected_server.get("setName"), actual_server_description.replica_set_name) @@ -203,7 +234,7 @@ def run_scenario(self): for i, phase in enumerate(scenario_def["phases"]): # Including the phase description makes failures easier to debug. description = phase.get("description", str(i)) - with assertion_context("phase: %s" % (description,)): + with assertion_context(f"phase: {description}"): for response in phase.get("responses", []): got_hello(c, common.partition_node(response[0]), response[1]) @@ -216,8 +247,11 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(SDAM_PATH): dirname = os.path.split(dirpath)[-1] + # SDAM unified tests are handled separately. + if dirname == "unified": + continue for filename in filenames: if os.path.splitext(filename)[1] != ".json": @@ -227,7 +261,7 @@ def create_tests(): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -236,52 +270,56 @@ def create_tests(): create_tests() -class TestClusterTimeComparison(unittest.TestCase): +class TestClusterTimeComparison(PyMongoTestCase): def test_cluster_time_comparison(self): t = create_mock_topology("mongodb://host") - def send_cluster_time(time, inc, should_update): + def send_cluster_time(time, inc): old = t.max_cluster_time() new = {"clusterTime": Timestamp(time, inc)} got_hello( t, ("host", 27017), - {"ok": 1, "minWireVersion": 0, "maxWireVersion": 6, "$clusterTime": new}, + { + "ok": 1, + "minWireVersion": 0, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + "$clusterTime": new, + }, ) actual = t.max_cluster_time() - if should_update: - self.assertEqual(actual, new) - else: - self.assertEqual(actual, old) + # We never update $clusterTime from monitoring connections. 
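# (Heartbeats run on dedicated monitoring connections; only replies to
# application commands may advance the client's cluster time, hence the
# expectation of "old" rather than "new" below.)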
+ self.assertEqual(actual, old) - send_cluster_time(0, 1, True) - send_cluster_time(2, 2, True) - send_cluster_time(2, 1, False) - send_cluster_time(1, 3, False) - send_cluster_time(2, 3, True) + send_cluster_time(0, 1) + send_cluster_time(2, 2) + send_cluster_time(2, 1) + send_cluster_time(1, 3) + send_cluster_time(2, 3) class TestIgnoreStaleErrors(IntegrationTest): def test_ignore_stale_connection_errors(self): - N_THREADS = 5 - barrier = threading.Barrier(N_THREADS, timeout=30) - client = rs_or_single_client(minPoolSize=N_THREADS) - self.addCleanup(client.close) + if not _IS_SYNC and sys.version_info < (3, 11): + self.skipTest("Test requires asyncio.Barrier (added in Python 3.11)") + N_TASKS = 5 + barrier = create_barrier(N_TASKS) + client = self.rs_or_single_client(minPoolSize=N_TASKS) # Wait for initial discovery. client.admin.command("ping") pool = get_pool(client) starting_generation = pool.gen.get_overall() - wait_until(lambda: len(pool.sockets) == N_THREADS, "created sockets") + wait_until(lambda: len(pool.conns) == N_TASKS, "created conns") def mock_command(*args, **kwargs): - # Synchronize all threads to ensure they use the same generation. - barrier.wait() - raise AutoReconnect("mock SocketInfo.command error") + # Synchronize all tasks to ensure they use the same generation. + barrier_wait(barrier, timeout=30) + raise AutoReconnect("mock Connection.command error") - for sock in pool.sockets: - sock.command = mock_command + for conn in pool.conns: + conn.command = mock_command def insert_command(i): try: @@ -289,12 +327,12 @@ def insert_command(i): except AutoReconnect: pass - threads = [] - for i in range(N_THREADS): - threads.append(threading.Thread(target=insert_command, args=(i,))) - for t in threads: + tasks = [] + for i in range(N_TASKS): + tasks.append(ConcurrentRunner(target=insert_command, args=(i,))) + for t in tasks: t.start() - for t in threads: + for t in tasks: t.join() # Expect a single pool reset for the network error @@ -313,10 +351,9 @@ class TestPoolManagement(IntegrationTest): def test_pool_unpause(self): # This test implements the prose test "Connection Pool Management" listener = CMAPHeartbeatListener() - client = single_client( + _ = self.single_client( appName="SDAMPoolManagementTest", heartbeatFrequencyMS=500, event_listeners=[listener] ) - self.addCleanup(client.close) # Assert that ConnectionPoolReadyEvent occurs after the first # ServerHeartbeatSucceededEvent. listener.wait_for_event(monitoring.PoolReadyEvent, 1) @@ -339,108 +376,209 @@ def test_pool_unpause(self): listener.wait_for_event(monitoring.ServerHeartbeatSucceededEvent, 1) listener.wait_for_event(monitoring.PoolReadyEvent, 1) - -class TestIntegration(SpecRunner): - # Location of JSON test specifications. - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "discovery_and_monitoring_integration" - ) - - def _event_count(self, event): - if event == "ServerMarkedUnknownEvent": - - def marked_unknown(e): - return ( - isinstance(e, monitoring.ServerDescriptionChangedEvent) - and not e.new_description.is_server_type_known - ) - - assert self.server_listener is not None - return len(self.server_listener.matching(marked_unknown)) - # Only support CMAP events for now. - self.assertTrue(event.startswith("Pool") or event.startswith("Conn")) - event_type = getattr(monitoring, event) - assert self.pool_listener is not None - return self.pool_listener.event_count(event_type) - - def assert_event_count(self, event, count): - """Run the assertEventCount test operation. 
- - Assert the given event was published exactly `count` times. - """ - self.assertEqual(self._event_count(event), count, "expected %s not %r" % (count, event)) - - def wait_for_event(self, event, count): - """Run the waitForEvent test operation. - - Wait for a number of events to be published, or fail. - """ + @client_context.require_failCommand_appName + @client_context.require_test_commands + @client_context.require_async + @flaky(reason="PYTHON-5428") + def test_connection_close_does_not_block_other_operations(self): + listener = CMAPHeartbeatListener() + client = self.single_client( + appName="SDAMConnectionCloseTest", + event_listeners=[listener], + heartbeatFrequencyMS=500, + minPoolSize=10, + ) + server = (client._get_topology()).select_server(writable_server_selector, _Op.TEST) wait_until( - lambda: self._event_count(event) >= count, "find %s %s event(s)" % (count, event) + lambda: len(server._pool.conns) == 10, + "pool initialized with 10 connections", ) - def configure_fail_point(self, fail_point): - """Run the configureFailPoint test operation.""" - self.set_fail_point(fail_point) - self.addCleanup( - self.set_fail_point, - {"configureFailPoint": fail_point["configureFailPoint"], "mode": "off"}, - ) + client.db.test.insert_one({"x": 1}) + close_delay = 0.1 + latencies = [] + should_exit = [] + + def run_task(): + while True: + start_time = time.monotonic() + client.db.test.find_one({}) + elapsed = time.monotonic() - start_time + latencies.append(elapsed) + if should_exit: + break + time.sleep(0.001) + + task = ConcurrentRunner(target=run_task) + task.start() + original_close = Connection.close_conn + try: + # Artificially delay the close operation to simulate a slow close + def mock_close(self, reason): + time.sleep(close_delay) + original_close(self, reason) + + Connection.close_conn = mock_close + + fail_hello = { + "mode": {"times": 4}, + "data": { + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "errorCode": 91, + "appName": "SDAMConnectionCloseTest", + }, + } + with self.fail_point(fail_hello): + # Wait for server heartbeat to fail + listener.wait_for_event(monitoring.ServerHeartbeatFailedEvent, 1) + # Wait until all idle connections are closed to simulate real-world conditions + listener.wait_for_event(monitoring.ConnectionClosedEvent, 10) + # Wait for one more find to complete after the pool has been reset, then shut down the task + n = len(latencies) + wait_until(lambda: len(latencies) >= n + 1, "run one more find") + should_exit.append(True) + task.join() + # No operation latency should significantly exceed close_delay + self.assertLessEqual(max(latencies), close_delay * 5.0) + finally: + Connection.close_conn = original_close + + +class TestServerMonitoringMode(IntegrationTest): + @client_context.require_no_load_balancer + def setUp(self): + super().setUp() + + def test_rtt_connection_is_enabled_stream(self): + client = self.rs_or_single_client(serverMonitoringMode="stream") + client.admin.command("ping") + + def predicate(): + for _, server in client._topology._servers.items(): + monitor = server._monitor + if not monitor._stream: + return False + if client_context.version >= (4, 4): + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is None: + return False + else: + if monitor._rtt_monitor._executor._task is None: + return False + else: + if _IS_SYNC: + if monitor._rtt_monitor._executor._thread is not None: + return False + else: + if monitor._rtt_monitor._executor._task is not None: + return False + return True + + wait_until(predicate, "find all 
RTT monitors") + + def test_rtt_connection_is_disabled_poll(self): + client = self.rs_or_single_client(serverMonitoringMode="poll") + + self.assert_rtt_connection_is_disabled(client) + + def test_rtt_connection_is_disabled_auto(self): + envs = [ + {"AWS_EXECUTION_ENV": "AWS_Lambda_python3.10"}, + {"FUNCTIONS_WORKER_RUNTIME": "python"}, + {"K_SERVICE": "gcpservicename"}, + {"FUNCTION_NAME": "gcpfunctionname"}, + {"VERCEL": "1"}, + ] + for env in envs: + with patch.dict("os.environ", env): + client = self.rs_or_single_client(serverMonitoringMode="auto") + self.assert_rtt_connection_is_disabled(client) + + def assert_rtt_connection_is_disabled(self, client): + client.admin.command("ping") + for _, server in client._topology._servers.items(): + monitor = server._monitor + self.assertFalse(monitor._stream) + if _IS_SYNC: + self.assertIsNone(monitor._rtt_monitor._executor._thread) + else: + self.assertIsNone(monitor._rtt_monitor._executor._task) - def run_admin_command(self, command, **kwargs): - """Run the runAdminCommand test operation.""" - self.client.admin.command(command, **kwargs) - def record_primary(self): - """Run the recordPrimary test operation.""" - self._previous_primary = self.scenario_client.primary +class MockTCPHandler(socketserver.BaseRequestHandler): + def handle(self): + self.server.events.append("client connected") + if self.request.recv(1024).strip(): + self.server.events.append("client hello received") + self.request.close() - def wait_for_primary_change(self, timeout): - """Run the waitForPrimaryChange test operation.""" - def primary_changed(): - primary = self.scenario_client.primary - if primary is None: - return False - return primary != self._previous_primary +class TCPServer(socketserver.TCPServer): + allow_reuse_address = True - wait_until(primary_changed, "change primary", timeout=timeout) + def handle_request_and_shutdown(self): + self.handle_request() + self.server_close() - def wait(self, ms): - """Run the "wait" test operation.""" - time.sleep(ms / 1000.0) - def start_thread(self, name): - """Run the 'startThread' thread operation.""" - thread = SpecRunnerThread(name) - thread.start() - self.targets[name] = thread +class TestHeartbeatStartOrdering(PyMongoTestCase): + def test_heartbeat_start_ordering(self): + events = [] + listener = HeartbeatEventsListListener(events) - def run_on_thread(self, sessions, collection, name, operation): - """Run the 'runOnThread' operation.""" - thread = self.targets[name] - thread.schedule(lambda: self._run_op(sessions, collection, operation, False)) + if _IS_SYNC: + server = TCPServer(("localhost", 9999), MockTCPHandler) + server.events = events + server_thread = ConcurrentRunner(target=server.handle_request_and_shutdown) + server_thread.start() + _c = self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + server_thread.join() + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) - def wait_for_thread(self, name): - """Run the 'waitForThread' operation.""" - thread = self.targets[name] - thread.stop() - thread.join(60) - if thread.exc: - raise thread.exc - self.assertFalse(thread.is_alive(), "Thread %s is still running" % (name,)) + else: + def handle_client(reader: StreamReader, writer: StreamWriter): + events.append("client connected") + if (reader.read(1024)).strip(): + events.append("client hello received") + writer.close() + writer.wait_closed() + + server = asyncio.start_server(handle_client, 
"localhost", 9999) + server.events = events + server.start_serving() + _c = self.simple_client( + "mongodb://localhost:9999", + serverSelectionTimeoutMS=500, + event_listeners=(listener,), + ) + _c._connect() -def create_spec_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) + listener.wait_for_event(ServerHeartbeatStartedEvent, 1) + listener.wait_for_event(ServerHeartbeatFailedEvent, 1) - return run_scenario + server.close() + server.wait_closed() + _c.close() + + self.assertEqual( + events, + [ + "serverHeartbeatStartedEvent", + "client connected", + "client hello received", + "serverHeartbeatFailedEvent", + ], + ) -test_creator = TestCreator(create_spec_test, TestIntegration, TestIntegration.TEST_PATH) -test_creator.create_tests() +# Generate unified tests. +globals().update(generate_test_classes(os.path.join(SDAM_PATH, "unified"), module=__name__)) if __name__ == "__main__": diff --git a/test/test_dns.py b/test/test_dns.py index 352d05376a..8f88562e3f 100644 --- a/test/test_dns.py +++ b/test/test_dns.py @@ -13,28 +13,42 @@ # limitations under the License. """Run the SRV support tests.""" +from __future__ import annotations import glob import json import os +import pathlib import sys sys.path[0:0] = [""] -from test import client_context, unittest -from test.utils import wait_until +from test import ( + IntegrationTest, + PyMongoTestCase, + client_context, + unittest, +) +from test.utils_shared import wait_until +from unittest.mock import MagicMock, patch from pymongo.common import validate_read_preference_tags from pymongo.errors import ConfigurationError -from pymongo.mongo_client import MongoClient -from pymongo.srv_resolver import _HAVE_DNSPYTHON -from pymongo.uri_parser import parse_uri, split_hosts +from pymongo.synchronous.uri_parser import parse_uri +from pymongo.uri_parser_shared import split_hosts +_IS_SYNC = True -class TestDNSRepl(unittest.TestCase): - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "replica-set" - ) + +class TestDNSRepl(PyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "replica-set" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "replica-set" + ) load_balanced = False @client_context.require_replica_set @@ -42,10 +56,15 @@ def setUp(self): pass -class TestDNSLoadBalanced(unittest.TestCase): - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "load-balanced" - ) +class TestDNSLoadBalanced(PyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent, "srv_seedlist", "load-balanced" + ) + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "load-balanced" + ) load_balanced = True @client_context.require_load_balancer @@ -53,8 +72,13 @@ def setUp(self): pass -class TestDNSSharded(unittest.TestCase): - TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "srv_seedlist", "sharded") +class TestDNSSharded(PyMongoTestCase): + if _IS_SYNC: + TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "srv_seedlist", "sharded") + else: + TEST_PATH = os.path.join( + pathlib.Path(__file__).resolve().parent.parent, "srv_seedlist", "sharded" + ) load_balanced = False @client_context.require_mongos @@ -64,8 +88,6 @@ def setUp(self): def create_test(test_case): def 
run_test(self): - if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("DNS tests require the dnspython module") uri = test_case["uri"] seeds = test_case.get("seeds") num_seeds = test_case.get("numSeeds", len(seeds or [])) @@ -122,7 +144,9 @@ def run_test(self): # tests. copts["tlsAllowInvalidHostnames"] = True - client = MongoClient(uri, **copts) + client = self.simple_client(uri, **copts) + if client._options.connect: + client._connect() if num_seeds is not None: self.assertEqual(len(client._topology_settings.seeds), num_seeds) if hosts is not None: @@ -131,6 +155,8 @@ def run_test(self): wait_until( lambda: num_hosts == len(client.nodes), "wait to connect to num_hosts" ) + if test_case.get("ping", True): + client.admin.command("ping") # XXX: we should block until SRV poller runs at least once # and re-run these assertions. else: @@ -157,33 +183,123 @@ def create_tests(cls): create_tests(TestDNSSharded) -class TestParsingErrors(unittest.TestCase): - @unittest.skipUnless(_HAVE_DNSPYTHON, "DNS tests require the dnspython module") +class TestParsingErrors(PyMongoTestCase): def test_invalid_host(self): - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: mongodb is not", - MongoClient, - "mongodb+srv://mongodb", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: mongodb.com is not", - MongoClient, - "mongodb+srv://mongodb.com", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: an IP address is not", - MongoClient, - "mongodb+srv://127.0.0.1", - ) - self.assertRaisesRegex( - ConfigurationError, - "Invalid URI host: an IP address is not", - MongoClient, - "mongodb+srv://[::1]", - ) + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://127.0.0.1") + client._connect() + with self.assertRaisesRegex(ConfigurationError, "Invalid URI host: an IP address is not"): + client = self.simple_client("mongodb+srv://[::1]") + client._connect() + + +class TestCaseInsensitive(IntegrationTest): + def test_connect_case_insensitive(self): + client = self.simple_client("mongodb+srv://TEST1.TEST.BUILD.10GEN.cc/") + client._connect() + self.assertGreater(len(client.topology_description.server_descriptions()), 1) + + +class TestInitialDnsSeedlistDiscovery(PyMongoTestCase): + """ + Initial DNS Seedlist Discovery prose tests + https://github.com/mongodb/specifications/blob/0a7a8b5/source/initial-dns-seedlist-discovery/tests/README.md#prose-tests + """ + + def run_initial_dns_seedlist_discovery_prose_tests(self, test_cases): + for case in test_cases: + with patch("dns.resolver.resolve") as mock_resolver: + + def mock_resolve(query, record_type, *args, **kwargs): + mock_srv = MagicMock() + mock_srv.target.to_text.return_value = case["mock_target"] + return [mock_srv] + + mock_resolver.side_effect = mock_resolve + domain = case["query"].split("._tcp.")[1] + connection_string = f"mongodb+srv://{domain}" + if "expected_error" not in case: + parse_uri(connection_string) + else: + try: + parse_uri(connection_string) + except ConfigurationError as e: + self.assertIn(case["expected_error"], str(e)) + else: + self.fail(f"ConfigurationError was not raised for query: {case['query']}") + + def test_1_allow_srv_hosts_with_fewer_than_three_dot_separated_parts(self): + with patch("dns.resolver.resolve"): + parse_uri("mongodb+srv://localhost/") + parse_uri("mongodb+srv://mongo.local/") + + def test_2_throw_when_return_address_does_not_end_with_srv_domain(self): + test_cases = [ + { 
+ "query": "_mongodb._tcp.localhost", + "mock_target": "localhost.mongodb", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.evil.com", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongo.local", + "mock_target": "test_1.evil.com", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + def test_3_throw_when_return_address_is_identical_to_srv_hostname(self): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "mongo.local", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + def test_4_throw_when_return_address_does_not_contain_dot_separating_shared_part_of_domain( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.localhost", + "mock_target": "test_1.cluster_1localhost", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.mongo.local", + "mock_target": "test_1.my_hostmongo.local", + "expected_error": "Invalid SRV host", + }, + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "cluster.testmongodb.com", + "expected_error": "Invalid SRV host", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) + + def test_5_when_srv_hostname_has_two_dot_separated_parts_it_is_valid_for_the_returned_hostname_to_be_identical( + self + ): + test_cases = [ + { + "query": "_mongodb._tcp.blogs.mongodb.com", + "mock_target": "blogs.mongodb.com", + }, + ] + self.run_initial_dns_seedlist_discovery_prose_tests(test_cases) if __name__ == "__main__": diff --git a/test/test_encryption.py b/test/test_encryption.py index 94a588bd6a..04e61b7bad 100644 --- a/test/test_encryption.py +++ b/test/test_encryption.py @@ -13,79 +13,107 @@ # limitations under the License. 
"""Test client side encryption spec.""" +from __future__ import annotations import base64 import copy +import http.client +import json import os +import pathlib import re import socket +import socketserver import ssl import sys import textwrap import traceback import uuid -from typing import Any, Dict +import warnings +from test import IntegrationTest, PyMongoTestCase, client_context +from test.test_bulk import BulkTestBase +from test.utils import flaky +from test.utils_spec_runner import SpecRunner, SpecTestCreator +from threading import Thread +from typing import Any, Dict, Mapping, Optional + +import pytest -from pymongo.collection import Collection +from pymongo.daemon import _spawn_daemon +from pymongo.synchronous.collection import Collection +from pymongo.uri_parser_shared import _parse_kms_tls_options + +try: + from pymongo.pyopenssl_context import IS_PYOPENSSL +except ImportError: + IS_PYOPENSSL = False sys.path[0:0] = [""] from test import ( + unittest, +) +from test.helpers_shared import ( + ALL_KMS_PROVIDERS, AWS_CREDS, + AWS_TEMP_CREDS, AZURE_CREDS, CA_PEM, CLIENT_PEM, + DEFAULT_KMS_TLS, GCP_CREDS, KMIP_CREDS, LOCAL_MASTER_KEY, - IntegrationTest, - PyMongoTestCase, - client_context, - unittest, ) from test.test_bulk import BulkTestBase from test.unified_format import generate_test_classes -from test.utils import ( +from test.utils_shared import ( AllowListEventListener, OvertCommandListener, - TestCreator, TopologyEventListener, camel_to_snake_args, - rs_or_single_client, + is_greenthread_patched, wait_until, ) from test.utils_spec_runner import SpecRunner -from bson import encode, json_util +from bson import BSON, DatetimeMS, Decimal128, encode, json_util from bson.binary import UUID_SUBTYPE, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import BSONError from bson.json_util import JSONOptions from bson.son import SON -from pymongo import encryption -from pymongo.cursor import CursorType -from pymongo.encryption import Algorithm, ClientEncryption, QueryType -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts +from pymongo import ReadPreference +from pymongo.cursor_shared import CursorType +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts, RangeOpts, TextOpts from pymongo.errors import ( AutoReconnect, BulkWriteError, ConfigurationError, DuplicateKeyError, + EncryptedCollectionError, EncryptionError, InvalidOperation, OperationFailure, + PyMongoError, ServerSelectionTimeoutError, WriteError, ) -from pymongo.mongo_client import MongoClient from pymongo.operations import InsertOne, ReplaceOne, UpdateOne +from pymongo.synchronous import encryption +from pymongo.synchronous.encryption import Algorithm, ClientEncryption, QueryType +from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +_IS_SYNC = True + +pytestmark = pytest.mark.encryption + KMS_PROVIDERS = {"local": {"key": b"\x00" * 96}} def get_client_opts(client): - return client._MongoClient__options + return client.options class TestAutoEncryptionOpts(PyMongoTestCase): @@ -93,13 +121,12 @@ class TestAutoEncryptionOpts(PyMongoTestCase): @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") def test_crypt_shared(self): # Test that we can pick up crypt_shared lib automatically - client = MongoClient( + self.simple_client( auto_encryption_opts=AutoEncryptionOpts( KMS_PROVIDERS, "keyvault.datakeys", crypt_shared_lib_required=True ), connect=False, 
) - self.addCleanup(client.close) @unittest.skipIf(_HAVE_PYMONGOCRYPT, "pymongocrypt is installed") def test_init_requires_pymongocrypt(self): @@ -118,7 +145,7 @@ def test_init(self): self.assertEqual(opts._mongocryptd_bypass_spawn, False) self.assertEqual(opts._mongocryptd_spawn_path, "mongocryptd") self.assertEqual(opts._mongocryptd_spawn_args, ["--idleShutdownTimeoutSecs=60"]) - self.assertEqual(opts._kms_ssl_contexts, {}) + self.assertEqual(opts._kms_tls_options, None) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init_spawn_args(self): @@ -144,67 +171,71 @@ def test_init_spawn_args(self): @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") def test_init_kms_tls_options(self): # Error cases: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) with self.assertRaisesRegex(TypeError, r'kms_tls_options\["kmip"\] must be a dict'): - AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": 1}) + MongoClient(auto_encryption_opts=opts) + tls_opts: Any for tls_opts in [ {"kmip": {"tls": True, "tlsInsecure": True}}, {"kmip": {"tls": True, "tlsAllowInvalidCertificates": True}}, {"kmip": {"tls": True, "tlsAllowInvalidHostnames": True}}, - {"kmip": {"tls": True, "tlsDisableOCSPEndpointCheck": True}}, ]: + opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) with self.assertRaisesRegex(ConfigurationError, "Insecure TLS options prohibited"): - opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) + MongoClient(auto_encryption_opts=opts) + opts = AutoEncryptionOpts( + {}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}} + ) with self.assertRaises(FileNotFoundError): - AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tlsCAFile": "does-not-exist"}}) + MongoClient(auto_encryption_opts=opts) # Success cases: tls_opts: Any for tls_opts in [None, {}]: opts = AutoEncryptionOpts({}, "k.d", kms_tls_options=tls_opts) - self.assertEqual(opts._kms_ssl_contexts, {}) + kms_tls_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + self.assertEqual(kms_tls_contexts, {}) opts = AutoEncryptionOpts({}, "k.d", kms_tls_options={"kmip": {"tls": True}, "aws": {}}) - ctx = opts._kms_ssl_contexts["kmip"] + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) - ctx = opts._kms_ssl_contexts["aws"] + ctx = _kms_ssl_contexts["aws"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) opts = AutoEncryptionOpts( {}, "k.d", - kms_tls_options={"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}}, + kms_tls_options=DEFAULT_KMS_TLS, ) - ctx = opts._kms_ssl_contexts["kmip"] + _kms_ssl_contexts = _parse_kms_tls_options(opts._kms_tls_options, _IS_SYNC) + ctx = _kms_ssl_contexts["kmip"] self.assertEqual(ctx.check_hostname, True) self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED) class TestClientOptions(PyMongoTestCase): def test_default(self): - client = MongoClient(connect=False) - self.addCleanup(client.close) + client = self.simple_client(connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, None) - client = MongoClient(auto_encryption_opts=None, connect=False) - self.addCleanup(client.close) + client = self.simple_client(auto_encryption_opts=None, connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, None) @unittest.skipUnless(_HAVE_PYMONGOCRYPT, 
"pymongocrypt is not installed") def test_kwargs(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = MongoClient(auto_encryption_opts=opts, connect=False) - self.addCleanup(client.close) + client = self.simple_client(auto_encryption_opts=opts, connect=False) self.assertEqual(get_client_opts(client).auto_encryption_opts, opts) class EncryptionIntegrationTest(IntegrationTest): """Base class for encryption integration tests.""" - @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") @client_context.require_version_min(4, 2, -1) - def setUpClass(cls): - super(EncryptionIntegrationTest, cls).setUpClass() + def setUp(self) -> None: + super().setUp() def assertEncrypted(self, val): self.assertIsInstance(val, Binary) @@ -214,9 +245,41 @@ def assertBinaryUUID(self, val): self.assertIsInstance(val, Binary) self.assertEqual(val.subtype, UUID_SUBTYPE) + def create_client_encryption( + self, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + self.addCleanup(client_encryption.close) + return client_encryption + + @classmethod + def unmanaged_create_client_encryption( + cls, + kms_providers: Mapping[str, Any], + key_vault_namespace: str, + key_vault_client: MongoClient, + codec_options: CodecOptions, + kms_tls_options: Optional[Mapping[str, Any]] = None, + ): + client_encryption = ClientEncryption( + kms_providers, key_vault_namespace, key_vault_client, codec_options, kms_tls_options + ) + return client_encryption + # Location of JSON test files. -BASE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "client-side-encryption") +if _IS_SYNC: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent, "client-side-encryption") +else: + BASE = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "client-side-encryption") + SPEC_PATH = os.path.join(BASE, "spec") OPTS = CodecOptions() @@ -241,8 +304,7 @@ def bson_data(*paths): class TestClientSimple(EncryptionIntegrationTest): def _test_auto_encrypt(self, opts): - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) # Create the encrypted field's data key. key_vault = create_key_vault( @@ -265,9 +327,11 @@ def _test_auto_encrypt(self, opts): unack = encrypted_coll.with_options(write_concern=WriteConcern(w=0)) unack.insert_one(docs[3]) unack.insert_many(docs[4:], ordered=False) - wait_until( - lambda: self.db.test.count_documents({}) == len(docs), "insert documents with w=0" - ) + + def count_documents(): + return self.db.test.count_documents({}) == len(docs) + + wait_until(count_documents, "insert documents with w=0") # Database.command auto decrypts. res = client.pymongo_test.command("find", "test", filter={"ssn": "000"}) @@ -275,24 +339,24 @@ def _test_auto_encrypt(self, opts): self.assertEqual(decrypted_docs, [{"_id": 0, "ssn": "000"}]) # Collection.find auto decrypts. - decrypted_docs = list(encrypted_coll.find()) + decrypted_docs = encrypted_coll.find().to_list() self.assertEqual(decrypted_docs, docs) # Collection.find auto decrypts getMores. 
- decrypted_docs = list(encrypted_coll.find(batch_size=1)) + decrypted_docs = encrypted_coll.find(batch_size=1).to_list() self.assertEqual(decrypted_docs, docs) # Collection.aggregate auto decrypts. - decrypted_docs = list(encrypted_coll.aggregate([])) + decrypted_docs = (encrypted_coll.aggregate([])).to_list() self.assertEqual(decrypted_docs, docs) # Collection.aggregate auto decrypts getMores. - decrypted_docs = list(encrypted_coll.aggregate([], batchSize=1)) + decrypted_docs = (encrypted_coll.aggregate([], batchSize=1)).to_list() self.assertEqual(decrypted_docs, docs) # Collection.distinct auto decrypts. decrypted_ssns = encrypted_coll.distinct("ssn") - self.assertEqual(set(decrypted_ssns), set(d["ssn"] for d in docs)) + self.assertEqual(set(decrypted_ssns), {d["ssn"] for d in docs}) # Make sure the field is actually encrypted. for encrypted_doc in self.db.test.find(): @@ -321,20 +385,40 @@ def test_auto_encrypt_local_schema_map(self): def test_use_after_close(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) client.admin.command("ping") client.close() with self.assertRaisesRegex(InvalidOperation, "Cannot use MongoClient after close"): client.admin.command("ping") + @unittest.skipIf( + not hasattr(os, "register_at_fork"), + "register_at_fork not available in this version of Python", + ) + @unittest.skipIf( + is_greenthread_patched(), + "gevent does not support POSIX-style forking.", + ) + @client_context.require_sync + def test_fork(self): + self.skipTest("Test is flaky, PYTHON-4738") + opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") + client = self.rs_or_single_client(auto_encryption_opts=opts) + + def target(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + client.admin.command("ping") + + with self.fork(target): + target() + class TestEncryptedBulkWrite(BulkTestBase, EncryptionIntegrationTest): def test_upsert_uuid_standard_encrypt(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) options = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) encrypted_coll = client.pymongo_test.test @@ -366,30 +450,13 @@ def test_upsert_uuid_standard_encrypt(self): class TestClientMaxWireVersion(IntegrationTest): - @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def setUpClass(cls): - super(TestClientMaxWireVersion, cls).setUpClass() - - @client_context.require_version_max(4, 0, 99) - def test_raise_max_wire_version_error(self): - opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) - msg = "Auto-encryption requires a minimum MongoDB version of 4.2" - with self.assertRaisesRegex(ConfigurationError, msg): - client.test.test.insert_one({}) - with self.assertRaisesRegex(ConfigurationError, msg): - client.admin.command("ping") - with self.assertRaisesRegex(ConfigurationError, msg): - client.test.test.find_one({}) - with self.assertRaisesRegex(ConfigurationError, msg): - client.test.test.bulk_write([InsertOne({})]) + def setUp(self): + super().setUp() def test_raise_unsupported_error(self): opts = AutoEncryptionOpts(KMS_PROVIDERS, "keyvault.datakeys") - client = 
rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client.close) + client = self.rs_or_single_client(auto_encryption_opts=opts) msg = "find_raw_batches does not support auto encryption" with self.assertRaisesRegex(InvalidOperation, msg): client.test.test.find_raw_batches({}) @@ -408,10 +475,9 @@ def test_raise_unsupported_error(self): class TestExplicitSimple(EncryptionIntegrationTest): def test_encrypt_decrypt(self): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) - self.addCleanup(client_encryption.close) # Use standard UUID representation. key_vault = client_context.client.keyvault.get_collection("datakeys", codec_options=OPTS) self.addCleanup(key_vault.drop) @@ -438,15 +504,22 @@ def test_encrypt_decrypt(self): ) self.assertEqual(encrypted_ssn, encrypted_ssn2) + # Test encryption via UUID + encrypted_ssn3 = client_encryption.encrypt( + doc["ssn"], + Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, + key_id=key_id.as_uuid(), + ) + self.assertEqual(encrypted_ssn, encrypted_ssn3) + # Test decryption. decrypted_ssn = client_encryption.decrypt(encrypted_ssn) self.assertEqual(decrypted_ssn, doc["ssn"]) def test_validation(self): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) - self.addCleanup(client_encryption.close) msg = "value to decrypt must be a bson.binary.Binary with subtype 6" with self.assertRaisesRegex(TypeError, msg): @@ -456,17 +529,13 @@ def test_validation(self): msg = "key_id must be a bson.binary.Binary with subtype 4" algo = Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic - uid = uuid.uuid4() - with self.assertRaisesRegex(TypeError, msg): - client_encryption.encrypt("str", algo, key_id=uid) # type: ignore[arg-type] with self.assertRaisesRegex(TypeError, msg): client_encryption.encrypt("str", algo, key_id=Binary(b"123")) def test_bson_errors(self): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) - self.addCleanup(client_encryption.close) # Attempt to encrypt an unencodable object. unencodable_value = object() @@ -479,15 +548,17 @@ def test_bson_errors(self): def test_codec_options(self): with self.assertRaisesRegex(TypeError, "codec_options must be"): - ClientEncryption( - KMS_PROVIDERS, "keyvault.datakeys", client_context.client, None # type: ignore[arg-type] + self.create_client_encryption( + KMS_PROVIDERS, + "keyvault.datakeys", + client_context.client, + None, # type: ignore[arg-type] ) opts = CodecOptions(uuid_representation=UuidRepresentation.JAVA_LEGACY) - client_encryption_legacy = ClientEncryption( + client_encryption_legacy = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts ) - self.addCleanup(client_encryption_legacy.close) # Create the encrypted field's data key. key_id = client_encryption_legacy.create_data_key("local") @@ -502,10 +573,9 @@ def test_codec_options(self): # Encrypt the same UUID with STANDARD codec options. 
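+ # JAVA_LEGACY stores UUID bytes in a different order than STANDARD, so deterministically encrypting the same UUID under each representation yields different ciphertexts.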
opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD) - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, opts ) - self.addCleanup(client_encryption.close) encrypted_standard = client_encryption.encrypt( value, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=key_id ) @@ -521,7 +591,7 @@ def test_codec_options(self): self.assertNotEqual(client_encryption.decrypt(encrypted_legacy), value) def test_close(self): - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) client_encryption.close() @@ -537,7 +607,7 @@ def test_close(self): client_encryption.decrypt(Binary(b"", 6)) def test_with_statement(self): - with ClientEncryption( + with self.create_client_encryption( KMS_PROVIDERS, "keyvault.datakeys", client_context.client, OPTS ) as client_encryption: pass @@ -546,24 +616,17 @@ def test_with_statement(self): # Spec tests -AWS_TEMP_CREDS = { - "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), - "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), - "sessionToken": os.environ.get("CSFLE_AWS_TEMP_SESSION_TOKEN", ""), -} - AWS_TEMP_NO_SESSION_CREDS = { "accessKeyId": os.environ.get("CSFLE_AWS_TEMP_ACCESS_KEY_ID", ""), "secretAccessKey": os.environ.get("CSFLE_AWS_TEMP_SECRET_ACCESS_KEY", ""), } -KMS_TLS_OPTS = {"kmip": {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM}} class TestSpec(SpecRunner): @classmethod @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") - def setUpClass(cls): - super(TestSpec, cls).setUpClass() + def _setup_class(cls): + super()._setup_class() def parse_auto_encrypt_opts(self, opts): """Parse clientOptions.autoEncryptOpts.""" @@ -593,9 +656,11 @@ def parse_auto_encrypt_opts(self, opts): self.skipTest("GCP environment credentials are not set") if "kmip" in kms_providers: kms_providers["kmip"] = KMIP_CREDS - opts["kms_tls_options"] = KMS_TLS_OPTS + opts["kms_tls_options"] = DEFAULT_KMS_TLS if "key_vault_namespace" not in opts: opts["key_vault_namespace"] = "keyvault.datakeys" + if "extra_options" in opts: + opts.update(camel_to_snake_args(opts.pop("extra_options"))) opts = dict(opts) return AutoEncryptionOpts(**opts) @@ -606,17 +671,24 @@ def parse_client_options(self, opts): if encrypt_opts: opts["auto_encryption_opts"] = self.parse_auto_encrypt_opts(encrypt_opts) - return super(TestSpec, self).parse_client_options(opts) + return super().parse_client_options(opts) def get_object_name(self, op): """Default object is collection.""" return op.get("object", "collection") def maybe_skip_scenario(self, test): - super(TestSpec, self).maybe_skip_scenario(test) + super().maybe_skip_scenario(test) desc = test["description"].lower() + if ( + "timeoutms applied to listcollections to get collection schema" in desc + and sys.platform in ("win32", "darwin") + ): + self.skipTest("PYTHON-3706 flaky test on Windows/macOS") if "type=symbol" in desc: self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to listcollections to get collection schema" in desc and not _IS_SYNC: + self.skipTest("PYTHON-4844 flaky test on async") def setup_scenario(self, scenario_def): """Override a test's setup.""" @@ -632,7 +704,7 @@ def setup_scenario(self, scenario_def): db_name = self.get_scenario_db_name(scenario_def) coll_name = self.get_scenario_coll_name(scenario_def) db = 
client_context.client.get_database(db_name, codec_options=OPTS) - coll = db.drop_collection(coll_name, encrypted_fields=encrypted_fields) + db.drop_collection(coll_name, encrypted_fields=encrypted_fields) wc = WriteConcern(w="majority") kwargs: Dict[str, Any] = {} if json_schema: @@ -650,7 +722,7 @@ def setup_scenario(self, scenario_def): def allowable_errors(self, op): """Override expected error classes.""" - errors = super(TestSpec, self).allowable_errors(op) + errors = super().allowable_errors(op) # An updateOne test expects encryption to error when no $ operator # appears but pymongo raises a client side ValueError in this case. if op["name"] == "updateOne": @@ -666,27 +738,17 @@ def run_scenario(self): return run_scenario -test_creator = TestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) +test_creator = SpecTestCreator(create_test, TestSpec, os.path.join(SPEC_PATH, "legacy")) test_creator.create_tests() - if _HAVE_PYMONGOCRYPT: globals().update( generate_test_classes( - os.path.join(SPEC_PATH, "unified"), - module=__name__, + os.path.join(SPEC_PATH, "unified"), module=__name__, expected_failures=["mapReduce .*"] ) ) # Prose Tests -ALL_KMS_PROVIDERS = { - "aws": AWS_CREDS, - "azure": AZURE_CREDS, - "gcp": GCP_CREDS, - "kmip": KMIP_CREDS, - "local": {"key": LOCAL_MASTER_KEY}, -} - LOCAL_KEY_ID = Binary(base64.b64decode(b"LOCALAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) AWS_KEY_ID = Binary(base64.b64decode(b"AWSAAAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) AZURE_KEY_ID = Binary(base64.b64decode(b"AZUREAAAAAAAAAAAAAAAAA=="), UUID_SUBTYPE) @@ -708,6 +770,11 @@ def create_key_vault(vault, *data_keys): vault.drop() if data_keys: vault.insert_many(data_keys) + vault.create_index( + "keyAltNames", + unique=True, + partialFilterExpression={"keyAltNames": {"$exists": True}}, + ) return vault @@ -738,17 +805,16 @@ class TestDataKeyDoubleEncryption(EncryptionIntegrationTest): "local": None, } - @classmethod @unittest.skipUnless( any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), "No environment credentials are set", ) - def setUpClass(cls): - super(TestDataKeyDoubleEncryption, cls).setUpClass() - cls.listener = OvertCommandListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) - cls.client.db.coll.drop() - cls.vault = create_key_vault(cls.client.keyvault.datakeys) + def setUp(self): + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.client.db.coll.drop() + self.vault = create_key_vault(self.client.keyvault.datakeys) # Configure the encrypted field via the local schema_map option. 
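+ # Supplying the schema locally via schema_map means automatic encryption does not need to fetch the collection's remote jsonSchema with listCollections.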
schemas = { @@ -766,55 +832,56 @@ def setUpClass(cls): } } opts = AutoEncryptionOpts( - cls.KMS_PROVIDERS, "keyvault.datakeys", schema_map=schemas, kms_tls_options=KMS_TLS_OPTS + self.KMS_PROVIDERS, + "keyvault.datakeys", + schema_map=schemas, + kms_tls_options=DEFAULT_KMS_TLS, ) - cls.client_encrypted = rs_or_single_client( + self.client_encrypted = self.rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - cls.client_encryption = ClientEncryption( - cls.KMS_PROVIDERS, "keyvault.datakeys", cls.client, OPTS, kms_tls_options=KMS_TLS_OPTS + self.client_encryption = self.create_client_encryption( + self.KMS_PROVIDERS, + "keyvault.datakeys", + self.client, + OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) - - @classmethod - def tearDownClass(cls): - cls.vault.drop() - cls.client.close() - cls.client_encrypted.close() - cls.client_encryption.close() - - def setUp(self): self.listener.reset() + def tearDown(self) -> None: + self.vault.drop() + def run_test(self, provider_name): # Create data key. master_key: Any = self.MASTER_KEYS[provider_name] datakey_id = self.client_encryption.create_data_key( - provider_name, master_key=master_key, key_alt_names=["%s_altname" % (provider_name,)] + provider_name, master_key=master_key, key_alt_names=[f"{provider_name}_altname"] ) self.assertBinaryUUID(datakey_id) - cmd = self.listener.results["started"][-1] + cmd = self.listener.started_events[-1] self.assertEqual("insert", cmd.command_name) self.assertEqual({"w": "majority"}, cmd.command.get("writeConcern")) - docs = list(self.vault.find({"_id": datakey_id})) + docs = self.vault.find({"_id": datakey_id}).to_list() self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["masterKey"]["provider"], provider_name) # Encrypt by key_id. encrypted = self.client_encryption.encrypt( - "hello %s" % (provider_name,), + f"hello {provider_name}", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id=datakey_id, ) self.assertEncrypted(encrypted) self.client_encrypted.db.coll.insert_one({"_id": provider_name, "value": encrypted}) doc_decrypted = self.client_encrypted.db.coll.find_one({"_id": provider_name}) - self.assertEqual(doc_decrypted["value"], "hello %s" % (provider_name,)) # type: ignore + self.assertEqual(doc_decrypted["value"], f"hello {provider_name}") # type: ignore # Encrypt by key_alt_name. encrypted_altname = self.client_encryption.encrypt( - "hello %s" % (provider_name,), + f"hello {provider_name}", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, - key_alt_name="%s_altname" % (provider_name,), + key_alt_name=f"{provider_name}_altname", ) self.assertEqual(encrypted_altname, encrypted) @@ -858,8 +925,7 @@ def _test_external_key_vault(self, with_external_key_vault): # Configure the encrypted field via the local schema_map option. 
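+ # When with_external_key_vault is set, key vault reads go through a separate client configured with deliberately wrong credentials, so key fetches are expected to fail with an authentication error.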
schemas = {"db.coll": json_data("external", "external-schema.json")} if with_external_key_vault: - key_vault_client = rs_or_single_client(username="fake-user", password="fake-pwd") - self.addCleanup(key_vault_client.close) + key_vault_client = self.rs_or_single_client(username="fake-user", password="fake-pwd") else: key_vault_client = client_context.client opts = AutoEncryptionOpts( @@ -869,15 +935,13 @@ def _test_external_key_vault(self, with_external_key_vault): key_vault_client=key_vault_client, ) - client_encrypted = rs_or_single_client( + client_encrypted = self.rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - self.addCleanup(client_encrypted.close) - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( self.kms_providers(), "keyvault.datakeys", key_vault_client, OPTS ) - self.addCleanup(client_encryption.close) if with_external_key_vault: # Authentication error. @@ -923,20 +987,18 @@ def test_views_are_prohibited(self): self.addCleanup(self.client.db.view.drop) opts = AutoEncryptionOpts(self.kms_providers(), "keyvault.datakeys") - client_encrypted = rs_or_single_client( + client_encrypted = self.rs_or_single_client( auto_encryption_opts=opts, uuidRepresentation="standard" ) - self.addCleanup(client_encrypted.close) with self.assertRaisesRegex(EncryptionError, "cannot auto encrypt a view"): client_encrypted.db.view.insert_one({}) class TestCorpus(EncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") - def setUpClass(cls): - super(TestCorpus, cls).setUpClass() + def setUp(self): + super().setUp() @staticmethod def kms_providers(): @@ -983,17 +1045,15 @@ def _test_corpus(self, opts): ) self.addCleanup(vault.drop) - client_encrypted = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client_encrypted.close) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( self.kms_providers(), "keyvault.datakeys", client_context.client, OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) - self.addCleanup(client_encryption.close) corpus = self.fix_up_curpus(json_data("corpus", "corpus.json")) corpus_copied: SON = SON() @@ -1017,17 +1077,17 @@ def _test_corpus(self, opts): self.assertIn(kms, ("local", "aws", "azure", "gcp", "kmip")) if identifier == "id": if kms == "local": - kwargs = dict(key_id=LOCAL_KEY_ID) + kwargs = {"key_id": LOCAL_KEY_ID} elif kms == "aws": - kwargs = dict(key_id=AWS_KEY_ID) + kwargs = {"key_id": AWS_KEY_ID} elif kms == "azure": - kwargs = dict(key_id=AZURE_KEY_ID) + kwargs = {"key_id": AZURE_KEY_ID} elif kms == "gcp": - kwargs = dict(key_id=GCP_KEY_ID) + kwargs = {"key_id": GCP_KEY_ID} else: - kwargs = dict(key_id=KMIP_KEY_ID) + kwargs = {"key_id": KMIP_KEY_ID} else: - kwargs = dict(key_alt_name=kms) + kwargs = {"key_alt_name": kms} self.assertIn(value["algo"], ("det", "rand")) if value["algo"] == "det": @@ -1037,15 +1097,17 @@ def _test_corpus(self, opts): try: encrypted_val = client_encryption.encrypt( - value["value"], algo, **kwargs # type: ignore[arg-type] + value["value"], + algo, + **kwargs, # type: ignore[arg-type] ) if not value["allowed"]: - self.fail("encrypt should have failed: %r: %r" % (key, value)) + self.fail(f"encrypt should have failed: {key!r}: {value!r}") corpus_copied[key]["value"] = encrypted_val except Exception: if value["allowed"]: tb = 
traceback.format_exc() - self.fail("encrypt failed: %r: %r, traceback: %s" % (key, value, tb)) + self.fail(f"encrypt failed: {key!r}: {value!r}, traceback: {tb}") client_encrypted.db.coll.insert_one(corpus_copied) corpus_decrypted = client_encrypted.db.coll.find_one() @@ -1082,7 +1144,7 @@ def _test_corpus(self, opts): def test_corpus(self): opts = AutoEncryptionOpts( - self.kms_providers(), "keyvault.datakeys", kms_tls_options=KMS_TLS_OPTS + self.kms_providers(), "keyvault.datakeys", kms_tls_options=DEFAULT_KMS_TLS ) self._test_corpus(opts) @@ -1093,7 +1155,7 @@ def test_corpus_local_schema(self): self.kms_providers(), "keyvault.datakeys", schema_map=schemas, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self._test_corpus(opts) @@ -1110,12 +1172,11 @@ class TestBsonSizeBatches(EncryptionIntegrationTest): client_encrypted: MongoClient listener: OvertCommandListener - @classmethod - def setUpClass(cls): - super(TestBsonSizeBatches, cls).setUpClass() + def setUp(self): + super().setUp() db = client_context.client.db - cls.coll = db.coll - cls.coll.drop() + self.coll = db.coll + self.coll.drop() # Configure the encrypted 'db.coll' collection via jsonSchema. json_schema = json_data("limits", "limits-schema.json") db.create_collection( @@ -1133,17 +1194,14 @@ def setUpClass(cls): coll.insert_one(json_data("limits", "limits-key.json")) opts = AutoEncryptionOpts({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") - cls.listener = OvertCommandListener() - cls.client_encrypted = rs_or_single_client( - auto_encryption_opts=opts, event_listeners=[cls.listener] + self.listener = OvertCommandListener() + self.client_encrypted = self.rs_or_single_client( + auto_encryption_opts=opts, event_listeners=[self.listener] ) - cls.coll_encrypted = cls.client_encrypted.db.coll + self.coll_encrypted = self.client_encrypted.db.coll - @classmethod - def tearDownClass(cls): - cls.coll_encrypted.drop() - cls.client_encrypted.close() - super(TestBsonSizeBatches, cls).tearDownClass() + def tearDown(self) -> None: + self.coll_encrypted.drop() def test_01_insert_succeeds_under_2MiB(self): doc = {"_id": "over_2mib_under_16mib", "unencrypted": "a" * _2_MiB} @@ -1167,7 +1225,9 @@ def test_03_bulk_batch_split(self): doc2 = {"_id": "over_2mib_2", "unencrypted": "a" * _2_MiB} self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) def test_04_bulk_batch_split(self): limits_doc = json_data("limits", "limits-doc.json") @@ -1177,7 +1237,9 @@ def test_04_bulk_batch_split(self): doc2.update(limits_doc) self.listener.reset() self.coll_encrypted.bulk_write([InsertOne(doc1), InsertOne(doc2)]) - self.assertEqual(self.listener.started_command_names(), ["insert", "insert"]) + self.assertEqual( + len([c for c in self.listener.started_command_names() if c == "insert"]), 2 + ) def test_05_insert_succeeds_just_under_16MiB(self): doc = {"_id": "under_16mib", "unencrypted": "a" * (_16_MiB - 2000)} @@ -1200,54 +1262,47 @@ def test_06_insert_fails_over_16MiB(self): with self.assertRaises(BulkWriteError) as ctx: self.coll_encrypted.bulk_write([InsertOne(doc)]) err = ctx.exception.details["writeErrors"][0] - self.assertEqual(2, err["code"]) + self.assertIn(err["code"], [2, 10334]) self.assertIn("object to insert too large", err["errmsg"]) class TestCustomEndpoint(EncryptionIntegrationTest): """Prose tests 
for creating data keys with a custom endpoint.""" - @classmethod @unittest.skipUnless( any([all(AWS_CREDS.values()), all(AZURE_CREDS.values()), all(GCP_CREDS.values())]), "No environment credentials are set", ) - def setUpClass(cls): - super(TestCustomEndpoint, cls).setUpClass() - def setUp(self): + super().setUp() kms_providers = { "aws": AWS_CREDS, "azure": AZURE_CREDS, "gcp": GCP_CREDS, "kmip": KMIP_CREDS, } - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers=kms_providers, key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) kms_providers_invalid = copy.deepcopy(kms_providers) kms_providers_invalid["azure"]["identityPlatformEndpoint"] = "doesnotexist.invalid:443" kms_providers_invalid["gcp"]["endpoint"] = "doesnotexist.invalid:443" - kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.local:5698" - self.client_encryption_invalid = ClientEncryption( + kms_providers_invalid["kmip"]["endpoint"] = "doesnotexist.invalid:5698" + self.client_encryption_invalid = self.create_client_encryption( kms_providers=kms_providers_invalid, key_vault_namespace="keyvault.datakeys", key_vault_client=client_context.client, codec_options=OPTS, - kms_tls_options=KMS_TLS_OPTS, + kms_tls_options=DEFAULT_KMS_TLS, ) self._kmip_host_error = None self._invalid_host_error = None - def tearDown(self): - self.client_encryption.close() - self.client_encryption_invalid.close() - def run_test_expected_success(self, provider_name, master_key): data_key_id = self.client_encryption.create_data_key(provider_name, master_key=master_key) encrypted = self.client_encryption.encrypt( @@ -1293,16 +1348,10 @@ def test_03_aws_region_key_endpoint_port(self): }, ) - @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") - def test_04_aws_endpoint_invalid_port(self): - master_key = { - "region": "us-east-1", - "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), - "endpoint": "kms.us-east-1.amazonaws.com:12345", - } - with self.assertRaises(EncryptionError) as ctx: - self.client_encryption.create_data_key("aws", master_key=master_key) - self.assertIsInstance(ctx.exception.cause, socket.error) + def test_04_kmip_endpoint_invalid_port(self): + master_key = {"keyId": "1", "endpoint": "localhost:12345"} + with self.assertRaisesRegex(EncryptionError, "localhost:12345"): + self.client_encryption.create_data_key("kmip", master_key=master_key) @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def test_05_aws_endpoint_wrong_region(self): @@ -1408,30 +1457,30 @@ def test_11_kmip_master_key_endpoint(self): self.assertEqual("test", self.client_encryption_invalid.decrypt(encrypted)) def test_12_kmip_master_key_invalid_endpoint(self): - key = {"keyId": "1", "endpoint": "doesnotexist.local:5698"} + key = {"keyId": "1", "endpoint": "doesnotexist.invalid:5698"} with self.assertRaisesRegex(EncryptionError, self.kmip_host_error): self.client_encryption.create_data_key("kmip", key) -class AzureGCPEncryptionTestMixin(object): +class AzureGCPEncryptionTestMixin(EncryptionIntegrationTest): DEK = None KMS_PROVIDER_MAP = None KEYVAULT_DB = "keyvault" KEYVAULT_COLL = "datakeys" client: MongoClient - def setUp(self): + def _setup(self): keyvault = self.client.get_database(self.KEYVAULT_DB).get_collection(self.KEYVAULT_COLL) create_key_vault(keyvault, self.DEK) def 
_test_explicit(self, expectation): - client_encryption = ClientEncryption( + self._setup() + client_encryption = self.create_client_encryption( self.KMS_PROVIDER_MAP, # type: ignore[arg-type] ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]), client_context.client, OPTS, ) - self.addCleanup(client_encryption.close) ciphertext = client_encryption.encrypt( "string0", @@ -1443,6 +1492,7 @@ def _test_explicit(self, expectation): self.assertEqual(client_encryption.decrypt(ciphertext), "string0") def _test_automatic(self, expectation_extjson, payload): + self._setup() encrypted_db = "db" encrypted_coll = "coll" keyvault_namespace = ".".join([self.KEYVAULT_DB, self.KEYVAULT_COLL]) @@ -1454,10 +1504,9 @@ def _test_automatic(self, expectation_extjson, payload): ) insert_listener = AllowListEventListener("insert") - client = rs_or_single_client( + client = self.rs_or_single_client( auto_encryption_opts=encryption_opts, event_listeners=[insert_listener] ) - self.addCleanup(client.close) coll = client.get_database(encrypted_db).get_collection( encrypted_coll, codec_options=OPTS, write_concern=WriteConcern("majority") @@ -1467,7 +1516,7 @@ def _test_automatic(self, expectation_extjson, payload): expected_document = json_util.loads(expectation_extjson, json_options=JSON_OPTS) coll.insert_one(payload) - event = insert_listener.results["started"][0] + event = insert_listener.started_events[0] inserted_doc = event.command["documents"][0] for key, value in expected_document.items(): @@ -1479,13 +1528,12 @@ def _test_automatic(self, expectation_extjson, payload): class TestAzureEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(AZURE_CREDS.values()), "Azure environment credentials are not set") - def setUpClass(cls): - cls.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} - cls.DEK = json_data(BASE, "custom", "azure-dek.json") - cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super(TestAzureEncryption, cls).setUpClass() + def setUp(self): + self.KMS_PROVIDER_MAP = {"azure": AZURE_CREDS} + self.DEK = json_data(BASE, "custom", "azure-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + super().setUp() def test_explicit(self): return self._test_explicit( @@ -1505,13 +1553,12 @@ def test_automatic(self): class TestGCPEncryption(AzureGCPEncryptionTestMixin, EncryptionIntegrationTest): - @classmethod @unittest.skipUnless(any(GCP_CREDS.values()), "GCP environment credentials are not set") - def setUpClass(cls): - cls.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} - cls.DEK = json_data(BASE, "custom", "gcp-dek.json") - cls.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") - super(TestGCPEncryption, cls).setUpClass() + def setUp(self): + self.KMS_PROVIDER_MAP = {"gcp": GCP_CREDS} + self.DEK = json_data(BASE, "custom", "gcp-dek.json") + self.SCHEMA_MAP = json_data(BASE, "custom", "azure-gcp-schema.json") + super().setUp() def test_explicit(self): return self._test_explicit( @@ -1530,22 +1577,21 @@ def test_automatic(self): return self._test_automatic(expected_document_extjson, {"secret_gcp": "string0"}) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#deadlock-tests +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#deadlock-tests class TestDeadlockProse(EncryptionIntegrationTest): def setUp(self): - self.client_test = rs_or_single_client( + super().setUp() + self.client_test = 
self.rs_or_single_client( maxPoolSize=1, readConcernLevel="majority", w="majority", uuidRepresentation="standard" ) - self.addCleanup(self.client_test.close) self.client_keyvault_listener = OvertCommandListener() - self.client_keyvault = rs_or_single_client( + self.client_keyvault = self.rs_or_single_client( maxPoolSize=1, readConcernLevel="majority", w="majority", event_listeners=[self.client_keyvault_listener], ) - self.addCleanup(self.client_keyvault.close) self.client_test.keyvault.datakeys.drop() self.client_test.db.coll.drop() @@ -1556,7 +1602,7 @@ def setUp(self): codec_options=OPTS, ) - client_encryption = ClientEncryption( + client_encryption = self.create_client_encryption( kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, key_vault_namespace="keyvault.datakeys", key_vault_client=self.client_test, @@ -1565,14 +1611,13 @@ def setUp(self): self.ciphertext = client_encryption.encrypt( "string0", Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_alt_name="local" ) - client_encryption.close() self.client_listener = OvertCommandListener() self.topology_listener = TopologyEventListener() self.optargs = ({"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys") def _run_test(self, max_pool_size, auto_encryption_opts): - client_encrypted = rs_or_single_client( + client_encrypted = self.rs_or_single_client( readConcernLevel="majority", w="majority", maxPoolSize=max_pool_size, @@ -1590,8 +1635,6 @@ def _run_test(self, max_pool_size, auto_encryption_opts): result = client_encrypted.db.coll.find_one({"_id": 0}) self.assertEqual(result, {"_id": 0, "encrypted": "string0"}) - self.addCleanup(client_encrypted.close) - def test_case_1(self): self._run_test( max_pool_size=1, @@ -1600,7 +1643,7 @@ def test_case_1(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 4) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1621,7 +1664,7 @@ def test_case_2(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 3) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1630,7 +1673,7 @@ def test_case_2(self): self.assertEqual(cev[2].command_name, "find") self.assertEqual(cev[2].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1645,7 +1688,7 @@ def test_case_3(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 2) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") @@ -1662,12 +1705,12 @@ def test_case_4(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1682,7 +1725,7 @@ def test_case_5(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events 
self.assertEqual(len(cev), 5) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1705,7 +1748,7 @@ def test_case_6(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 3) self.assertEqual(cev[0].command_name, "listCollections") self.assertEqual(cev[0].database_name, "db") @@ -1714,7 +1757,7 @@ def test_case_6(self): self.assertEqual(cev[2].command_name, "find") self.assertEqual(cev[2].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1729,7 +1772,7 @@ def test_case_7(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 2) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") @@ -1746,12 +1789,12 @@ def test_case_8(self): ), ) - cev = self.client_listener.results["started"] + cev = self.client_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "db") - cev = self.client_keyvault_listener.results["started"] + cev = self.client_keyvault_listener.started_events self.assertEqual(len(cev), 1) self.assertEqual(cev[0].command_name, "find") self.assertEqual(cev[0].database_name, "keyvault") @@ -1759,18 +1802,16 @@ def test_case_8(self): self.assertEqual(len(self.topology_listener.results["opened"]), 1) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#14-decryption-events +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#14-decryption-events class TestDecryptProse(EncryptionIntegrationTest): def setUp(self): + super().setUp() self.client = client_context.client self.client.db.drop_collection("decryption_events") - self.client.keyvault.drop_collection("datakeys") - self.client.keyvault.datakeys.create_index( - "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} - ) + create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() ) keyID = self.client_encryption.create_data_key("local") @@ -1785,10 +1826,9 @@ def setUp(self): key_vault_namespace="keyvault.datakeys", kms_providers=kms_providers_map ) self.listener = AllowListEventListener("aggregate") - self.encrypted_client = rs_or_single_client( + self.encrypted_client = self.rs_or_single_client( auto_encryption_opts=opts, retryReads=False, event_listeners=[self.listener] ) - self.addCleanup(self.encrypted_client.close) def test_01_command_error(self): with self.fail_point( @@ -1799,8 +1839,8 @@ def test_01_command_error(self): ): with self.assertRaises(OperationFailure): self.encrypted_client.db.decryption_events.aggregate([]) - self.assertEqual(len(self.listener.results["failed"]), 1) - for event in self.listener.results["failed"]: + self.assertEqual(len(self.listener.failed_events), 1) + for event in self.listener.failed_events: self.assertEqual(event.failure["code"], 123) def test_02_network_error(self): @@ -1812,8 +1852,8 @@ def 
test_02_network_error(self): ): with self.assertRaises(AutoReconnect): self.encrypted_client.db.decryption_events.aggregate([]) - self.assertEqual(len(self.listener.results["failed"]), 1) - self.assertEqual(self.listener.results["failed"][0].command_name, "aggregate") + self.assertEqual(len(self.listener.failed_events), 1) + self.assertEqual(self.listener.failed_events[0].command_name, "aggregate") def test_03_decrypt_error(self): self.encrypted_client.db.decryption_events.insert_one( @@ -1821,8 +1861,8 @@ def test_03_decrypt_error(self): ) with self.assertRaises(EncryptionError): next(self.encrypted_client.db.decryption_events.aggregate([])) - event = self.listener.results["succeeded"][0] - self.assertEqual(len(self.listener.results["failed"]), 0) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) self.assertEqual( event.reply["cursor"]["firstBatch"][0]["encrypted"], self.malformed_cipher_text ) @@ -1830,12 +1870,12 @@ def test_03_decrypt_error(self): def test_04_decrypt_success(self): self.encrypted_client.db.decryption_events.insert_one({"encrypted": self.cipher_text}) next(self.encrypted_client.db.decryption_events.aggregate([])) - event = self.listener.results["succeeded"][0] - self.assertEqual(len(self.listener.results["failed"]), 0) + event = self.listener.succeeded_events[0] + self.assertEqual(len(self.listener.failed_events), 0) self.assertEqual(event.reply["cursor"]["firstBatch"][0]["encrypted"], self.cipher_text) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#bypass-spawning-mongocryptd +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#bypass-spawning-mongocryptd class TestBypassSpawningMongocryptdProse(EncryptionIntegrationTest): @unittest.skipIf( os.environ.get("TEST_CRYPT_SHARED"), @@ -1865,8 +1905,7 @@ def reset_timeout(): "--port=27027", ], ) - client_encrypted = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client_encrypted.close) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) with self.assertRaisesRegex(EncryptionError, "Timeout"): client_encrypted.db.coll.insert_one({"encrypted": "test"}) @@ -1880,31 +1919,96 @@ def test_bypassAutoEncryption(self): "--port=27027", ], ) - client_encrypted = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(client_encrypted.close) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) client_encrypted.db.coll.insert_one({"unencrypted": "test"}) # Validate that mongocryptd was not spawned: - mongocryptd_client = MongoClient("mongodb://localhost:27027/?serverSelectionTimeoutMS=500") + mongocryptd_client = self.simple_client( + "mongodb://localhost:27027/?serverSelectionTimeoutMS=500" + ) with self.assertRaises(ServerSelectionTimeoutError): mongocryptd_client.admin.command("ping") + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_via_loading_shared_library(self): + create_key_vault( + client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000", + mongocryptd_spawn_args=[ + 
"--pidfilepath=bypass-spawning-mongocryptd.pid", + "--port=47021", + ], + crypt_shared_lib_required=True, + ) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + self.assertEncrypted((client_context.client.db.coll.find_one({}))["encrypted"]) + no_mongocryptd_client = self.simple_client( + host="mongodb://localhost:47021/db?serverSelectionTimeoutMS=1000" + ) + with self.assertRaises(ServerSelectionTimeoutError): + no_mongocryptd_client.db.command("ping") + + # https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#20-bypass-creating-mongocryptd-client-when-shared-library-is-loaded + @unittest.skipUnless(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is not installed") + def test_client_via_loading_shared_library(self): + connection_established = False + + class Handler(socketserver.BaseRequestHandler): + def handle(self): + nonlocal connection_established + connection_established = True + + server = socketserver.TCPServer(("localhost", 47021), Handler) + + def listener(): + with server: + server.serve_forever(poll_interval=0.05) # Short poll timeout to speed up the test + + listener_t = Thread(target=listener) + listener_t.start() + create_key_vault( + client_context.client.keyvault.datakeys, + json_data("external", "external-key.json"), + ) + schemas = {"db.coll": json_data("external", "external-schema.json")} + opts = AutoEncryptionOpts( + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + key_vault_namespace="keyvault.datakeys", + schema_map=schemas, + mongocryptd_uri="mongodb://localhost:47021", + crypt_shared_lib_required=False, + ) + client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + client_encrypted.db.coll.drop() + client_encrypted.db.coll.insert_one({"encrypted": "test"}) + server.shutdown() + listener_t.join() + self.assertFalse(connection_established, "a connection was established on port 47021") + # https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#kms-tls-tests class TestKmsTLSProse(EncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): - super(TestKmsTLSProse, self).setUp() + super().setUp() self.patch_system_certs(CA_PEM) - self.client_encrypted = ClientEncryption( + self.client_encrypted = self.create_client_encryption( {"aws": AWS_CREDS}, "keyvault.datakeys", self.client, OPTS ) - self.addCleanup(self.client_encrypted.close) def test_invalid_kms_certificate_expired(self): key = { "region": "us-east-1", "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "mongodb://127.0.0.1:8000", + "endpoint": "mongodb://127.0.0.1:9000", } # Some examples: # certificate verify failed: certificate has expired (_ssl.c:1129) @@ -1916,65 +2020,64 @@ def test_invalid_hostname_in_kms_certificate(self): key = { "region": "us-east-1", "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "mongodb://127.0.0.1:8001", + "endpoint": "mongodb://127.0.0.1:9001", } # Some examples: # certificate verify failed: IP address mismatch, certificate is not valid for '127.0.0.1'. 
(_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + with self.assertRaisesRegex( + EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch|Certificate" + ): self.client_encrypted.create_data_key("aws", master_key=key) -# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.rst#kms-tls-options-tests +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#kms-tls-options-tests class TestKmsTLSOptions(EncryptionIntegrationTest): @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") def setUp(self): - super(TestKmsTLSOptions, self).setUp() + super().setUp() # 1, create client with only tlsCAFile. providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) - providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8002" - providers["gcp"]["endpoint"] = "127.0.0.1:8002" + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9002" + providers["gcp"]["endpoint"] = "127.0.0.1:9002" kms_tls_opts_ca_only = { "aws": {"tlsCAFile": CA_PEM}, "azure": {"tlsCAFile": CA_PEM}, "gcp": {"tlsCAFile": CA_PEM}, "kmip": {"tlsCAFile": CA_PEM}, } - self.client_encryption_no_client_cert = ClientEncryption( + self.client_encryption_no_client_cert = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addCleanup(self.client_encryption_no_client_cert.close) # 2, same providers as above but with tlsCertificateKeyFile. kms_tls_opts = copy.deepcopy(kms_tls_opts_ca_only) for p in kms_tls_opts: kms_tls_opts[p]["tlsCertificateKeyFile"] = CLIENT_PEM - self.client_encryption_with_tls = ClientEncryption( + self.client_encryption_with_tls = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts ) - self.addCleanup(self.client_encryption_with_tls.close) # 3, update endpoints to expired host. providers: dict = copy.deepcopy(providers) - providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8000" - providers["gcp"]["endpoint"] = "127.0.0.1:8000" - providers["kmip"]["endpoint"] = "127.0.0.1:8000" - self.client_encryption_expired = ClientEncryption( + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9000" + providers["gcp"]["endpoint"] = "127.0.0.1:9000" + providers["kmip"]["endpoint"] = "127.0.0.1:9000" + self.client_encryption_expired = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addCleanup(self.client_encryption_expired.close) # 4, update endpoints to invalid host.
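# (Here 9001 is the mock KMS server whose certificate hostname does not match;
# per the assertions in this suite, 9000 serves an expired certificate, 9002 a
# valid certificate that still requires a client certificate, and 9003 injects
# network/HTTP faults for the KMS retry tests.)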
providers: dict = copy.deepcopy(providers) - providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:8001" - providers["gcp"]["endpoint"] = "127.0.0.1:8001" - providers["kmip"]["endpoint"] = "127.0.0.1:8001" - self.client_encryption_invalid_hostname = ClientEncryption( + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9001" + providers["gcp"]["endpoint"] = "127.0.0.1:9001" + providers["kmip"]["endpoint"] = "127.0.0.1:9001" + self.client_encryption_invalid_hostname = self.create_client_encryption( providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_ca_only ) - self.addCleanup(self.client_encryption_invalid_hostname.close) # Errors when client has no cert, some examples: # [SSL: TLSV13_ALERT_CERTIFICATE_REQUIRED] tlsv13 alert certificate required (_ssl.c:2623) self.cert_error = ( "certificate required|SSL handshake failed|" - "KMS connection closed|Connection reset by peer" + "KMS connection closed|Connection reset by peer|ECONNRESET|EPIPE" ) # On Python 3.10+ this error might be: # EOF occurred in violation of protocol (_ssl.c:2384) @@ -1984,12 +2087,38 @@ def setUp(self): # [WinError 10054] An existing connection was forcibly closed by the remote host if sys.platform == "win32": self.cert_error += "|forcibly closed" + # 5, Test named KMS providers. + providers = { + "aws:no_client_cert": AWS_CREDS, + "azure:no_client_cert": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS}, + "gcp:no_client_cert": {"endpoint": "127.0.0.1:9002", **GCP_CREDS}, + "kmip:no_client_cert": KMIP_CREDS, + "aws:with_tls": AWS_CREDS, + "azure:with_tls": {"identityPlatformEndpoint": "127.0.0.1:9002", **AZURE_CREDS}, + "gcp:with_tls": {"endpoint": "127.0.0.1:9002", **GCP_CREDS}, + "kmip:with_tls": KMIP_CREDS, + } + no_cert = {"tlsCAFile": CA_PEM} + with_cert = {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} + kms_tls_opts_4 = { + "aws:no_client_cert": no_cert, + "azure:no_client_cert": no_cert, + "gcp:no_client_cert": no_cert, + "kmip:no_client_cert": no_cert, + "aws:with_tls": with_cert, + "azure:with_tls": with_cert, + "gcp:with_tls": with_cert, + "kmip:with_tls": with_cert, + } + self.client_encryption_with_names = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts_4 + ) def test_01_aws(self): key = { "region": "us-east-1", "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", - "endpoint": "127.0.0.1:8002", + "endpoint": "127.0.0.1:9002", }
(_ssl.c:1129)" # hostname '127.0.0.1' doesn't match 'wronghost.com' - key["endpoint"] = "127.0.0.1:8001" - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + # 127.0.0.1:9001: ('Certificate does not contain any `subjectAltName`s.',) + key["endpoint"] = "127.0.0.1:9001" + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): self.client_encryption_invalid_hostname.create_data_key("aws", key) def test_02_azure(self): - key = {"keyVaultEndpoint": "doesnotexist.local", "keyName": "foo"} + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} # Missing client cert error. with self.assertRaisesRegex(EncryptionError, self.cert_error): self.client_encryption_no_client_cert.create_data_key("azure", key) @@ -2021,7 +2154,10 @@ def test_02_azure(self): with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("azure", key) # Invalid cert hostname error. - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): self.client_encryption_invalid_hostname.create_data_key("azure", key) def test_03_gcp(self): @@ -2036,7 +2172,10 @@ def test_03_gcp(self): with self.assertRaisesRegex(EncryptionError, "expired|certificate verify failed"): self.client_encryption_expired.create_data_key("gcp", key) # Invalid cert hostname error. - with self.assertRaisesRegex(EncryptionError, "IP address mismatch|wronghost"): + with self.assertRaisesRegex( + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", + ): self.client_encryption_invalid_hostname.create_data_key("gcp", key) def test_04_kmip(self): @@ -2049,21 +2188,68 @@ def test_04_kmip(self): self.client_encryption_expired.create_data_key("kmip") # Invalid cert hostname error. with self.assertRaisesRegex( - EncryptionError, "IP address mismatch|wronghost|IPAddressMismatch" + EncryptionError, + "IP address mismatch|wronghost|IPAddressMismatch|Certificate|SSL handshake failed", ): self.client_encryption_invalid_hostname.create_data_key("kmip") + def test_05_tlsDisableOCSPEndpointCheck_is_permitted(self): + providers = {"aws": {"accessKeyId": "foo", "secretAccessKey": "bar"}} + options = {"aws": {"tlsDisableOCSPEndpointCheck": True}} + encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=options + ) + ctx = encryption._io_callbacks._kms_ssl_contexts["aws"] + if not hasattr(ctx, "check_ocsp_endpoint"): + raise self.skipTest("OCSP not enabled") + self.assertFalse(ctx.check_ocsp_endpoint) + + def test_06_named_kms_providers_apply_tls_options_aws(self): + key = { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "endpoint": "127.0.0.1:9002", + } + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("aws:no_client_cert", key) + # "parse error" here means that the TLS handshake succeeded. 
+ with self.assertRaisesRegex(EncryptionError, "parse error"): + self.client_encryption_with_names.create_data_key("aws:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_azure(self): + key = {"keyVaultEndpoint": "doesnotexist.invalid", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("azure:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_names.create_data_key("azure:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_gcp(self): + key = {"projectId": "foo", "location": "bar", "keyRing": "baz", "keyName": "foo"} + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("gcp:no_client_cert", key) + # "HTTP status=404" here means that the TLS handshake succeeded. + with self.assertRaisesRegex(EncryptionError, "HTTP status=404"): + self.client_encryption_with_names.create_data_key("gcp:with_tls", key) + + def test_06_named_kms_providers_apply_tls_options_kmip(self): + # Missing client cert error. + with self.assertRaisesRegex(EncryptionError, self.cert_error): + self.client_encryption_with_names.create_data_key("kmip:no_client_cert") + self.client_encryption_with_names.create_data_key("kmip:with_tls") -# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.rst#unique-index-on-keyaltnames + +# https://github.com/mongodb/specifications/blob/50e26fe/source/client-side-encryption/tests/README.md#unique-index-on-keyaltnames class TestUniqueIndexOnKeyAltNamesProse(EncryptionIntegrationTest): def setUp(self): + super().setUp() self.client = client_context.client - self.client.keyvault.drop_collection("datakeys") - self.client.keyvault.datakeys.create_index( - "keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}} - ) + create_key_vault(self.client.keyvault.datakeys) kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( kms_providers_map, "keyvault.datakeys", self.client, CodecOptions() ) self.def_key_id = self.client_encryption.create_data_key("local", key_alt_names=["def"]) @@ -2086,10 +2272,10 @@ def test_02_add_key_alt_name(self): assert key_doc["keyAltNames"] == ["def"] -# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.rst#explicit-encryption +# https://github.com/mongodb/specifications/blob/d4c9432/source/client-side-encryption/tests/README.md#explicit-encryption class TestExplicitQueryableEncryption(EncryptionIntegrationTest): @client_context.require_no_standalone - @client_context.require_version_min(6, 0, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() self.encrypted_fields = json_data("etc", "data", "encryptedFields.json") @@ -2097,24 +2283,19 @@ def setUp(self): self.key1_id = self.key1_document["_id"] self.db = self.client.test_queryable_encryption self.client.drop_database(self.db) - self.db.command("create", self.encrypted_fields["escCollection"]) - self.db.command("create", self.encrypted_fields["eccCollection"]) - self.db.command("create", self.encrypted_fields["ecocCollection"]) 
self.db.command("create", "explicit_encryption", encryptedFields=self.encrypted_fields) key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) self.addCleanup(key_vault.drop) self.key_vault_client = self.client - self.client_encryption = ClientEncryption( + self.client_encryption = self.create_client_encryption( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS ) - self.addCleanup(self.client_encryption.close) opts = AutoEncryptionOpts( {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, bypass_query_analysis=True, ) - self.encrypted_client = rs_or_single_client(auto_encryption_opts=opts) - self.addCleanup(self.encrypted_client.close) + self.encrypted_client = self.rs_or_single_client(auto_encryption_opts=opts) def test_01_insert_encrypted_indexed_and_find(self): val = "encrypted indexed value" @@ -2128,11 +2309,12 @@ def test_01_insert_encrypted_indexed_and_find(self): find_payload = self.client_encryption.encrypt( val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 ) - docs = list( - self.encrypted_client[self.db.name].explicit_encryption.find( - {"encryptedIndexed": find_payload} - ) + docs = ( + self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() ) + self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["encryptedIndexed"], val) @@ -2150,11 +2332,12 @@ def test_02_insert_encrypted_indexed_and_find_contention(self): find_payload = self.client_encryption.encrypt( val, Algorithm.INDEXED, self.key1_id, query_type=QueryType.EQUALITY, contention_factor=0 ) - docs = list( - self.encrypted_client[self.db.name].explicit_encryption.find( - {"encryptedIndexed": find_payload} - ) + docs = ( + self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() ) + self.assertLessEqual(len(docs), 10) for doc in docs: self.assertEqual(doc["encryptedIndexed"], val) @@ -2167,11 +2350,12 @@ def test_02_insert_encrypted_indexed_and_find_contention(self): query_type=QueryType.EQUALITY, contention_factor=contention, ) - docs = list( - self.encrypted_client[self.db.name].explicit_encryption.find( - {"encryptedIndexed": find_payload} - ) + docs = ( + self.encrypted_client[self.db.name] + .explicit_encryption.find({"encryptedIndexed": find_payload}) + .to_list() ) + self.assertEqual(len(docs), 10) for doc in docs: self.assertEqual(doc["encryptedIndexed"], val) @@ -2183,7 +2367,7 @@ def test_03_insert_encrypted_unindexed(self): {"_id": 1, "encryptedUnindexed": insert_payload} ) - docs = list(self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1})) + docs = self.encrypted_client[self.db.name].explicit_encryption.find({"_id": 1}).to_list() self.assertEqual(len(docs), 1) self.assertEqual(docs[0]["encryptedUnindexed"], val) @@ -2202,67 +2386,483 @@ def test_05_roundtrip_encrypted_unindexed(self): self.assertEqual(decrypted, val) -class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): - # Queryable Encryption is not supported on Standalone topology. 
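+# The cases below join every combination of CSFLE-encrypted, QE-encrypted, and
+# unencrypted collections via $lookup. Per the assertions in cases 1-9: MongoDB
+# 8.1+ supports each combination except a CSFLE collection joining a QE
+# collection (case 8), and pre-8.1 servers reject $lookup with automatic
+# encryption entirely (case 9).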
+# https://github.com/mongodb/specifications/blob/527e22d5090ec48bf1e144c45fc831de0f1935f6/source/client-side-encryption/tests/README.md#25-test-lookup +class TestLookupProse(EncryptionIntegrationTest): @client_context.require_no_standalone - @client_context.require_version_min(6, 0, -1) + @client_context.require_version_min(7, 0, -1) def setUp(self): super().setUp() + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + encrypted_client.drop_database("db") - def test_queryable_encryption(self): - # MongoClient to use in testing that handles auth/tls/etc, - # and cleanup. - def MongoClient(**kwargs): - c = rs_or_single_client(**kwargs) - self.addCleanup(c.close) - return c + key_doc = json_data("etc", "data", "lookup", "key-doc.json") + create_key_vault(encrypted_client.db.keyvault, key_doc) + self.addCleanup(client_context.client.drop_database, "db") - # Drop data from prior test runs. - self.client.keyvault.datakeys.drop() - self.client.drop_database("docs_examples") + encrypted_client.db.create_collection( + "csfle", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle.json")}, + ) + encrypted_client.db.create_collection( + "csfle2", + validator={"$jsonSchema": json_data("etc", "data", "lookup", "schema-csfle2.json")}, + ) + encrypted_client.db.create_collection( + "qe", encryptedFields=json_data("etc", "data", "lookup", "schema-qe.json") + ) + encrypted_client.db.create_collection( + "qe2", encryptedFields=json_data("etc", "data", "lookup", "schema-qe2.json") + ) + encrypted_client.db.create_collection("no_schema") + encrypted_client.db.create_collection("no_schema2") - kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + unencrypted_client = self.rs_or_single_client() - # Create two data keys. - key_vault_client = MongoClient() - client_encryption = ClientEncryption( - kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions() - ) - key1_id = client_encryption.create_data_key("local") - key2_id = client_encryption.create_data_key("local") + encrypted_client.db.csfle.insert_one({"csfle": "csfle"}) + doc = unencrypted_client.db.csfle.find_one() + self.assertIsInstance(doc["csfle"], Binary) + encrypted_client.db.csfle2.insert_one({"csfle2": "csfle2"}) + doc = unencrypted_client.db.csfle2.find_one() + self.assertIsInstance(doc["csfle2"], Binary) + encrypted_client.db.qe.insert_one({"qe": "qe"}) + doc = unencrypted_client.db.qe.find_one() + self.assertIsInstance(doc["qe"], Binary) + encrypted_client.db.qe2.insert_one({"qe2": "qe2"}) + doc = unencrypted_client.db.qe2.find_one() + self.assertIsInstance(doc["qe2"], Binary) + encrypted_client.db.no_schema.insert_one({"no_schema": "no_schema"}) + encrypted_client.db.no_schema2.insert_one({"no_schema2": "no_schema2"}) - # Create an encryptedFieldsMap. 
- encrypted_fields_map = { - "docs_examples.encrypted": { - "fields": [ + encrypted_client.close() + unencrypted_client.close() + + @client_context.require_version_min(8, 1, -1) + def test_1_csfle_joins_no_schema(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, { - "path": "encrypted_indexed", - "bsonType": "string", - "keyId": key1_id, - "queries": [ - { - "queryType": "equality", - }, - ], + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"no_schema": "no_schema"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_2_qe_joins_no_schema(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, { - "path": "encrypted_unindexed", - "bsonType": "string", - "keyId": key2_id, + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } }, - ], - }, - } - - # Create an Queryable Encryption collection. - opts = AutoEncryptionOpts( - kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) ) - encrypted_client = MongoClient(auto_encryption_opts=opts) + self.assertEqual(doc, {"qe": "qe", "matched": [{"no_schema": "no_schema"}]}) - # Create a Queryable Encryption collection "docs_examples.encrypted". - # Because docs_examples.encrypted is in encrypted_fields_map, it is - # created with Queryable Encryption support. 
+ @client_context.require_version_min(8, 1, -1) + def test_3_no_schema_joins_csfle(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "csfle", + "as": "matched", + "pipeline": [{"$match": {"csfle": "csfle"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"csfle": "csfle"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_4_no_schema_joins_qe(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [ + {"$match": {"qe": "qe"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"qe": "qe"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_5_csfle_joins_csfle2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "csfle2", + "as": "matched", + "pipeline": [ + {"$match": {"csfle2": "csfle2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"csfle": "csfle", "matched": [{"csfle2": "csfle2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_6_qe_joins_qe2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.qe.aggregate( + [ + {"$match": {"qe": "qe"}}, + { + "$lookup": { + "from": "qe2", + "as": "matched", + "pipeline": [ + {"$match": {"qe2": "qe2"}}, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ], + } + }, + {"$project": {"_id": 0, "__safeContent__": 0}}, + ] + ) + ) + self.assertEqual(doc, {"qe": "qe", "matched": [{"qe2": "qe2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_7_no_schema_joins_no_schema2(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + doc = next( + encrypted_client.db.no_schema.aggregate( + [ + {"$match": {"no_schema": "no_schema"}}, + { + "$lookup": { + "from": "no_schema2", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema2": "no_schema2"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertEqual(doc, {"no_schema": "no_schema", "matched": [{"no_schema2": "no_schema2"}]}) + + @client_context.require_version_min(8, 1, -1) + def test_8_csfle_joins_qe(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) 
+ ) + with self.assertRaises(PyMongoError) as exc: + _ = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "qe"}}, + { + "$lookup": { + "from": "qe", + "as": "matched", + "pipeline": [{"$match": {"qe": "qe"}}, {"$project": {"_id": 0}}], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertIn("not supported", str(exc.exception)) + + @client_context.require_version_max(8, 1, -1) + def test_9_error(self): + encrypted_client = self.rs_or_single_client( + auto_encryption_opts=AutoEncryptionOpts( + key_vault_namespace="db.keyvault", + kms_providers={"local": {"key": LOCAL_MASTER_KEY}}, + ) + ) + with self.assertRaises(PyMongoError) as exc: + _ = next( + encrypted_client.db.csfle.aggregate( + [ + {"$match": {"csfle": "csfle"}}, + { + "$lookup": { + "from": "no_schema", + "as": "matched", + "pipeline": [ + {"$match": {"no_schema": "no_schema"}}, + {"$project": {"_id": 0}}, + ], + } + }, + {"$project": {"_id": 0}}, + ] + ) + ) + self.assertIn("Upgrade", str(exc.exception)) + + +# https://github.com/mongodb/specifications/blob/072601/source/client-side-encryption/tests/README.md#rewrap +class TestRewrapWithSeparateClientEncryption(EncryptionIntegrationTest): + MASTER_KEYS: Mapping[str, Mapping[str, Any]] = { + "aws": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + }, + "azure": { + "keyVaultEndpoint": "key-vault-csfle.vault.azure.net", + "keyName": "key-name-csfle", + }, + "gcp": { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + }, + "kmip": {}, + "local": {}, + } + + def test_rewrap(self): + for src_provider in self.MASTER_KEYS: + for dst_provider in self.MASTER_KEYS: + with self.subTest(src_provider=src_provider, dst_provider=dst_provider): + self.run_test(src_provider, dst_provider) + + def run_test(self, src_provider, dst_provider): + # Step 1. Drop the collection ``keyvault.datakeys``. + self.client.keyvault.drop_collection("datakeys") + + # Step 2. Create a ``ClientEncryption`` object named ``client_encryption1`` + client_encryption1 = self.create_client_encryption( + key_vault_client=self.client, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=DEFAULT_KMS_TLS, + codec_options=OPTS, + ) + + # Step 3. Call ``client_encryption1.create_data_key`` with ``src_provider``. + key_id = client_encryption1.create_data_key( + master_key=self.MASTER_KEYS[src_provider], kms_provider=src_provider + ) + + # Step 4. Call ``client_encryption1.encrypt`` with the value "test" + cipher_text = client_encryption1.encrypt( + "test", key_id=key_id, algorithm=Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic + ) + + # Step 5. Create a ``ClientEncryption`` object named ``client_encryption2`` + client2 = self.rs_or_single_client() + client_encryption2 = self.create_client_encryption( + key_vault_client=client2, + key_vault_namespace="keyvault.datakeys", + kms_providers=ALL_KMS_PROVIDERS, + kms_tls_options=DEFAULT_KMS_TLS, + codec_options=OPTS, + ) + + # Step 6. Call ``client_encryption2.rewrap_many_data_key`` with an empty ``filter``. + rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key( + {}, provider=dst_provider, master_key=self.MASTER_KEYS[dst_provider] + ) + + self.assertEqual(rewrap_many_data_key_result.bulk_write_result.modified_count, 1) + + # Step 7. Call ``client_encryption1.decrypt`` with the ``cipher_text``. Assert the return value is "test".
+ decrypt_result1 = client_encryption1.decrypt(cipher_text) + self.assertEqual(decrypt_result1, "test") + + # Step 8. Call ``client_encryption2.decrypt`` with the ``cipher_text``. Assert the return value is "test". + decrypt_result2 = client_encryption2.decrypt(cipher_text) + self.assertEqual(decrypt_result2, "test") + + # Case 2. Provider is not optional when master_key is given. + with self.assertRaises(ConfigurationError): + rewrap_many_data_key_result = client_encryption2.rewrap_many_data_key( + {}, master_key=self.MASTER_KEYS[dst_provider] + ) + + +# https://github.com/mongodb/specifications/blob/5cf3ed/source/client-side-encryption/tests/README.md#on-demand-aws-credentials +class TestOnDemandAWSCredentials(EncryptionIntegrationTest): + def setUp(self): + super().setUp() + self.master_key = { + "region": "us-east-1", + "key": ("arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0"), + } + + @unittest.skipIf(any(AWS_CREDS.values()), "AWS environment credentials are set") + def test_01_failure(self): + self.client_encryption = self.create_client_encryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("aws", self.master_key) + + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def test_02_success(self): + self.client_encryption = self.create_client_encryption( + kms_providers={"aws": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=OPTS, + ) + self.client_encryption.create_data_key("aws", self.master_key) + + +class TestQueryableEncryptionDocsExample(EncryptionIntegrationTest): + # Queryable Encryption is not supported on Standalone topology. + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + def setUp(self): + super().setUp() + + def test_queryable_encryption(self): + # MongoClient to use in testing that handles auth/tls/etc, + # and cleanup. + def MongoClient(**kwargs): + c = self.rs_or_single_client(**kwargs) + return c + + # Drop data from prior test runs. + self.client.keyvault.datakeys.drop() + self.client.drop_database("docs_examples") + + kms_providers_map = {"local": {"key": LOCAL_MASTER_KEY}} + + # Create two data keys. + key_vault_client = MongoClient() + client_encryption = self.create_client_encryption( + kms_providers_map, "keyvault.datakeys", key_vault_client, CodecOptions() + ) + key1_id = client_encryption.create_data_key("local") + key2_id = client_encryption.create_data_key("local") + + # Create an encryptedFieldsMap. + encrypted_fields_map = { + "docs_examples.encrypted": { + "fields": [ + { + "path": "encrypted_indexed", + "bsonType": "string", + "keyId": key1_id, + "queries": [ + { + "queryType": "equality", + }, + ], + }, + { + "path": "encrypted_unindexed", + "bsonType": "string", + "keyId": key2_id, + }, + ], + }, + } + + # Create a Queryable Encryption collection. + opts = AutoEncryptionOpts( + kms_providers_map, "keyvault.datakeys", encrypted_fields_map=encrypted_fields_map + ) + encrypted_client = MongoClient(auto_encryption_opts=opts) + + # Create a Queryable Encryption collection "docs_examples.encrypted". + # Because docs_examples.encrypted is in encrypted_fields_map, it is + # created with Queryable Encryption support.
db = encrypted_client.docs_examples encrypted_coll = db.create_collection("encrypted") @@ -2291,7 +2891,844 @@ def MongoClient(**kwargs): assert isinstance(res["encrypted_indexed"], Binary) assert isinstance(res["encrypted_unindexed"], Binary) - client_encryption.close() + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#22-range-explicit-encryption +class TestRangeQueryProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(8, 0, -1) + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(key_vault.drop) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, key_vault.full_name, self.key_vault_client, OPTS + ) + opts = AutoEncryptionOpts( + {"local": {"key": LOCAL_MASTER_KEY}}, + key_vault.full_name, + bypass_query_analysis=True, + ) + self.encrypted_client = self.rs_or_single_client(auto_encryption_opts=opts) + self.db = self.encrypted_client.db + + def run_expression_find( + self, name, expression, expected_elems, range_opts, use_expr=False, key_id=None + ): + find_payload = self.client_encryption.encrypt_expression( + expression=expression, + key_id=key_id or self.key1_id, + algorithm=Algorithm.RANGE, + query_type=QueryType.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + if use_expr: + find_payload = {"$expr": find_payload} + sorted_find = sorted( + self.encrypted_client.db.explicit_encryption.find(find_payload).to_list(), + key=lambda x: x["_id"], + ) + for elem, expected in zip(sorted_find, expected_elems): + self.assertEqual(elem[f"encrypted{name}"], expected) + + def run_test_cases(self, name, range_opts, cast_func): + encrypted_fields = json_data("etc", "data", f"range-encryptedFields-{name}.json") + self.db.drop_collection("explicit_encryption", encrypted_fields=encrypted_fields) + self.db.create_collection("explicit_encryption", encryptedFields=encrypted_fields) + + def encrypt_and_cast(i): + return self.client_encryption.encrypt( + cast_func(i), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + for elem in [{f"encrypted{name}": encrypt_and_cast(i)} for i in [0, 6, 30, 200]]: + self.encrypted_client.db.explicit_encryption.insert_one(elem) + + # Case 1. + insert_payload = self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + self.assertEqual(self.client_encryption.decrypt(insert_payload), cast_func(6)) + + # Case 2. + expression = { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(6)}}, + {f"encrypted{name}": {"$lte": cast_func(200)}}, + ] + } + self.run_expression_find(name, expression, [cast_func(i) for i in [6, 30, 200]], range_opts) + # Case 2, with UUID key_id + self.run_expression_find( + name, + expression, + [cast_func(i) for i in [6, 30, 200]], + range_opts, + key_id=self.key1_id.as_uuid(), + ) + + # Case 3. + self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gte": cast_func(0)}}, + {f"encrypted{name}": {"$lte": cast_func(6)}}, + ] + }, + [cast_func(i) for i in [0, 6]], + range_opts, + ) + + # Case 4. 
+ self.run_expression_find( + name, + { + "$and": [ + {f"encrypted{name}": {"$gt": cast_func(30)}}, + ] + }, + [cast_func(i) for i in [200]], + range_opts, + ) + + # Case 5. + self.run_expression_find( + name, + {"$and": [{"$lt": [f"$encrypted{name}", cast_func(30)]}]}, + [cast_func(i) for i in [0, 6]], + range_opts, + use_expr=True, + ) + + # The spec says to skip the following tests for no precision decimal or double types. + if name not in ("DoubleNoPrecision", "DecimalNoPrecision"): + # Case 6. + with self.assertRaisesRegex( + EncryptionError, + "greater than or equal to the minimum value and less than or equal to the maximum value", + ): + self.client_encryption.encrypt( + cast_func(201), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 7. + with self.assertRaisesRegex( + EncryptionError, "expected matching 'min' and value type. Got range option" + ): + self.client_encryption.encrypt( + 6 if cast_func != int else float(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=range_opts, + ) + + # Case 8. + # The spec says we must additionally not run this case with any precision type, not just the ones above. + if "Precision" not in name: + with self.assertRaisesRegex( + EncryptionError, + "expected 'precision' to be set with double or decimal128 index, but got:", + ): + self.client_encryption.encrypt( + cast_func(6), + key_id=self.key1_id, + algorithm=Algorithm.RANGE, + contention_factor=0, + range_opts=RangeOpts( + min=cast_func(0), + max=cast_func(200), + sparsity=1, + trim_factor=1, + precision=2, + ), + ) + + def test_double_no_precision(self): + self.run_test_cases("DoubleNoPrecision", RangeOpts(sparsity=1, trim_factor=1), float) + + def test_double_precision(self): + self.run_test_cases( + "DoublePrecision", + RangeOpts(min=0.0, max=200.0, sparsity=1, trim_factor=1, precision=2), + float, + ) + + def test_decimal_no_precision(self): + self.run_test_cases( + "DecimalNoPrecision", RangeOpts(sparsity=1, trim_factor=1), lambda x: Decimal128(str(x)) + ) + + def test_decimal_precision(self): + self.run_test_cases( + "DecimalPrecision", + RangeOpts( + min=Decimal128("0.0"), + max=Decimal128("200.0"), + sparsity=1, + trim_factor=1, + precision=2, + ), + lambda x: Decimal128(str(x)), + ) + + def test_datetime(self): + self.run_test_cases( + "Date", + RangeOpts(min=DatetimeMS(0), max=DatetimeMS(200), sparsity=1, trim_factor=1), + lambda x: DatetimeMS(x).as_datetime(), + ) + + def test_int(self): + self.run_test_cases("Int", RangeOpts(min=0, max=200, sparsity=1, trim_factor=1), int) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#23-range-explicit-encryption-applies-defaults +class TestRangeQueryDefaultsProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(8, 0, -1) + def setUp(self): + super().setUp() + self.client.drop_database(self.db) + self.key_vault_client = self.client + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, "keyvault.datakeys", self.key_vault_client, OPTS + ) + self.key_id = self.client_encryption.create_data_key("local") + opts = RangeOpts(min=0, max=1000) + self.payload_defaults = self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + + def test_uses_libmongocrypt_defaults(self): + opts = RangeOpts(min=0, max=1000, sparsity=2, trim_factor=6) + 
payload = self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + assert len(payload) == len(self.payload_defaults) + + def test_accepts_trim_factor_0(self): + opts = RangeOpts(min=0, max=1000, trim_factor=0) + payload = self.client_encryption.encrypt( + 123, "range", self.key_id, contention_factor=0, range_opts=opts + ) + assert len(payload) > len(self.payload_defaults) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#24-kms-retry-tests +class TestKmsRetryProse(EncryptionIntegrationTest): + @unittest.skipUnless(any(AWS_CREDS.values()), "AWS environment credentials are not set") + def setUp(self): + super().setUp() + # 1, create client with only tlsCAFile. + providers: dict = copy.deepcopy(ALL_KMS_PROVIDERS) + providers["azure"]["identityPlatformEndpoint"] = "127.0.0.1:9003" + providers["gcp"]["endpoint"] = "127.0.0.1:9003" + kms_tls_opts = { + p: {"tlsCAFile": CA_PEM, "tlsCertificateKeyFile": CLIENT_PEM} for p in providers + } + self.client_encryption = self.create_client_encryption( + providers, "keyvault.datakeys", self.client, OPTS, kms_tls_options=kms_tls_opts + ) + + def http_post(self, path, data=None): + # Note, the connection to the mock server needs to be closed after + # each request because the server is single threaded. + ctx = ssl.create_default_context(cafile=CA_PEM) + ctx.load_cert_chain(CLIENT_PEM) + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE + conn = http.client.HTTPSConnection("127.0.0.1:9003", context=ctx) + try: + if data is not None: + headers = {"Content-type": "application/json"} + body = json.dumps(data) + else: + headers = {} + body = None + conn.request("POST", path, body, headers) + res = conn.getresponse() + res.read() + finally: + conn.close() + + def _test(self, provider, master_key): + self.http_post("/reset") + # Case 1: createDataKey and encrypt with TCP retry + self.http_post("/set_failpoint/network", {"count": 1}) + key_id = self.client_encryption.create_data_key(provider, master_key=master_key) + self.http_post("/set_failpoint/network", {"count": 1}) + self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 2: createDataKey and encrypt with HTTP retry + self.http_post("/set_failpoint/http", {"count": 1}) + key_id = self.client_encryption.create_data_key(provider, master_key=master_key) + self.http_post("/set_failpoint/http", {"count": 1}) + self.client_encryption.encrypt( + 123, Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, key_id + ) + + # Case 3: createDataKey fails after too many retries + self.http_post("/set_failpoint/network", {"count": 4}) + with self.assertRaisesRegex(EncryptionError, "KMS request failed after"): + self.client_encryption.create_data_key(provider, master_key=master_key) + + def test_kms_retry(self): + if IS_PYOPENSSL: + self.skipTest( + "PyOpenSSL does not support a required method for this test, Connection.makefile" + ) + self._test("aws", {"region": "foo", "key": "bar", "endpoint": "127.0.0.1:9003"}) + self._test("azure", {"keyVaultEndpoint": "127.0.0.1:9003", "keyName": "foo"}) + self._test( + "gcp", + { + "projectId": "foo", + "location": "bar", + "keyRing": "baz", + "keyName": "qux", + "endpoint": "127.0.0.1:9003", + }, + ) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#automatic-data-encryption-keys +class 
TestAutomaticDecryptionKeys(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(7, 0, -1) + @flaky(reason="PYTHON-4982") + def setUp(self): + super().setUp() + self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + self.key1_id = self.key1_document["_id"] + self.client.drop_database(self.db) + self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(self.key_vault.drop) + self.client_encryption = self.create_client_encryption( + {"local": {"key": LOCAL_MASTER_KEY}}, + self.key_vault.full_name, + self.client, + OPTS, + ) + + def test_01_simple_create(self): + coll, _ = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + with self.assertRaises(WriteError) as exc: + coll.insert_one({"ssn": "123-45-6789"}) + self.assertEqual(exc.exception.code, 121) + + def test_02_no_fields(self): + with self.assertRaisesRegex( + TypeError, + "create_encrypted_collection.* missing 1 required positional argument: 'encrypted_fields'", + ): + self.client_encryption.create_encrypted_collection( # type:ignore[call-arg] + database=self.db, + name="testing1", + ) + + def test_03_invalid_keyid(self): + # checkAuthForCreateCollection can be removed when SERVER-102101 is fixed. + with self.assertRaisesRegex( + EncryptedCollectionError, + "(create|checkAuthForCreateCollection).encryptedFields.fields.keyId' is the wrong type 'bool', expected type 'binData", + ): + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [{"path": "ssn", "bsonType": "string", "keyId": False}] + }, + kms_provider="local", + ) + + def test_04_insert_encrypted(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + kms_provider="local", + ) + key1_id = ef["fields"][0]["keyId"] + encrypted_value = self.client_encryption.encrypt( + "123-45-6789", + key_id=key1_id, + algorithm=Algorithm.UNINDEXED, + ) + coll.insert_one({"ssn": encrypted_value}) + + def test_copy_encrypted_fields(self): + encrypted_fields = { + "fields": [ + { + "path": "ssn", + "bsonType": "string", + "keyId": None, + } + ] + } + _, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields=encrypted_fields, + ) + self.assertIsNotNone(ef["fields"][0]["keyId"]) + self.assertIsNone(encrypted_fields["fields"][0]["keyId"]) + + def test_options_forward(self): + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + kms_provider="local", + encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]}, + read_preference=ReadPreference.NEAREST, + ) + self.assertEqual(coll.read_preference, ReadPreference.NEAREST) + self.assertEqual(coll.name, "testing1") + + def test_mixed_null_keyids(self): + key = self.client_encryption.create_data_key(kms_provider="local") + coll, ef = self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields={ + "fields": [ + {"path": "ssn", "bsonType": "string", "keyId": None}, + {"path": "dob", "bsonType": "string", "keyId": key}, + {"path": "secrets", "bsonType": "string"}, + 
{"path": "address", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + encrypted_values = [ + self.client_encryption.encrypt( + val, + key_id=key, + algorithm=Algorithm.UNINDEXED, + ) + for val, key in zip( + ["123-45-6789", "11/22/1963", "My secret", "New Mexico, 87104"], + [field["keyId"] for field in ef["fields"]], + ) + ] + coll.insert_one( + { + "ssn": encrypted_values[0], + "dob": encrypted_values[1], + "secrets": encrypted_values[2], + "address": encrypted_values[3], + } + ) + + def test_create_datakey_fails(self): + key = self.client_encryption.create_data_key(kms_provider="local") + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + } + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # generating keys fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="does not exist", + ) + self.assertEqual(exc.exception.encrypted_fields, encrypted_fields) + + def test_create_failure(self): + key = self.client_encryption.create_data_key(kms_provider="local") + # Make sure the exception's encrypted_fields object includes the previous keys in the error message even when + # it is the creation of the collection that fails. + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name=1, # type:ignore[arg-type] + encrypted_fields={ + "fields": [ + {"path": "address", "bsonType": "string", "keyId": key}, + {"path": "dob", "bsonType": "string", "keyId": None}, + ] + }, + kms_provider="local", + ) + for field in exc.exception.encrypted_fields["fields"]: + self.assertIsInstance(field["keyId"], Binary) + + def test_collection_name_collision(self): + encrypted_fields = { + "fields": [ + {"path": "address", "bsonType": "string", "keyId": None}, + ] + } + self.db.create_collection("testing1") + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + self.db.drop_collection("testing1", encrypted_fields=encrypted_fields) + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + with self.assertRaises( + EncryptedCollectionError, + ) as exc: + self.client_encryption.create_encrypted_collection( + database=self.db, + name="testing1", + encrypted_fields=encrypted_fields, + kms_provider="local", + ) + self.assertIsInstance(exc.exception.encrypted_fields["fields"][0]["keyId"], Binary) + + +# https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#27-text-explicit-encryption +class TestExplicitTextEncryptionProse(EncryptionIntegrationTest): + @client_context.require_no_standalone + @client_context.require_version_min(8, 2, -1) + @client_context.require_libmongocrypt_min(1, 15, 1) + @client_context.require_pymongocrypt_min(1, 16, 0) + def setUp(self): + super().setUp() + # Load the file key1-document.json as key1Document. 
+ self.key1_document = json_data("etc", "data", "keys", "key1-document.json") + # Read the "_id" field of key1Document as key1ID. + self.key1_id = self.key1_document["_id"] + # Drop and create the collection keyvault.datakeys. + # Insert key1Document in keyvault.datakeys with majority write concern. + self.key_vault = create_key_vault(self.client.keyvault.datakeys, self.key1_document) + self.addCleanup(self.key_vault.drop) + # Create a ClientEncryption object named clientEncryption with these options. + self.kms_providers = {"local": {"key": LOCAL_MASTER_KEY}} + self.client_encryption = self.create_client_encryption( + self.kms_providers, + self.key_vault.full_name, + self.client, + OPTS, + ) + # Create a MongoClient named encryptedClient with these AutoEncryptionOpts. + opts = AutoEncryptionOpts( + self.kms_providers, + "keyvault.datakeys", + bypass_query_analysis=True, + ) + self.client_encrypted = self.rs_or_single_client(auto_encryption_opts=opts) + + # Using QE CreateCollection() and Collection.Drop(), drop and create the following collections with majority write concern: + # db.prefix-suffix using the encryptedFields option set to the contents of encryptedFields-prefix-suffix.json. + db = self.client_encrypted.db + db.drop_collection("prefix-suffix") + encrypted_fields = json_data("etc", "data", "encryptedFields-prefix-suffix.json") + self.client_encryption.create_encrypted_collection( + db, "prefix-suffix", kms_provider="local", encrypted_fields=encrypted_fields + ) + # db.substring using the encryptedFields option set to the contents of encryptedFields-substring.json. + db.drop_collection("substring") + encrypted_fields = json_data("etc", "data", "encryptedFields-substring.json") + self.client_encryption.create_encrypted_collection( + db, "substring", kms_provider="local", encrypted_fields=encrypted_fields + ) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.prefix-suffix with majority write concern. + coll = self.client_encrypted.db["prefix-suffix"].with_options( + write_concern=WriteConcern(w="majority") + ) + coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + # Use clientEncryption to encrypt the string "foobarbaz" with the following EncryptOpts. + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foobarbaz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to insert the following document into db.substring with majority write concern. + coll = self.client_encrypted.db["substring"].with_options( + write_concern=WriteConcern(w="majority") + ) + coll.insert_one({"_id": 0, "encryptedText": encrypted_value}) + + def test_01_can_find_a_document_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts. 
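Before the individual prose tests that follow, it may help to see the end-to-end shape of the explicit Queryable Encryption flow this setup drives. A minimal sketch, assuming a MongoDB 7.0+ replica set on localhost and pymongo installed with the encryption extra; the database name and the throwaway local master key are illustrative, not taken from the test suite:

import os

from bson.binary import STANDARD
from bson.codec_options import CodecOptions
from pymongo import MongoClient
from pymongo.encryption import Algorithm, ClientEncryption

client = MongoClient()
client.drop_database("demo")
kms_providers = {"local": {"key": os.urandom(96)}}  # throwaway 96-byte local master key
client_encryption = ClientEncryption(
    kms_providers,
    "keyvault.datakeys",
    client,
    CodecOptions(uuid_representation=STANDARD),
)

# keyId=None asks the driver to create one data key per field; the returned
# copy of encrypted_fields has the generated keyIds filled in.
coll, ef = client_encryption.create_encrypted_collection(
    database=client.demo,
    name="people",
    encrypted_fields={"fields": [{"path": "ssn", "bsonType": "string", "keyId": None}]},
    kms_provider="local",
)

# Explicitly encrypt a value with the generated key and insert it; inserting
# the plaintext directly would be rejected by the server (the code 121
# WriteError asserted in test_01 above).
payload = client_encryption.encrypt(
    "123-45-6789", key_id=ef["fields"][0]["keyId"], algorithm=Algorithm.UNINDEXED
)
coll.insert_one({"ssn": payload})
client_encryption.close()
client.close()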
+ text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter. + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_02_can_find_a_document_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert the following document is returned. + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_03_no_document_found_by_prefix(self): + # Use clientEncryption.encrypt() to encrypt the string "baz" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "baz", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrStartsWith": {"input": "$encryptedText", "prefix": encrypted_value}}} + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + def test_04_no_document_found_by_suffix(self): + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + suffix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUFFIXPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.prefix-suffix collection with the following filter: + value = self.client_encrypted.db["prefix-suffix"].find_one( + {"$expr": {"$encStrEndsWith": {"input": "$encryptedText", "suffix": encrypted_value}}} + ) + # Assert that no documents are returned. 
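        # Aside: tests 03 and 04 are the negative counterparts of tests 01 and
        # 02. A prefix payload built from "baz" cannot match the stored
        # "foobarbaz", nor can a suffix payload built from "foo". The encrypted
        # find still runs normally on the server; it simply matches nothing,
        # which is what the assertIsNone checks verify.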
+ self.assertIsNone(value) + + def test_05_can_find_a_document_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "bar" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "bar", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert the following document is returned: + expected = {"_id": 0, "encryptedText": "foobarbaz"} + value.pop("__safeContent__", None) + self.assertEqual(value, expected) + + def test_06_no_document_found_by_substring(self): + # Use clientEncryption.encrypt() to encrypt the string "qux" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + substring=dict(strMaxLength=10, strMaxQueryLength=10, strMinQueryLength=2), + ) + encrypted_value = self.client_encryption.encrypt( + "qux", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.SUBSTRINGPREVIEW, + contention_factor=0, + text_opts=text_opts, + ) + # Use encryptedClient to run a "find" operation on the db.substring collection with the following filter: + value = self.client_encrypted.db["substring"].find_one( + { + "$expr": { + "$encStrContains": {"input": "$encryptedText", "substring": encrypted_value} + } + } + ) + # Assert that no documents are returned. + self.assertIsNone(value) + + def test_07_contentionFactor_is_required(self): + from pymongocrypt.errors import MongoCryptError + + # Use clientEncryption.encrypt() to encrypt the string "foo" with the following EncryptOpts: + text_opts = TextOpts( + case_sensitive=True, + diacritic_sensitive=True, + prefix=dict(strMaxQueryLength=10, strMinQueryLength=2), + ) + with self.assertRaises(EncryptionError) as ctx: + self.client_encryption.encrypt( + "foo", + key_id=self.key1_id, + algorithm=Algorithm.TEXTPREVIEW, + query_type=QueryType.PREFIXPREVIEW, + text_opts=text_opts, + ) + # Expect an error from libmongocrypt with a message containing the string: "contention factor is required for textPreview algorithm". 
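        # Aside: the encrypt() call above deliberately omits contention_factor.
        # libmongocrypt requires a contention factor for the textPreview
        # algorithm, so the call fails with an EncryptionError whose .cause is
        # the underlying MongoCryptError, as the assertions below check.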
+ self.assertIsInstance(ctx.exception.cause, MongoCryptError) + self.assertEqual( + str(ctx.exception), "contention factor is required for textPreview algorithm" + ) + + +def start_mongocryptd(port) -> None: + args = ["mongocryptd", f"--port={port}", "--idleShutdownTimeoutSecs=60"] + _spawn_daemon(args) + + +@unittest.skipIf(os.environ.get("TEST_CRYPT_SHARED"), "crypt_shared lib is installed") +class TestNoSessionsSupport(EncryptionIntegrationTest): + mongocryptd_client: MongoClient + MONGOCRYPTD_PORT = 27020 + + def setUp(self) -> None: + super().setUp() + start_mongocryptd(self.MONGOCRYPTD_PORT) + + self.listener = OvertCommandListener() + self.mongocryptd_client = self.simple_client( + f"mongodb://localhost:{self.MONGOCRYPTD_PORT}", event_listeners=[self.listener] + ) + + hello = self.mongocryptd_client.db.command("hello") + self.assertNotIn("logicalSessionTimeoutMinutes", hello) + + def test_implicit_session_ignored_when_unsupported(self): + self.listener.reset() + with self.assertRaises(OperationFailure): + self.mongocryptd_client.db.test.find_one() + + self.assertNotIn("lsid", self.listener.started_events[0].command) + + with self.assertRaises(OperationFailure): + self.mongocryptd_client.db.test.insert_one({"x": 1}) + + self.assertNotIn("lsid", self.listener.started_events[1].command) + + self.mongocryptd_client.close() + + def test_explicit_session_errors_when_unsupported(self): + self.listener.reset() + with self.mongocryptd_client.start_session() as s: + with self.assertRaisesRegex( + ConfigurationError, r"Sessions are not supported by this MongoDB deployment" + ): + self.mongocryptd_client.db.test.find_one(session=s) + with self.assertRaisesRegex( + ConfigurationError, r"Sessions are not supported by this MongoDB deployment" + ): + self.mongocryptd_client.db.test.insert_one({"x": 1}, session=s) + + self.mongocryptd_client.close() if __name__ == "__main__": diff --git a/test/test_errors.py b/test/test_errors.py index 747da48472..d6db6a4ec1 100644 --- a/test/test_errors.py +++ b/test/test_errors.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import pickle import sys @@ -46,15 +47,9 @@ def test_operation_failure(self): self.assertIn("full error", traceback.format_exc()) def _test_unicode_strs(self, exc): - if sys.implementation.name == "pypy" and sys.implementation.version < (7, 3, 7): - # PyPy used to display unicode in repr differently. - self.assertEqual( - "unicode \U0001f40d, full error: {'errmsg': 'unicode \\U0001f40d'}", str(exc) - ) - else: - self.assertEqual( - "unicode \U0001f40d, full error: {'errmsg': 'unicode \U0001f40d'}", str(exc) - ) + self.assertEqual( + "unicode \U0001f40d, full error: {'errmsg': 'unicode \U0001f40d'}", str(exc) + ) try: raise exc except Exception: diff --git a/test/test_examples.py b/test/test_examples.py index b7b70463ac..266e32e8d4 100644 --- a/test/test_examples.py +++ b/test/test_examples.py @@ -13,15 +13,20 @@ # limitations under the License. 
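One detail worth calling out from the TestNoSessionsSupport hunk above: drivers decide whether a deployment supports sessions by checking for logicalSessionTimeoutMinutes in the hello response, and mongocryptd never reports it. A minimal sketch of that probe, assuming something (mongocryptd or a mongod) is listening on the hypothetical port 27020:

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27020", serverSelectionTimeoutMS=5000)
hello = client.admin.command("hello")
# Absent for mongocryptd and pre-3.6 servers; present for modern mongod/mongos.
supports_sessions = "logicalSessionTimeoutMinutes" in hello
print("sessions supported:", supports_sessions)
client.close()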
"""MongoDB documentation examples in Python.""" +from __future__ import annotations +import asyncio import datetime +import functools import sys import threading +import time +from test.helpers import ConcurrentRunner sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import rs_client, wait_until +from test.utils_shared import wait_until import pymongo from pymongo.errors import ConnectionFailure, OperationFailure @@ -30,21 +35,18 @@ from pymongo.server_api import ServerApi from pymongo.write_concern import WriteConcern +_IS_SYNC = True -class TestSampleShellCommands(IntegrationTest): - @classmethod - def setUpClass(cls): - super(TestSampleShellCommands, cls).setUpClass() - # Run once before any tests run. - cls.db.inventory.drop() - @classmethod - def tearDownClass(cls): - cls.client.drop_database("pymongo_test") +class TestSampleShellCommands(IntegrationTest): + def setUp(self): + super().setUp() + self.db.inventory.drop() def tearDown(self): # Run after every test. self.db.inventory.drop() + self.client.drop_database("pymongo_test") def test_first_three_examples(self): db = self.db @@ -66,7 +68,7 @@ def test_first_three_examples(self): cursor = db.inventory.find({"item": "canvas"}) # End Example 2 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 3 db.inventory.insert_many( @@ -141,31 +143,31 @@ def test_query_top_level_fields(self): cursor = db.inventory.find({}) # End Example 7 - self.assertEqual(len(list(cursor)), 5) + self.assertEqual(len(cursor.to_list()), 5) # Start Example 9 cursor = db.inventory.find({"status": "D"}) # End Example 9 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) # Start Example 10 cursor = db.inventory.find({"status": {"$in": ["A", "D"]}}) # End Example 10 - self.assertEqual(len(list(cursor)), 5) + self.assertEqual(len(cursor.to_list()), 5) # Start Example 11 cursor = db.inventory.find({"status": "A", "qty": {"$lt": 30}}) # End Example 11 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 12 cursor = db.inventory.find({"$or": [{"status": "A"}, {"qty": {"$lt": 30}}]}) # End Example 12 - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Start Example 13 cursor = db.inventory.find( @@ -173,46 +175,42 @@ def test_query_top_level_fields(self): ) # End Example 13 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) def test_query_embedded_documents(self): db = self.db # Start Example 14 - # Subdocument key order matters in a few of these examples so we have - # to use bson.son.SON instead of a Python dict. 
- from bson.son import SON - db.inventory.insert_many( [ { "item": "journal", "qty": 25, - "size": SON([("h", 14), ("w", 21), ("uom", "cm")]), + "size": {"h": 14, "w": 21, "uom": "cm"}, "status": "A", }, { "item": "notebook", "qty": 50, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "size": {"h": 8.5, "w": 11, "uom": "in"}, "status": "A", }, { "item": "paper", "qty": 100, - "size": SON([("h", 8.5), ("w", 11), ("uom", "in")]), + "size": {"h": 8.5, "w": 11, "uom": "in"}, "status": "D", }, { "item": "planner", "qty": 75, - "size": SON([("h", 22.85), ("w", 30), ("uom", "cm")]), + "size": {"h": 22.85, "w": 30, "uom": "cm"}, "status": "D", }, { "item": "postcard", "qty": 45, - "size": SON([("h", 10), ("w", 15.25), ("uom", "cm")]), + "size": {"h": 10, "w": 15.25, "uom": "cm"}, "status": "A", }, ] @@ -220,34 +218,34 @@ def test_query_embedded_documents(self): # End Example 14 # Start Example 15 - cursor = db.inventory.find({"size": SON([("h", 14), ("w", 21), ("uom", "cm")])}) + cursor = db.inventory.find({"size": {"h": 14, "w": 21, "uom": "cm"}}) # End Example 15 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 16 - cursor = db.inventory.find({"size": SON([("w", 21), ("h", 14), ("uom", "cm")])}) + cursor = db.inventory.find({"size": {"w": 21, "h": 14, "uom": "cm"}}) # End Example 16 - self.assertEqual(len(list(cursor)), 0) + self.assertEqual(len(cursor.to_list()), 0) # Start Example 17 cursor = db.inventory.find({"size.uom": "in"}) # End Example 17 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) # Start Example 18 cursor = db.inventory.find({"size.h": {"$lt": 15}}) # End Example 18 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 19 cursor = db.inventory.find({"size.h": {"$lt": 15}, "size.uom": "in", "status": "D"}) # End Example 19 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) def test_query_arrays(self): db = self.db @@ -273,87 +271,83 @@ def test_query_arrays(self): cursor = db.inventory.find({"tags": ["red", "blank"]}) # End Example 21 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 22 cursor = db.inventory.find({"tags": {"$all": ["red", "blank"]}}) # End Example 22 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 23 cursor = db.inventory.find({"tags": "red"}) # End Example 23 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 24 cursor = db.inventory.find({"dim_cm": {"$gt": 25}}) # End Example 24 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 25 cursor = db.inventory.find({"dim_cm": {"$gt": 15, "$lt": 20}}) # End Example 25 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 26 cursor = db.inventory.find({"dim_cm": {"$elemMatch": {"$gt": 22, "$lt": 30}}}) # End Example 26 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 27 cursor = db.inventory.find({"dim_cm.1": {"$gt": 25}}) # End Example 27 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 28 cursor = db.inventory.find({"tags": {"$size": 3}}) # End Example 28 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) def test_query_array_of_documents(self): db = 
self.db # Start Example 29 - # Subdocument key order matters in a few of these examples so we have - # to use bson.son.SON instead of a Python dict. - from bson.son import SON - db.inventory.insert_many( [ { "item": "journal", "instock": [ - SON([("warehouse", "A"), ("qty", 5)]), - SON([("warehouse", "C"), ("qty", 15)]), + {"warehouse": "A", "qty": 5}, + {"warehouse": "C", "qty": 15}, ], }, - {"item": "notebook", "instock": [SON([("warehouse", "C"), ("qty", 5)])]}, + {"item": "notebook", "instock": [{"warehouse": "C", "qty": 5}]}, { "item": "paper", "instock": [ - SON([("warehouse", "A"), ("qty", 60)]), - SON([("warehouse", "B"), ("qty", 15)]), + {"warehouse": "A", "qty": 60}, + {"warehouse": "B", "qty": 15}, ], }, { "item": "planner", "instock": [ - SON([("warehouse", "A"), ("qty", 40)]), - SON([("warehouse", "B"), ("qty", 5)]), + {"warehouse": "A", "qty": 40}, + {"warehouse": "B", "qty": 5}, ], }, { "item": "postcard", "instock": [ - SON([("warehouse", "B"), ("qty", 15)]), - SON([("warehouse", "C"), ("qty", 35)]), + {"warehouse": "B", "qty": 15}, + {"warehouse": "C", "qty": 35}, ], }, ] @@ -361,52 +355,52 @@ def test_query_array_of_documents(self): # End Example 29 # Start Example 30 - cursor = db.inventory.find({"instock": SON([("warehouse", "A"), ("qty", 5)])}) + cursor = db.inventory.find({"instock": {"warehouse": "A", "qty": 5}}) # End Example 30 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 31 - cursor = db.inventory.find({"instock": SON([("qty", 5), ("warehouse", "A")])}) + cursor = db.inventory.find({"instock": {"qty": 5, "warehouse": "A"}}) # End Example 31 - self.assertEqual(len(list(cursor)), 0) + self.assertEqual(len(cursor.to_list()), 0) # Start Example 32 cursor = db.inventory.find({"instock.0.qty": {"$lte": 20}}) # End Example 32 - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Start Example 33 cursor = db.inventory.find({"instock.qty": {"$lte": 20}}) # End Example 33 - self.assertEqual(len(list(cursor)), 5) + self.assertEqual(len(cursor.to_list()), 5) # Start Example 34 cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": 5, "warehouse": "A"}}}) # End Example 34 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 35 cursor = db.inventory.find({"instock": {"$elemMatch": {"qty": {"$gt": 10, "$lte": 20}}}}) # End Example 35 - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Start Example 36 cursor = db.inventory.find({"instock.qty": {"$gt": 10, "$lte": 20}}) # End Example 36 - self.assertEqual(len(list(cursor)), 4) + self.assertEqual(len(cursor.to_list()), 4) # Start Example 37 cursor = db.inventory.find({"instock.qty": 5, "instock.warehouse": "A"}) # End Example 37 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) def test_query_null(self): db = self.db @@ -419,19 +413,19 @@ def test_query_null(self): cursor = db.inventory.find({"item": None}) # End Example 39 - self.assertEqual(len(list(cursor)), 2) + self.assertEqual(len(cursor.to_list()), 2) # Start Example 40 cursor = db.inventory.find({"item": {"$type": 10}}) # End Example 40 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) # Start Example 41 cursor = db.inventory.find({"item": {"$exists": False}}) # End Example 41 - self.assertEqual(len(list(cursor)), 1) + self.assertEqual(len(cursor.to_list()), 1) def test_projection(self): db = self.db @@ -477,84 +471,84 @@ 
def test_projection(self): cursor = db.inventory.find({"status": "A"}) # End Example 43 - self.assertEqual(len(list(cursor)), 3) + self.assertEqual(len(cursor.to_list()), 3) # Start Example 44 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1}) # End Example 44 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 45 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "_id": 0}) # End Example 45 for doc in cursor: - self.assertFalse("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertFalse("instock" in doc) + self.assertNotIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 46 cursor = db.inventory.find({"status": "A"}, {"status": 0, "instock": 0}) # End Example 46 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertFalse("status" in doc) - self.assertTrue("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertNotIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) # Start Example 47 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "size.uom": 1}) # End Example 47 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertTrue("size" in doc) - self.assertFalse("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertNotIn("instock", doc) size = doc["size"] - self.assertTrue("uom" in size) - self.assertFalse("h" in size) - self.assertFalse("w" in size) + self.assertIn("uom", size) + self.assertNotIn("h", size) + self.assertNotIn("w", size) # Start Example 48 cursor = db.inventory.find({"status": "A"}, {"size.uom": 0}) # End Example 48 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertTrue("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertIn("size", doc) + self.assertIn("instock", doc) size = doc["size"] - self.assertFalse("uom" in size) - self.assertTrue("h" in size) - self.assertTrue("w" in size) + self.assertNotIn("uom", size) + self.assertIn("h", size) + self.assertIn("w", size) # Start Example 49 cursor = db.inventory.find({"status": "A"}, {"item": 1, "status": 1, "instock.qty": 1}) # End Example 49 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) for subdoc in doc["instock"]: - self.assertFalse("warehouse" in subdoc) - self.assertTrue("qty" in subdoc) + self.assertNotIn("warehouse", subdoc) + self.assertIn("qty", subdoc) # Start Example 50 cursor = db.inventory.find( @@ -563,11 +557,11 
@@ def test_projection(self): # End Example 50 for doc in cursor: - self.assertTrue("_id" in doc) - self.assertTrue("item" in doc) - self.assertTrue("status" in doc) - self.assertFalse("size" in doc) - self.assertTrue("instock" in doc) + self.assertIn("_id", doc) + self.assertIn("item", doc) + self.assertIn("status", doc) + self.assertNotIn("size", doc) + self.assertIn("instock", doc) self.assertEqual(len(doc["instock"]), 1) def test_update_and_replace(self): @@ -650,7 +644,7 @@ def test_update_and_replace(self): for doc in db.inventory.find({"item": "paper"}): self.assertEqual(doc["size"]["uom"], "cm") self.assertEqual(doc["status"], "P") - self.assertTrue("lastModified" in doc) + self.assertIn("lastModified", doc) # Start Example 53 db.inventory.update_many( @@ -662,7 +656,7 @@ def test_update_and_replace(self): for doc in db.inventory.find({"qty": {"$lt": 50}}): self.assertEqual(doc["size"]["uom"], "in") self.assertEqual(doc["status"], "P") - self.assertTrue("lastModified" in doc) + self.assertIn("lastModified", doc) # Start Example 54 db.inventory.replace_one( @@ -676,8 +670,8 @@ def test_update_and_replace(self): for doc in db.inventory.find({"item": "paper"}, {"_id": 0}): self.assertEqual(len(doc.keys()), 2) - self.assertTrue("item" in doc) - self.assertTrue("instock" in doc) + self.assertIn("item", doc) + self.assertIn("instock", doc) self.assertEqual(len(doc["instock"]), 2) def test_delete(self): @@ -740,37 +734,41 @@ def test_delete(self): self.assertEqual(db.inventory.count_documents({}), 0) - @client_context.require_replica_set - @client_context.require_no_mmap + @client_context.require_change_streams def test_change_streams(self): db = self.db done = False def insert_docs(): + nonlocal done while not done: db.inventory.insert_one({"username": "alice"}) db.inventory.delete_one({"username": "alice"}) + time.sleep(0.005) - t = threading.Thread(target=insert_docs) + t = ConcurrentRunner(target=insert_docs) t.start() try: # 1. 
The database for reactive, real-time applications # Start Changestream Example 1 cursor = db.inventory.watch() - document = next(cursor) + next(cursor) # End Changestream Example 1 + cursor.close() # Start Changestream Example 2 cursor = db.inventory.watch(full_document="updateLookup") - document = next(cursor) + next(cursor) # End Changestream Example 2 + cursor.close() # Start Changestream Example 3 resume_token = cursor.resume_token cursor = db.inventory.watch(resume_after=resume_token) - document = next(cursor) + next(cursor) # End Changestream Example 3 + cursor.close() # Start Changestream Example 4 pipeline = [ @@ -778,8 +776,9 @@ def insert_docs(): {"$addFields": {"newField": "this is an added field!"}}, ] cursor = db.inventory.watch(pipeline=pipeline) - document = next(cursor) + next(cursor) # End Changestream Example 4 + cursor.close() finally: done = True t.join() @@ -861,6 +860,38 @@ def test_aggregate_examples(self): ) # End Aggregation Example 4 + @client_context.require_version_min(4, 4) + def test_aggregate_projection_example(self): + db = self.db + + # Start Aggregation Projection Example 1 + db.inventory.find( + {}, + { + "_id": 0, + "item": 1, + "status": { + "$switch": { + "branches": [ + {"case": {"$eq": ["$status", "A"]}, "then": "Available"}, + {"case": {"$eq": ["$status", "D"]}, "then": "Discontinued"}, + ], + "default": "No status found", + } + }, + "area": { + "$concat": [ + {"$toString": {"$multiply": ["$size.h", "$size.w"]}}, + " ", + "$size.uom", + ] + }, + "reportNumber": {"$literal": 1}, + }, + ) + + # End Aggregation Projection Example 1 + def test_commands(self): db = self.db db.restaurants.insert_one({}) @@ -870,7 +901,7 @@ def test_commands(self): # End runCommand Example 1 # Start runCommand Example 2 - db.command("collStats", "restaurants") + db.command("count", "restaurants") # End runCommand Example 2 def test_index_management(self): @@ -899,7 +930,7 @@ def test_misc(self): with client.start_session() as session: collection.insert_one({"_id": 1}, session=session) collection.update_one({"_id": 1}, {"$set": {"a": 1}}, session=session) - for doc in collection.find({}, session=session): + for _doc in collection.find({}, session=session): pass # 3. Exploiting the power of arrays @@ -1079,7 +1110,7 @@ def update_employee_info(session): with client.start_session() as session: try: run_transaction_with_retry(update_employee_info, session) - except Exception as exc: + except Exception: # Do something with error. raise @@ -1090,7 +1121,9 @@ def update_employee_info(session): self.assertIsNotNone(employee) self.assertEqual(employee["status"], "Inactive") - MongoClient = lambda _: rs_client() + def MongoClient(_): + return self.rs_client() + uriString = None # Start Transactions withTxn API Example 1 @@ -1119,19 +1152,13 @@ def callback(session): # Step 2: Start a client session. with client.start_session() as session: # Step 3: Use with_transaction to start a transaction, execute the callback, and commit (or abort on error). 
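The hunk that follows trims the explicit transaction options in favor of the defaults. For reference, the fuller call looks roughly like this; a sketch assuming a replica set, where with_transaction retries the callback on TransientTransactionError and retries the commit on UnknownTransactionCommitResult:

from pymongo import MongoClient
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern

client = MongoClient()

def callback(session):
    # Every operation in the callback must pass the session to run
    # inside the transaction.
    client.db.coll.insert_one({"n": 1}, session=session)

with client.start_session() as session:
    session.with_transaction(
        callback,
        read_concern=ReadConcern("local"),
        write_concern=WriteConcern(w="majority"),
        read_preference=ReadPreference.PRIMARY,
    )
client.close()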
- session.with_transaction( - callback, - read_concern=ReadConcern("local"), - write_concern=wc_majority, - read_preference=ReadPreference.PRIMARY, - ) + session.with_transaction(callback) # End Transactions withTxn API Example 1 class TestCausalConsistencyExamples(IntegrationTest): @client_context.require_secondaries_count(1) - @client_context.require_no_mmap def test_causal_consistency(self): # Causal consistency examples client = self.client @@ -1180,25 +1207,27 @@ class TestVersionedApiExamples(IntegrationTest): @client_context.require_version_min(4, 7) def test_versioned_api(self): # Versioned API examples - MongoClient = lambda _, server_api: rs_client(server_api=server_api, connect=False) + def MongoClient(_, server_api): + return self.rs_client(server_api=server_api, connect=False) + uri = None # Start Versioned API Example 1 from pymongo.server_api import ServerApi - client = MongoClient(uri, server_api=ServerApi("1")) + MongoClient(uri, server_api=ServerApi("1")) # End Versioned API Example 1 # Start Versioned API Example 2 - client = MongoClient(uri, server_api=ServerApi("1", strict=True)) + MongoClient(uri, server_api=ServerApi("1", strict=True)) # End Versioned API Example 2 # Start Versioned API Example 3 - client = MongoClient(uri, server_api=ServerApi("1", strict=False)) + MongoClient(uri, server_api=ServerApi("1", strict=False)) # End Versioned API Example 3 # Start Versioned API Example 4 - client = MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) + MongoClient(uri, server_api=ServerApi("1", deprecation_errors=True)) # End Versioned API Example 4 @unittest.skip("PYTHON-3167 count has been added to API version 1") @@ -1210,7 +1239,7 @@ def test_versioned_api_migration(self): ): self.skipTest("This test needs MongoDB 5.0.2 or newer") - client = rs_client(server_api=ServerApi("1", strict=True)) + client = self.rs_client(server_api=ServerApi("1", strict=True)) client.db.sales.drop() # Start Versioned API Example 5 @@ -1311,20 +1340,37 @@ def test_snapshot_query(self): db.drop_collection("dogs") db.cats.insert_one({"name": "Whiskers", "color": "white", "age": 10, "adoptable": True}) db.dogs.insert_one({"name": "Pebbles", "color": "Brown", "age": 10, "adoptable": True}) - wait_until(lambda: self.check_for_snapshot(db.cats), "success") - wait_until(lambda: self.check_for_snapshot(db.dogs), "success") + + def predicate_one(): + return self.check_for_snapshot(db.cats) + + def predicate_two(): + return self.check_for_snapshot(db.dogs) + + wait_until(predicate_two, "success") + wait_until(predicate_one, "success") # Start Snapshot Query Example 1 db = client.pets with client.start_session(snapshot=True) as s: - adoptablePetsCount = db.cats.aggregate( - [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], session=s - ).next()["adoptableCatsCount"] - - adoptablePetsCount += db.dogs.aggregate( - [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], session=s - ).next()["adoptableDogsCount"] + adoptablePetsCount = ( + ( + db.cats.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableCatsCount"}], + session=s, + ) + ).next() + )["adoptableCatsCount"] + + adoptablePetsCount += ( + ( + db.dogs.aggregate( + [{"$match": {"adoptable": True}}, {"$count": "adoptableDogsCount"}], + session=s, + ) + ).next() + )["adoptableDogsCount"] print(adoptablePetsCount) @@ -1335,33 +1381,41 @@ def test_snapshot_query(self): saleDate = datetime.datetime.now() db.sales.insert_one({"shoeType": "boot", "price": 30, "saleDate": saleDate}) - 
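For readers skimming the snapshot hunks: a session opened with snapshot=True pins every read in that session to a single majority-committed point in time (MongoDB 5.0+, replica sets and sharded clusters), and only read operations are permitted on such a session. A minimal sketch, assuming the retail.sales data from the example above:

from pymongo import MongoClient

client = MongoClient()
with client.start_session(snapshot=True) as s:
    # All reads in this session observe the same snapshot of the data.
    doc = client.retail.sales.find_one(session=s)
client.close()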
wait_until(lambda: self.check_for_snapshot(db.sales), "success") + + def predicate_three(): + return self.check_for_snapshot(db.sales) + + wait_until(predicate_three, "success") # Start Snapshot Query Example 2 db = client.retail with client.start_session(snapshot=True) as s: - total = db.sales.aggregate( - [ - { - "$match": { - "$expr": { - "$gt": [ - "$saleDate", - { - "$dateSubtract": { - "startDate": "$$NOW", - "unit": "day", - "amount": 1, - } - }, - ] - } - } - }, - {"$count": "totalDailySales"}, - ], - session=s, - ).next()["totalDailySales"] + _ = ( + ( + db.sales.aggregate( + [ + { + "$match": { + "$expr": { + "$gt": [ + "$saleDate", + { + "$dateSubtract": { + "startDate": "$$NOW", + "unit": "day", + "amount": 1, + } + }, + ] + } + } + }, + {"$count": "totalDailySales"}, + ], + session=s, + ) + ).next() + )["totalDailySales"] # End Snapshot Query Example 2 @@ -1372,9 +1426,9 @@ def check_for_snapshot(self, collection): """ with self.client.start_session(snapshot=True) as s: try: - with collection.aggregate([], session=s): - pass - return True + if collection.find_one(session=s): + return True + return False except OperationFailure as e: # Retry them as the server demands... if e.code == 246: # SnapshotUnavailable diff --git a/test/test_fork.py b/test/test_fork.py new file mode 100644 index 0000000000..dad947d8c5 --- /dev/null +++ b/test/test_fork.py @@ -0,0 +1,102 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that pymongo resets its own locks after a fork.""" +from __future__ import annotations + +import os +import sys +import unittest +import warnings +from multiprocessing import Pipe + +sys.path[0:0] = [""] + +from test import IntegrationTest +from test.utils_shared import is_greenthread_patched + +from bson.objectid import ObjectId + + +@unittest.skipIf( + not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python" +) +@unittest.skipIf( + is_greenthread_patched(), + "gevent does not support POSIX-style forking.", +) +class TestFork(IntegrationTest): + def test_lock_client(self): + # Forks the client with some items locked. + # Parent => All locks should be as before the fork. + # Child => All locks should be reset. + with self.client._lock: + + def target(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + self.client.admin.command("ping") + + with self.fork(target): + pass + self.client.admin.command("ping") + + def test_lock_object_id(self): + # Forks the client with ObjectId's _inc_lock locked. + # Parent => _inc_lock should remain locked. + # Child => _inc_lock should be unlocked. + with ObjectId._inc_lock: + + def target(): + self.assertFalse(ObjectId._inc_lock.locked()) + self.assertTrue(ObjectId()) + + with self.fork(target): + pass + + def test_topology_reset(self): + # Tests that topologies are different from each other. + # Cannot use ID because virtual memory addresses may be the same. + # Cannot reinstantiate ObjectId in the topology settings. 
+ # Relies on difference in PID when opened again. + parent_conn, child_conn = Pipe() + init_id = self.client._topology._pid + parent_cursor_exc = self.client._kill_cursors_executor + + def target(): + # Catch the fork warning and send to the parent for assertion. + with warnings.catch_warnings(record=True) as ctx: + warnings.simplefilter("always") + self.client.admin.command("ping") + child_conn.send(str(ctx[0])) + child_conn.send(self.client._topology._pid) + child_conn.send( + ( + parent_cursor_exc != self.client._kill_cursors_executor, + "client._kill_cursors_executor was not reinitialized", + ) + ) + + with self.fork(target): + self.assertEqual(self.client._topology._pid, init_id) + fork_warning = parent_conn.recv() + self.assertIn("MongoClient opened before fork", fork_warning) + child_id = parent_conn.recv() + self.assertNotEqual(child_id, init_id) + passed, msg = parent_conn.recv() + self.assertTrue(passed, msg) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_grid_file.py b/test/test_grid_file.py index 8b46133a60..c7ccda44a4 100644 --- a/test/test_grid_file.py +++ b/test/test_grid_file.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -14,29 +13,35 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the grid_file module. -""" +"""Tests for the grid_file module.""" +from __future__ import annotations import datetime import io import sys import zipfile from io import BytesIO +from test import ( + IntegrationTest, + UnitTest, + client_context, + qcheck, + unittest, +) -from pymongo.database import Database +from pymongo.synchronous.database import Database sys.path[0:0] = [""] -from test import IntegrationTest, qcheck, unittest -from test.utils import EventListener, rs_or_single_client +from test.utils_shared import OvertCommandListener from bson.objectid import ObjectId -from gridfs import GridFS from gridfs.errors import NoFile -from gridfs.grid_file import ( +from gridfs.synchronous.grid_file import ( _SEEK_CUR, _SEEK_END, DEFAULT_CHUNK_SIZE, + GridFS, GridIn, GridOut, GridOutCursor, @@ -45,8 +50,10 @@ from pymongo.errors import ConfigurationError, ServerSelectionTimeoutError from pymongo.message import _CursorAddress +_IS_SYNC = True + -class TestGridFileNoConnect(unittest.TestCase): +class TestGridFileNoConnect(UnitTest): """Test GridFile features on a client that does not connect.""" db: Database @@ -89,6 +96,7 @@ def test_grid_in_custom_opts(self): class TestGridFile(IntegrationTest): def setUp(self): + super().setUp() self.cleanup_colls(self.db.fs.files, self.db.fs.chunks) def test_basic(self): @@ -141,7 +149,7 @@ def test_grid_in_default_opts(self): a = GridIn(self.db.fs) - self.assertTrue(isinstance(a._id, ObjectId)) + self.assertIsInstance(a._id, ObjectId) self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual(None, a.filename) @@ -152,6 +160,7 @@ def test_grid_in_default_opts(self): self.assertEqual(None, a.content_type) a.content_type = "text/html" + self.assertEqual("text/html", a.content_type) self.assertRaises(AttributeError, getattr, a, "length") @@ -165,20 +174,27 @@ def test_grid_in_default_opts(self): self.assertRaises(AttributeError, getattr, a, "aliases") a.aliases = ["foo"] + self.assertEqual(["foo"], a.aliases) self.assertRaises(AttributeError, getattr, a, "metadata") a.metadata = {"foo": 1} + self.assertEqual({"foo": 1}, a.metadata) self.assertRaises(AttributeError, setattr, a, "md5", 5) a.close() - a.forty_two 
= 42 + if _IS_SYNC: + a.forty_two = 42 + else: + self.assertRaises(AttributeError, setattr, a, "forty_two", 42) + a.set("forty_two", 42) + self.assertEqual(42, a.forty_two) - self.assertTrue(isinstance(a._id, ObjectId)) + self.assertIsInstance(a._id, ObjectId) self.assertRaises(AttributeError, setattr, a, "_id", 5) self.assertEqual("my_file", a.filename) @@ -192,7 +208,7 @@ def test_grid_in_default_opts(self): self.assertEqual(255 * 1024, a.chunk_size) self.assertRaises(AttributeError, setattr, a, "chunk_size", 5) - self.assertTrue(isinstance(a.upload_date, datetime.datetime)) + self.assertIsInstance(a.upload_date, datetime.datetime) self.assertRaises(AttributeError, setattr, a, "upload_date", 5) self.assertEqual(["foo"], a.aliases) @@ -214,12 +230,16 @@ def test_grid_out_default_opts(self): gout = GridOut(self.db.fs, 5) with self.assertRaises(NoFile): + if not _IS_SYNC: + gout.open() gout.name a = GridIn(self.db.fs) a.close() b = GridOut(self.db.fs, a._id) + if not _IS_SYNC: + b.open() self.assertEqual(a._id, b._id) self.assertEqual(0, b.length) @@ -227,7 +247,7 @@ def test_grid_out_default_opts(self): self.assertEqual(None, b.name) self.assertEqual(None, b.filename) self.assertEqual(255 * 1024, b.chunk_size) - self.assertTrue(isinstance(b.upload_date, datetime.datetime)) + self.assertIsInstance(b.upload_date, datetime.datetime) self.assertEqual(None, b.aliases) self.assertEqual(None, b.metadata) self.assertEqual(None, b.md5) @@ -254,9 +274,9 @@ def test_grid_out_cursor_options(self): cursor_clone = cursor.clone() cursor_dict = cursor.__dict__.copy() - cursor_dict.pop("_Cursor__session") + cursor_dict.pop("_session") cursor_clone_dict = cursor_clone.__dict__.copy() - cursor_clone_dict.pop("_Cursor__session") + cursor_clone_dict.pop("_session") self.assertDictEqual(cursor_dict, cursor_clone_dict) self.assertRaises(NotImplementedError, cursor.add_option, 0) @@ -279,13 +299,16 @@ def test_grid_out_custom_opts(self): two = GridOut(self.db.fs, 5) + if not _IS_SYNC: + two.open() + self.assertEqual("my_file", two.name) self.assertEqual("my_file", two.filename) self.assertEqual(5, two._id) self.assertEqual(11, two.length) self.assertEqual("text/html", two.content_type) self.assertEqual(1000, two.chunk_size) - self.assertTrue(isinstance(two.upload_date, datetime.datetime)) + self.assertIsInstance(two.upload_date, datetime.datetime) self.assertEqual(["foo"], two.aliases) self.assertEqual({"foo": 1, "bar": 2}, two.metadata) self.assertEqual(3, two.bar) @@ -317,6 +340,8 @@ def test_grid_out_file_document(self): four = GridOut(self.db.fs, file_document={}) with self.assertRaises(NoFile): + if not _IS_SYNC: + four.open() four.name def test_write_file_like(self): @@ -351,7 +376,8 @@ def test_write_lines(self): def test_close(self): f = GridIn(self.db.fs) f.close() - self.assertRaises(ValueError, f.write, "test") + with self.assertRaises(ValueError): + f.write("test") f.close() def test_closed(self): @@ -360,6 +386,8 @@ def test_closed(self): f.close() g = GridOut(self.db.fs, f._id) + if not _IS_SYNC: + g.open() self.assertFalse(g.closed) g.read(1) self.assertFalse(g.closed) @@ -416,18 +444,21 @@ def test_seek(self): self.assertEqual(b"hello world", g.read()) g.seek(1) self.assertEqual(b"ello world", g.read()) - self.assertRaises(IOError, g.seek, -1) + with self.assertRaises(IOError): + g.seek(-1) g.seek(-3, _SEEK_END) self.assertEqual(b"rld", g.read()) g.seek(0, _SEEK_END) self.assertEqual(b"", g.read()) - self.assertRaises(IOError, g.seek, -100, _SEEK_END) + with self.assertRaises(IOError): + 
g.seek(-100, _SEEK_END) g.seek(3) g.seek(3, _SEEK_CUR) self.assertEqual(b"world", g.read()) - self.assertRaises(IOError, g.seek, -100, _SEEK_CUR) + with self.assertRaises(IOError): + g.seek(-100, _SEEK_CUR) def test_tell(self): f = GridIn(self.db.fs, chunkSize=3) @@ -462,12 +493,10 @@ def test_multiple_reads(self): def test_readline(self): f = GridIn(self.db.fs, chunkSize=5) f.write( - ( - b"""Hello world, + b"""Hello world, How are you? Hope all is well. Bye""" - ) ) f.close() @@ -498,12 +527,10 @@ def test_readline(self): def test_readlines(self): f = GridIn(self.db.fs, chunkSize=5) f.write( - ( - b"""Hello world, + b"""Hello world, How are you? Hope all is well. Bye""" - ) ) f.close() @@ -524,12 +551,14 @@ def test_readlines(self): # Only readlines(). g = GridOut(self.db.fs, f._id) self.assertEqual( - [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines() + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + g.readlines(), ) g = GridOut(self.db.fs, f._id) self.assertEqual( - [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], g.readlines(0) + [b"Hello world,\n", b"How are you?\n", b"Hope all is well.\n", b"Bye"], + g.readlines(0), ) g = GridOut(self.db.fs, f._id) @@ -555,15 +584,25 @@ def test_iterator(self): f = GridIn(self.db.fs) f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([], list(g)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], g.to_list()) f = GridIn(self.db.fs) f.write(b"hello world\nhere are\nsome lines.") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + if _IS_SYNC: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], list(g)) + else: + self.assertEqual([b"hello world\n", b"here are\n", b"some lines."], g.to_list()) + self.assertEqual(b"", g.read(5)) - self.assertEqual([], list(g)) + if _IS_SYNC: + self.assertEqual([], list(g)) + else: + self.assertEqual([], g.to_list()) g = GridOut(self.db.fs, f._id) self.assertEqual(b"hello world\n", next(iter(g))) @@ -571,13 +610,17 @@ def test_iterator(self): self.assertEqual(b" are\n", next(iter(g))) self.assertEqual(b"some lines", g.read(10)) self.assertEqual(b".", next(iter(g))) - self.assertRaises(StopIteration, iter(g).__next__) + with self.assertRaises(StopIteration): + iter(g).__next__() f = GridIn(self.db.fs, chunk_size=2) f.write(b"hello world") f.close() g = GridOut(self.db.fs, f._id) - self.assertEqual([b"hello world"], list(g)) + if _IS_SYNC: + self.assertEqual([b"hello world"], list(g)) + else: + self.assertEqual([b"hello world"], g.to_list()) def test_read_unaligned_buffer_size(self): in_data = b"This is a text that doesn't quite fit in a single 16-byte chunk." 
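The seek/readline/iterator tests in this file all revolve around the same GridIn/GridOut round trip. A compact sketch of it, assuming a local mongod; the "pymongo_test" database name is illustrative:

import gridfs
from pymongo import MongoClient

db = MongoClient().pymongo_test
fs = gridfs.GridFS(db)

# put() streams the bytes through a GridIn; a small chunk size forces
# multiple chunk documents, which is what the tests above exercise.
file_id = fs.put(b"hello world\n", filename="hello.txt", chunkSize=5)

out = fs.get(file_id)  # a GridOut: file-like, seekable
assert out.read() == b"hello world\n"
out.seek(-6, 2)  # whence=2 seeks from the end, mirroring _SEEK_END above
assert out.read() == b"world\n"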
@@ -615,7 +658,8 @@ def test_readchunk(self): def test_write_unicode(self): f = GridIn(self.db.fs) - self.assertRaises(TypeError, f.write, "foo") + with self.assertRaises(TypeError): + f.write("foo") f = GridIn(self.db.fs, encoding="utf-8") f.write("foo") @@ -640,8 +684,12 @@ def test_set_after_close(self): self.assertRaises(AttributeError, getattr, f, "uploadDate") self.assertRaises(AttributeError, setattr, f, "_id", 5) - f.bar = "foo" - f.baz = 5 + if _IS_SYNC: + f.bar = "foo" + f.baz = 5 + else: + f.set("bar", "foo") + f.set("baz", 5) self.assertEqual("foo", f._id) self.assertEqual("foo", f.bar) @@ -656,11 +704,17 @@ def test_set_after_close(self): self.assertTrue(f.uploadDate) self.assertRaises(AttributeError, setattr, f, "_id", 5) - f.bar = "a" - f.baz = "b" + if _IS_SYNC: + f.bar = "a" + f.baz = "b" + else: + f.set("bar", "a") + f.set("baz", "b") self.assertRaises(AttributeError, setattr, f, "upload_date", 5) g = GridOut(self.db.fs, f._id) + if not _IS_SYNC: + g.open() self.assertEqual("a", g.bar) self.assertEqual("b", g.baz) # Versions 2.0.1 and older saved a _closed field for some reason. @@ -718,8 +772,12 @@ def write_me(s, chunk_size): def test_grid_out_lazy_connect(self): fs = self.db.fs outfile = GridOut(fs, file_id=-1) - self.assertRaises(NoFile, outfile.read) - self.assertRaises(NoFile, getattr, outfile, "filename") + with self.assertRaises(NoFile): + outfile.read() + with self.assertRaises(NoFile): + if not _IS_SYNC: + outfile.open() + outfile.filename infile = GridIn(fs, filename=1) infile.close() @@ -732,24 +790,26 @@ def test_grid_out_lazy_connect(self): outfile.readchunk() def test_grid_in_lazy_connect(self): - client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) + client = self.simple_client("badhost", connect=False, serverSelectionTimeoutMS=10) fs = client.db.fs infile = GridIn(fs, file_id=-1, chunk_size=1) - self.assertRaises(ServerSelectionTimeoutError, infile.write, b"data") - self.assertRaises(ServerSelectionTimeoutError, infile.close) + with self.assertRaises(ServerSelectionTimeoutError): + infile.write(b"data") + with self.assertRaises(ServerSelectionTimeoutError): + infile.close() def test_unacknowledged(self): # w=0 is prohibited. with self.assertRaises(ConfigurationError): - GridIn(rs_or_single_client(w=0).pymongo_test.fs) + GridIn((self.rs_or_single_client(w=0)).pymongo_test.fs) def test_survive_cursor_not_found(self): # By default the find command returns 101 documents in the first batch. # Use 102 batches to cause a single getMore. chunk_size = 1024 data = b"d" * (102 * chunk_size) - listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) db = client.pymongo_test with GridIn(db.fs, chunk_size=chunk_size) as infile: infile.write(data) @@ -760,9 +820,10 @@ def test_survive_cursor_not_found(self): # Kill the cursor to simulate the cursor timing out on the server # when an application spends a long time between two calls to # readchunk(). + assert client.address is not None client._close_cursor_now( - outfile._GridOut__chunk_iter._cursor.cursor_id, - _CursorAddress(client.address, db.fs.chunks.full_name), + outfile._chunk_iter._cursor.cursor_id, + _CursorAddress(client.address, db.fs.chunks.full_name), # type: ignore[arg-type] ) # Read the rest of the file without error. @@ -771,6 +832,7 @@ def test_survive_cursor_not_found(self): # Paranoid, ensure that a getMore was actually sent. 
self.assertIn("getMore", listener.started_command_names()) + @client_context.require_sync def test_zip(self): zf = BytesIO() z = zipfile.ZipFile(zf, "w") diff --git a/test/test_gridfs.py b/test/test_gridfs.py index 35a574a1d9..8bda041447 100644 --- a/test/test_gridfs.py +++ b/test/test_gridfs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2009-present MongoDB, Inc. # @@ -14,40 +13,46 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. -""" +"""Tests for the gridfs package.""" +from __future__ import annotations +import asyncio import datetime import sys import threading import time from io import BytesIO +from test.helpers import ConcurrentRunner +from unittest.mock import patch sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import joinall, one, rs_client, rs_or_single_client, single_client +from test.utils import joinall +from test.utils_shared import one import gridfs from bson.binary import Binary from gridfs.errors import CorruptGridFile, FileExists, NoFile -from gridfs.grid_file import GridOutCursor -from pymongo.database import Database +from gridfs.synchronous.grid_file import DEFAULT_CHUNK_SIZE, GridOutCursor from pymongo.errors import ( ConfigurationError, NotPrimaryError, ServerSelectionTimeoutError, ) -from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.database import Database +from pymongo.synchronous.mongo_client import MongoClient +_IS_SYNC = True -class JustWrite(threading.Thread): + +class JustWrite(ConcurrentRunner): def __init__(self, fs, n): - threading.Thread.__init__(self) + super().__init__() self.fs = fs self.n = n - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -56,13 +61,13 @@ def run(self): file.close() -class JustRead(threading.Thread): +class JustRead(ConcurrentRunner): def __init__(self, fs, n, results): - threading.Thread.__init__(self) + super().__init__() self.fs = fs self.n = n self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -75,9 +80,9 @@ def run(self): class TestGridfsNoConnect(unittest.TestCase): db: Database - @classmethod - def setUpClass(cls): - cls.db = MongoClient(connect=False).pymongo_test + def setUp(self): + super().setUp() + self.db = MongoClient(connect=False).pymongo_test def test_gridfs(self): self.assertRaises(TypeError, gridfs.GridFS, "foo") @@ -88,32 +93,31 @@ class TestGridfs(IntegrationTest): fs: gridfs.GridFS alt: gridfs.GridFS - @classmethod - def setUpClass(cls): - super(TestGridfs, cls).setUpClass() - cls.fs = gridfs.GridFS(cls.db) - cls.alt = gridfs.GridFS(cls.db, "alt") - def setUp(self): + super().setUp() + self.fs = gridfs.GridFS(self.db) + self.alt = gridfs.GridFS(self.db, "alt") self.cleanup_colls( self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks ) def test_basic(self): oid = self.fs.put(b"hello world") - self.assertEqual(b"hello world", self.fs.get(oid).read()) + self.assertEqual(b"hello world", (self.fs.get(oid)).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(1, self.db.fs.chunks.count_documents({})) self.fs.delete(oid) - self.assertRaises(NoFile, self.fs.get, oid) + with self.assertRaises(NoFile): + self.fs.get(oid) self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) - 
self.assertRaises(NoFile, self.fs.get, "foo") + with self.assertRaises(NoFile): + self.fs.get("foo") oid = self.fs.put(b"hello world", _id="foo") self.assertEqual("foo", oid) - self.assertEqual(b"hello world", self.fs.get("foo").read()) + self.assertEqual(b"hello world", (self.fs.get("foo")).read()) def test_multi_chunk_delete(self): self.db.fs.drop() @@ -141,11 +145,11 @@ def test_list(self): self.fs.put(b"foo", filename="test") self.fs.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), set(self.fs.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.fs.list())) def test_empty_file(self): oid = self.fs.put(b"") - self.assertEqual(b"", self.fs.get(oid).read()) + self.assertEqual(b"", (self.fs.get(oid)).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) @@ -153,7 +157,7 @@ def test_empty_file(self): assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) - self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) self.assertEqual(255 * 1024, raw["chunkSize"]) self.assertNotIn("md5", raw) @@ -162,10 +166,12 @@ def test_corrupt_chunk(self): self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.get(files_id) - self.assertRaises(CorruptGridFile, out.read) + with self.assertRaises(CorruptGridFile): + out.read() out = self.fs.get(files_id) - self.assertRaises(CorruptGridFile, out.readline) + with self.assertRaises(CorruptGridFile): + out.readline() finally: self.fs.delete(files_id) @@ -180,58 +186,60 @@ def test_put_ensures_index(self): self.assertTrue( any( info.get("key") == [("files_id", 1), ("n", 1)] - for info in chunks.index_information().values() + for info in (chunks.index_information()).values() ) ) self.assertTrue( any( info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in files.index_information().values() + for info in (files.index_information()).values() ) ) def test_alt_collection(self): oid = self.alt.put(b"hello world") - self.assertEqual(b"hello world", self.alt.get(oid).read()) + self.assertEqual(b"hello world", (self.alt.get(oid)).read()) self.assertEqual(1, self.db.alt.files.count_documents({})) self.assertEqual(1, self.db.alt.chunks.count_documents({})) self.alt.delete(oid) - self.assertRaises(NoFile, self.alt.get, oid) + with self.assertRaises(NoFile): + self.alt.get(oid) self.assertEqual(0, self.db.alt.files.count_documents({})) self.assertEqual(0, self.db.alt.chunks.count_documents({})) - self.assertRaises(NoFile, self.alt.get, "foo") + with self.assertRaises(NoFile): + self.alt.get("foo") oid = self.alt.put(b"hello world", _id="foo") self.assertEqual("foo", oid) - self.assertEqual(b"hello world", self.alt.get("foo").read()) + self.assertEqual(b"hello world", (self.alt.get("foo")).read()) self.alt.put(b"", filename="mike") self.alt.put(b"foo", filename="test") self.alt.put(b"", filename="hello world") - self.assertEqual(set(["mike", "test", "hello world"]), set(self.alt.list())) + self.assertEqual({"mike", "test", "hello world"}, set(self.alt.list())) def test_threaded_reads(self): self.fs.put(b"hello", _id="test") - threads = [] + tasks = [] results: list = [] for i in range(10): - threads.append(JustRead(self.fs, 10, results)) - threads[i].start() + tasks.append(JustRead(self.fs, 10, results)) + tasks[i].start() - joinall(threads) + joinall(tasks) self.assertEqual(100 
* [b"hello"], results) def test_threaded_writes(self): - threads = [] + tasks = [] for i in range(10): - threads.append(JustWrite(self.fs, 10)) - threads[i].start() + tasks.append(JustWrite(self.fs, 10)) + tasks[i].start() - joinall(threads) + joinall(tasks) f = self.fs.get_last_version("test") self.assertEqual(f.read(), b"hello") @@ -249,34 +257,37 @@ def test_get_last_version(self): two = two._id three = self.fs.put(b"baz", filename="test") - self.assertEqual(b"baz", self.fs.get_last_version("test").read()) + self.assertEqual(b"baz", (self.fs.get_last_version("test")).read()) self.fs.delete(three) - self.assertEqual(b"bar", self.fs.get_last_version("test").read()) + self.assertEqual(b"bar", (self.fs.get_last_version("test")).read()) self.fs.delete(two) - self.assertEqual(b"foo", self.fs.get_last_version("test").read()) + self.assertEqual(b"foo", (self.fs.get_last_version("test")).read()) self.fs.delete(one) - self.assertRaises(NoFile, self.fs.get_last_version, "test") + with self.assertRaises(NoFile): + self.fs.get_last_version("test") def test_get_last_version_with_metadata(self): one = self.fs.put(b"foo", filename="test", author="author") time.sleep(0.01) two = self.fs.put(b"bar", filename="test", author="author") - self.assertEqual(b"bar", self.fs.get_last_version(author="author").read()) + self.assertEqual(b"bar", (self.fs.get_last_version(author="author")).read()) self.fs.delete(two) - self.assertEqual(b"foo", self.fs.get_last_version(author="author").read()) + self.assertEqual(b"foo", (self.fs.get_last_version(author="author")).read()) self.fs.delete(one) one = self.fs.put(b"foo", filename="test", author="author1") time.sleep(0.01) two = self.fs.put(b"bar", filename="test", author="author2") - self.assertEqual(b"foo", self.fs.get_last_version(author="author1").read()) - self.assertEqual(b"bar", self.fs.get_last_version(author="author2").read()) - self.assertEqual(b"bar", self.fs.get_last_version(filename="test").read()) + self.assertEqual(b"foo", (self.fs.get_last_version(author="author1")).read()) + self.assertEqual(b"bar", (self.fs.get_last_version(author="author2")).read()) + self.assertEqual(b"bar", (self.fs.get_last_version(filename="test")).read()) - self.assertRaises(NoFile, self.fs.get_last_version, author="author3") - self.assertRaises(NoFile, self.fs.get_last_version, filename="nottest", author="author1") + with self.assertRaises(NoFile): + self.fs.get_last_version(author="author3") + with self.assertRaises(NoFile): + self.fs.get_last_version(filename="nottest", author="author1") self.fs.delete(one) self.fs.delete(two) @@ -289,16 +300,18 @@ def test_get_version(self): self.fs.put(b"baz", filename="test") time.sleep(0.01) - self.assertEqual(b"foo", self.fs.get_version("test", 0).read()) - self.assertEqual(b"bar", self.fs.get_version("test", 1).read()) - self.assertEqual(b"baz", self.fs.get_version("test", 2).read()) + self.assertEqual(b"foo", (self.fs.get_version("test", 0)).read()) + self.assertEqual(b"bar", (self.fs.get_version("test", 1)).read()) + self.assertEqual(b"baz", (self.fs.get_version("test", 2)).read()) - self.assertEqual(b"baz", self.fs.get_version("test", -1).read()) - self.assertEqual(b"bar", self.fs.get_version("test", -2).read()) - self.assertEqual(b"foo", self.fs.get_version("test", -3).read()) + self.assertEqual(b"baz", (self.fs.get_version("test", -1)).read()) + self.assertEqual(b"bar", (self.fs.get_version("test", -2)).read()) + self.assertEqual(b"foo", (self.fs.get_version("test", -3)).read()) - self.assertRaises(NoFile, self.fs.get_version, "test", 
3) - self.assertRaises(NoFile, self.fs.get_version, "test", -4) + with self.assertRaises(NoFile): + self.fs.get_version("test", 3) + with self.assertRaises(NoFile): + self.fs.get_version("test", -4) def test_get_version_with_metadata(self): one = self.fs.put(b"foo", filename="test", author="author1") @@ -308,25 +321,32 @@ def test_get_version_with_metadata(self): three = self.fs.put(b"baz", filename="test", author="author2") self.assertEqual( - b"foo", self.fs.get_version(filename="test", author="author1", version=-2).read() + b"foo", + (self.fs.get_version(filename="test", author="author1", version=-2)).read(), ) self.assertEqual( - b"bar", self.fs.get_version(filename="test", author="author1", version=-1).read() + b"bar", + (self.fs.get_version(filename="test", author="author1", version=-1)).read(), ) self.assertEqual( - b"foo", self.fs.get_version(filename="test", author="author1", version=0).read() + b"foo", + (self.fs.get_version(filename="test", author="author1", version=0)).read(), ) self.assertEqual( - b"bar", self.fs.get_version(filename="test", author="author1", version=1).read() + b"bar", + (self.fs.get_version(filename="test", author="author1", version=1)).read(), ) self.assertEqual( - b"baz", self.fs.get_version(filename="test", author="author2", version=0).read() + b"baz", + (self.fs.get_version(filename="test", author="author2", version=0)).read(), ) - self.assertEqual(b"baz", self.fs.get_version(filename="test", version=-1).read()) - self.assertEqual(b"baz", self.fs.get_version(filename="test", version=2).read()) + self.assertEqual(b"baz", (self.fs.get_version(filename="test", version=-1)).read()) + self.assertEqual(b"baz", (self.fs.get_version(filename="test", version=2)).read()) - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author3") - self.assertRaises(NoFile, self.fs.get_version, filename="test", author="author1", version=2) + with self.assertRaises(NoFile): + self.fs.get_version(filename="test", author="author3") + with self.assertRaises(NoFile): + self.fs.get_version(filename="test", author="author1", version=2) self.fs.delete(one) self.fs.delete(two) @@ -335,18 +355,31 @@ def test_get_version_with_metadata(self): def test_put_filelike(self): oid = self.fs.put(BytesIO(b"hello world"), chunk_size=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - self.assertEqual(b"hello world", self.fs.get(oid).read()) + self.assertEqual(b"hello world", (self.fs.get(oid)).read()) def test_file_exists(self): oid = self.fs.put(b"hello") - self.assertRaises(FileExists, self.fs.put, b"world", _id=oid) + with self.assertRaises(FileExists): + self.fs.put(b"world", _id=oid) one = self.fs.new_file(_id=123) one.write(b"some content") one.close() + # Attempt to upload a file with more chunks to the same _id. + with patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", DEFAULT_CHUNK_SIZE): + two = self.fs.new_file(_id=123) + with self.assertRaises(FileExists): + two.write(b"x" * DEFAULT_CHUNK_SIZE * 3) + # Original file is still readable (no extra chunks were uploaded). + self.assertEqual((self.fs.get(123)).read(), b"some content") + two = self.fs.new_file(_id=123) - self.assertRaises(FileExists, two.write, b"x" * 262146) + two.write(b"some content") + with self.assertRaises(FileExists): + two.close() + # Original file is still readable. 
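+        # close() raised before a second files document could replace the original.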
+ self.assertEqual((self.fs.get(123)).read(), b"some content") def test_exists(self): oid = self.fs.put(b"hello") @@ -374,15 +407,16 @@ def test_exists(self): self.assertFalse(self.fs.exists({"foo": {"$gt": 12}})) def test_put_unicode(self): - self.assertRaises(TypeError, self.fs.put, "hello") + with self.assertRaises(TypeError): + self.fs.put("hello") oid = self.fs.put("hello", encoding="utf-8") - self.assertEqual(b"hello", self.fs.get(oid).read()) - self.assertEqual("utf-8", self.fs.get(oid).encoding) + self.assertEqual(b"hello", (self.fs.get(oid)).read()) + self.assertEqual("utf-8", (self.fs.get(oid)).encoding) oid = self.fs.put("aé", encoding="iso-8859-1") - self.assertEqual("aé".encode("iso-8859-1"), self.fs.get(oid).read()) - self.assertEqual("iso-8859-1", self.fs.get(oid).encoding) + self.assertEqual("aé".encode("iso-8859-1"), (self.fs.get(oid)).read()) + self.assertEqual("iso-8859-1", (self.fs.get(oid)).encoding) def test_missing_length_iter(self): # Test fix that guards against PHP-237 @@ -394,21 +428,23 @@ def test_missing_length_iter(self): f = self.fs.get_last_version(filename="empty") def iterate_file(grid_file): - for chunk in grid_file: + for _chunk in grid_file: pass return True self.assertTrue(iterate_file(f)) def test_gridfs_lazy_connect(self): - client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=10) + client = self.single_client("badhost", connect=False, serverSelectionTimeoutMS=10) db = client.db gfs = gridfs.GridFS(db) - self.assertRaises(ServerSelectionTimeoutError, gfs.list) + with self.assertRaises(ServerSelectionTimeoutError): + gfs.list() fs = gridfs.GridFS(db) f = fs.new_file() - self.assertRaises(ServerSelectionTimeoutError, f.close) + with self.assertRaises(ServerSelectionTimeoutError): + f.close() def test_gridfs_find(self): self.fs.put(b"test2", filename="two") @@ -422,14 +458,21 @@ def test_gridfs_find(self): self.assertEqual(3, files.count_documents({"filename": "two"})) self.assertEqual(4, files.count_documents({})) cursor = self.fs.find(no_cursor_timeout=False).sort("uploadDate", -1).skip(1).limit(2) - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test1", gout.read()) cursor.rewind() - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test1", gout.read()) - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test2+", gout.read()) - self.assertRaises(StopIteration, cursor.__next__) + with self.assertRaises(StopIteration): + cursor.__next__() + cursor.rewind() + items = cursor.to_list() + self.assertEqual(len(items), 2) + cursor.rewind() + items = cursor.to_list(1) + self.assertEqual(len(items), 1) cursor.close() self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) @@ -471,12 +514,12 @@ def test_grid_in_non_int_chunksize(self): self.fs.put(data, filename="f") self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, self.fs.get_version("f").read()) + self.assertEqual(data, (self.fs.get_version("f")).read()) def test_unacknowledged(self): # w=0 is prohibited. 
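+        # GridFS needs acknowledged writes to detect errors such as FileExists.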
with self.assertRaises(ConfigurationError): - gridfs.GridFS(rs_or_single_client(w=0).pymongo_test) + gridfs.GridFS((self.rs_or_single_client(w=0)).pymongo_test) def test_md5(self): gin = self.fs.new_file() @@ -493,17 +536,17 @@ def test_md5(self): class TestGridfsReplicaSet(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) - def setUpClass(cls): - super(TestGridfsReplicaSet, cls).setUpClass() + def setUp(self): + super().setUp() @classmethod + @client_context.require_connection def tearDownClass(cls): client_context.client.drop_database("gfsreplica") def test_gridfs_replica_set(self): - rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) + rsc = self.rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) fs = gridfs.GridFS(rsc.gfsreplica, "gfsreplicatest") @@ -511,12 +554,12 @@ def test_gridfs_replica_set(self): self.assertEqual(gin._coll.read_preference, ReadPreference.PRIMARY) oid = fs.put(b"foo") - content = fs.get(oid).read() + content = (fs.get(oid)).read() self.assertEqual(b"foo", content) def test_gridfs_secondary(self): secondary_host, secondary_port = one(self.client.secondaries) - secondary_connection = single_client( + secondary_connection = self.single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY ) @@ -525,13 +568,14 @@ def test_gridfs_secondary(self): fs = gridfs.GridFS(secondary_connection.gfsreplica, "gfssecondarytest") # This won't detect secondary, raises error - self.assertRaises(NotPrimaryError, fs.put, b"foo") + with self.assertRaises(NotPrimaryError): + fs.put(b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. secondary_host, secondary_port = one(self.client.secondaries) - client = single_client( + client = self.single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False ) @@ -539,8 +583,10 @@ def test_gridfs_secondary_lazy(self): fs = gridfs.GridFS(client.gfsreplica, "gfssecondarylazytest") # Connects, doesn't create index. - self.assertRaises(NoFile, fs.get_last_version) - self.assertRaises(NotPrimaryError, fs.put, "data", encoding="utf-8") + with self.assertRaises(NoFile): + fs.get_last_version() + with self.assertRaises(NotPrimaryError): + fs.put("data", encoding="utf-8") if __name__ == "__main__": diff --git a/test/test_gridfs_bucket.py b/test/test_gridfs_bucket.py index d9bf0cf058..9dbb082ee9 100644 --- a/test/test_gridfs_bucket.py +++ b/test/test_gridfs_bucket.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2015-present MongoDB, Inc. # @@ -14,15 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for the gridfs package. 
-""" +"""Tests for the gridfs package.""" +from __future__ import annotations + +import asyncio import datetime import itertools +import sys import threading import time from io import BytesIO +from test.helpers import ConcurrentRunner +from unittest.mock import patch + +sys.path[0:0] = [""] + from test import IntegrationTest, client_context, unittest -from test.utils import joinall, one, rs_client, rs_or_single_client, single_client +from test.utils import joinall +from test.utils_shared import one import gridfs from bson.binary import Binary @@ -34,17 +42,20 @@ ConfigurationError, NotPrimaryError, ServerSelectionTimeoutError, + WriteConcernError, ) -from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.mongo_client import MongoClient + +_IS_SYNC = True -class JustWrite(threading.Thread): +class JustWrite(ConcurrentRunner): def __init__(self, gfs, num): - threading.Thread.__init__(self) + super().__init__() self.gfs = gfs self.num = num - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.num): @@ -53,13 +64,13 @@ def run(self): file.close() -class JustRead(threading.Thread): +class JustRead(ConcurrentRunner): def __init__(self, gfs, num, results): - threading.Thread.__init__(self) + super().__init__() self.gfs = gfs self.num = num self.results = results - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.num): @@ -73,25 +84,23 @@ class TestGridfs(IntegrationTest): fs: gridfs.GridFSBucket alt: gridfs.GridFSBucket - @classmethod - def setUpClass(cls): - super(TestGridfs, cls).setUpClass() - cls.fs = gridfs.GridFSBucket(cls.db) - cls.alt = gridfs.GridFSBucket(cls.db, bucket_name="alt") - def setUp(self): + super().setUp() + self.fs = gridfs.GridFSBucket(self.db) + self.alt = gridfs.GridFSBucket(self.db, bucket_name="alt") self.cleanup_colls( self.db.fs.files, self.db.fs.chunks, self.db.alt.files, self.db.alt.chunks ) def test_basic(self): oid = self.fs.upload_from_stream("test_filename", b"hello world") - self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"hello world", (self.fs.open_download_stream(oid)).read()) self.assertEqual(1, self.db.fs.files.count_documents({})) self.assertEqual(1, self.db.fs.chunks.count_documents({})) self.fs.delete(oid) - self.assertRaises(NoFile, self.fs.open_download_stream, oid) + with self.assertRaises(NoFile): + self.fs.open_download_stream(oid) self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) @@ -106,9 +115,20 @@ def test_multi_chunk_delete(self): self.assertEqual(0, self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) + def test_delete_by_name(self): + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + gfs = gridfs.GridFSBucket(self.db) + gfs.upload_from_stream("test_filename", b"hello", chunk_size_bytes=1) + self.assertEqual(1, self.db.fs.files.count_documents({})) + self.assertEqual(5, self.db.fs.chunks.count_documents({})) + gfs.delete_by_name("test_filename") + self.assertEqual(0, self.db.fs.files.count_documents({})) + self.assertEqual(0, self.db.fs.chunks.count_documents({})) + def test_empty_file(self): oid = self.fs.upload_from_stream("test_filename", b"") - self.assertEqual(b"", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"", (self.fs.open_download_stream(oid)).read()) self.assertEqual(1, 
self.db.fs.files.count_documents({})) self.assertEqual(0, self.db.fs.chunks.count_documents({})) @@ -116,7 +136,7 @@ def test_empty_file(self): assert raw is not None self.assertEqual(0, raw["length"]) self.assertEqual(oid, raw["_id"]) - self.assertTrue(isinstance(raw["uploadDate"], datetime.datetime)) + self.assertIsInstance(raw["uploadDate"], datetime.datetime) self.assertEqual(255 * 1024, raw["chunkSize"]) self.assertNotIn("md5", raw) @@ -125,10 +145,12 @@ def test_corrupt_chunk(self): self.db.fs.chunks.update_one({"files_id": files_id}, {"$set": {"data": Binary(b"foo", 0)}}) try: out = self.fs.open_download_stream(files_id) - self.assertRaises(CorruptGridFile, out.read) + with self.assertRaises(CorruptGridFile): + out.read() out = self.fs.open_download_stream(files_id) - self.assertRaises(CorruptGridFile, out.readline) + with self.assertRaises(CorruptGridFile): + out.readline() finally: self.fs.delete(files_id) @@ -140,17 +162,16 @@ def test_upload_ensures_index(self): files.drop() self.fs.upload_from_stream("filename", b"junk") - self.assertTrue( - any( - info.get("key") == [("files_id", 1), ("n", 1)] - for info in chunks.index_information().values() - ) + self.assertIn( + [("files_id", 1), ("n", 1)], + [info.get("key") for info in (chunks.index_information()).values()], + "Missing required index on chunks collection: {files_id: 1, n: 1}", ) - self.assertTrue( - any( - info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in files.index_information().values() - ) + + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", ) def test_ensure_index_shell_compat(self): @@ -168,36 +189,37 @@ def test_ensure_index_shell_compat(self): # No error. 
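+            # The shell-created index spec (float key values) must be accepted as equivalent.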
self.fs.upload_from_stream("filename", b"data") - self.assertTrue( - any( - info.get("key") == [("filename", 1), ("uploadDate", 1)] - for info in files.index_information().values() - ) + self.assertIn( + [("filename", 1), ("uploadDate", 1)], + [info.get("key") for info in (files.index_information()).values()], + "Missing required index on files collection: {filename: 1, uploadDate: 1}", ) files.drop() def test_alt_collection(self): oid = self.alt.upload_from_stream("test_filename", b"hello world") - self.assertEqual(b"hello world", self.alt.open_download_stream(oid).read()) + self.assertEqual(b"hello world", (self.alt.open_download_stream(oid)).read()) self.assertEqual(1, self.db.alt.files.count_documents({})) self.assertEqual(1, self.db.alt.chunks.count_documents({})) self.alt.delete(oid) - self.assertRaises(NoFile, self.alt.open_download_stream, oid) + with self.assertRaises(NoFile): + self.alt.open_download_stream(oid) self.assertEqual(0, self.db.alt.files.count_documents({})) self.assertEqual(0, self.db.alt.chunks.count_documents({})) - self.assertRaises(NoFile, self.alt.open_download_stream, "foo") + with self.assertRaises(NoFile): + self.alt.open_download_stream("foo") self.alt.upload_from_stream("foo", b"hello world") - self.assertEqual(b"hello world", self.alt.open_download_stream_by_name("foo").read()) + self.assertEqual(b"hello world", (self.alt.open_download_stream_by_name("foo")).read()) self.alt.upload_from_stream("mike", b"") self.alt.upload_from_stream("test", b"foo") self.alt.upload_from_stream("hello world", b"") self.assertEqual( - set(["mike", "test", "hello world", "foo"]), - set(k["filename"] for k in list(self.db.alt.files.find())), + {"mike", "test", "hello world", "foo"}, + {k["filename"] for k in self.db.alt.files.find().to_list()}, ) def test_threaded_reads(self): @@ -237,13 +259,14 @@ def test_get_last_version(self): two = two._id three = self.fs.upload_from_stream("test", b"baz") - self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test")).read()) self.fs.delete(three) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test")).read()) self.fs.delete(two) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test").read()) + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test")).read()) self.fs.delete(one) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test") + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("test") def test_get_version(self): self.fs.upload_from_stream("test", b"foo") @@ -253,41 +276,76 @@ def test_get_version(self): self.fs.upload_from_stream("test", b"baz") time.sleep(0.01) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=0).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", revision=1).read()) - self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=2).read()) + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test", revision=0)).read()) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test", revision=1)).read()) + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test", revision=2)).read()) - self.assertEqual(b"baz", self.fs.open_download_stream_by_name("test", revision=-1).read()) - self.assertEqual(b"bar", self.fs.open_download_stream_by_name("test", 
revision=-2).read()) - self.assertEqual(b"foo", self.fs.open_download_stream_by_name("test", revision=-3).read()) + self.assertEqual(b"baz", (self.fs.open_download_stream_by_name("test", revision=-1)).read()) + self.assertEqual(b"bar", (self.fs.open_download_stream_by_name("test", revision=-2)).read()) + self.assertEqual(b"foo", (self.fs.open_download_stream_by_name("test", revision=-3)).read()) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=3) - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "test", revision=-4) + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("test", revision=3) + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("test", revision=-4) def test_upload_from_stream(self): oid = self.fs.upload_from_stream("test_file", BytesIO(b"hello world"), chunk_size_bytes=1) self.assertEqual(11, self.db.fs.chunks.count_documents({})) - self.assertEqual(b"hello world", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"hello world", (self.fs.open_download_stream(oid)).read()) def test_upload_from_stream_with_id(self): oid = ObjectId() self.fs.upload_from_stream_with_id( oid, "test_file_custom_id", BytesIO(b"custom id"), chunk_size_bytes=1 ) - self.assertEqual(b"custom id", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"custom id", (self.fs.open_download_stream(oid)).read()) + + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 3) + @client_context.require_failCommand_fail_point + def test_upload_bulk_write_error(self): + # Test BulkWriteError from insert_many is converted to an insert_one style error. + expected_wce = { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes", + } + cause_wce = { + "configureFailPoint": "failCommand", + "mode": {"times": 2}, + "data": {"failCommands": ["insert"], "writeConcernError": expected_wce}, + } + gin = self.fs.open_upload_stream("test_file", chunk_size_bytes=1) + with self.fail_point(cause_wce): + # Assert we raise WriteConcernError, not BulkWriteError. + with self.assertRaises(WriteConcernError): + gin.write(b"hello world") + # 3 chunks were uploaded. + self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) + gin.abort() + + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_CHUNKS", 10) + def test_upload_batching(self): + with self.fs.open_upload_stream("test_file", chunk_size_bytes=1) as gin: + gin.write(b"s" * (10 - 1)) + # No chunks were uploaded yet. + self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) + gin.write(b"s") + # All chunks were uploaded since we hit the _UPLOAD_BUFFER_CHUNKS limit. 
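+            # (chunk_size_bytes=1 makes each buffered byte its own chunk)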
+ self.assertEqual(10, self.db.fs.chunks.count_documents({"files_id": gin._id})) def test_open_upload_stream(self): gin = self.fs.open_upload_stream("from_stream") gin.write(b"from stream") gin.close() - self.assertEqual(b"from stream", self.fs.open_download_stream(gin._id).read()) + self.assertEqual(b"from stream", (self.fs.open_download_stream(gin._id)).read()) def test_open_upload_stream_with_id(self): oid = ObjectId() gin = self.fs.open_upload_stream_with_id(oid, "from_stream_custom_id") gin.write(b"from stream with custom id") gin.close() - self.assertEqual(b"from stream with custom id", self.fs.open_download_stream(oid).read()) + self.assertEqual(b"from stream with custom id", (self.fs.open_download_stream(oid)).read()) def test_missing_length_iter(self): # Test fix that guards against PHP-237 @@ -306,15 +364,15 @@ def iterate_file(grid_file): self.assertTrue(iterate_file(fstr)) def test_gridfs_lazy_connect(self): - client = MongoClient("badhost", connect=False, serverSelectionTimeoutMS=0) + client = self.single_client("badhost", connect=False, serverSelectionTimeoutMS=0) cdb = client.db gfs = gridfs.GridFSBucket(cdb) - self.assertRaises(ServerSelectionTimeoutError, gfs.delete, 0) + with self.assertRaises(ServerSelectionTimeoutError): + gfs.delete(0) gfs = gridfs.GridFSBucket(cdb) - self.assertRaises( - ServerSelectionTimeoutError, gfs.upload_from_stream, "test", b"" - ) # Still no connection. + with self.assertRaises(ServerSelectionTimeoutError): + gfs.upload_from_stream("test", b"") # Still no connection. def test_gridfs_find(self): self.fs.upload_from_stream("two", b"test2") @@ -330,14 +388,15 @@ def test_gridfs_find(self): cursor = self.fs.find( {}, no_cursor_timeout=False, sort=[("uploadDate", -1)], skip=1, limit=2 ) - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test1", gout.read()) cursor.rewind() - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test1", gout.read()) - gout = next(cursor) + gout = cursor.next() self.assertEqual(b"test2+", gout.read()) - self.assertRaises(StopIteration, cursor.__next__) + with self.assertRaises(StopIteration): + cursor.next() cursor.close() self.assertRaises(TypeError, self.fs.find, {}, {"_id": True}) @@ -347,21 +406,32 @@ def test_grid_in_non_int_chunksize(self): self.fs.upload_from_stream("f", data) self.db.fs.files.update_one({"filename": "f"}, {"$set": {"chunkSize": 100.0}}) - self.assertEqual(data, self.fs.open_download_stream_by_name("f").read()) + self.assertEqual(data, (self.fs.open_download_stream_by_name("f")).read()) def test_unacknowledged(self): # w=0 is prohibited. 
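+        # An unacknowledged bucket could not report upload failures, so construction fails fast.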
with self.assertRaises(ConfigurationError): - gridfs.GridFSBucket(rs_or_single_client(w=0).pymongo_test) + gridfs.GridFSBucket((self.rs_or_single_client(w=0)).pymongo_test) def test_rename(self): _id = self.fs.upload_from_stream("first_name", b"testing") - self.assertEqual(b"testing", self.fs.open_download_stream_by_name("first_name").read()) + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("first_name")).read()) self.fs.rename(_id, "second_name") - self.assertRaises(NoFile, self.fs.open_download_stream_by_name, "first_name") - self.assertEqual(b"testing", self.fs.open_download_stream_by_name("second_name").read()) + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("first_name") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("second_name")).read()) + + def test_rename_by_name(self): + _id = self.fs.upload_from_stream("first_name", b"testing") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("first_name")).read()) + self.fs.rename_by_name("first_name", "second_name") + with self.assertRaises(NoFile): + self.fs.open_download_stream_by_name("first_name") + self.assertEqual(b"testing", (self.fs.open_download_stream_by_name("second_name")).read()) + + @patch("gridfs.synchronous.grid_file._UPLOAD_BUFFER_SIZE", 5) def test_abort(self): gin = self.fs.open_upload_stream("test_filename", chunk_size_bytes=5) gin.write(b"test1") @@ -370,7 +440,8 @@ def test_abort(self): self.assertEqual(3, self.db.fs.chunks.count_documents({"files_id": gin._id})) gin.abort() self.assertTrue(gin.closed) - self.assertRaises(ValueError, gin.write, b"test4") + with self.assertRaises(ValueError): + gin.write(b"test4") self.assertEqual(0, self.db.fs.chunks.count_documents({"files_id": gin._id})) def test_download_to_stream(self): @@ -439,26 +510,26 @@ def test_md5(self): class TestGridfsBucketReplicaSet(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) - def setUpClass(cls): - super(TestGridfsBucketReplicaSet, cls).setUpClass() + def setUp(self): + super().setUp() @classmethod + @client_context.require_connection def tearDownClass(cls): client_context.client.drop_database("gfsbucketreplica") def test_gridfs_replica_set(self): - rsc = rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) + rsc = self.rs_client(w=client_context.w, read_preference=ReadPreference.SECONDARY) gfs = gridfs.GridFSBucket(rsc.gfsbucketreplica, "gfsbucketreplicatest") oid = gfs.upload_from_stream("test_filename", b"foo") - content = gfs.open_download_stream(oid).read() + content = (gfs.open_download_stream(oid)).read() self.assertEqual(b"foo", content) def test_gridfs_secondary(self): secondary_host, secondary_port = one(self.client.secondaries) - secondary_connection = single_client( + secondary_connection = self.single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY ) @@ -467,13 +538,14 @@ def test_gridfs_secondary(self): gfs = gridfs.GridFSBucket(secondary_connection.gfsbucketreplica, "gfsbucketsecondarytest") # This won't detect secondary, raises error - self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"foo") + with self.assertRaises(NotPrimaryError): + gfs.upload_from_stream("test_filename", b"foo") def test_gridfs_secondary_lazy(self): # Should detect it's connected to secondary and not attempt to # create index. 
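+        # connect=False defers the index build until the first operation.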
secondary_host, secondary_port = one(self.client.secondaries) - client = single_client( + client = self.single_client( secondary_host, secondary_port, read_preference=ReadPreference.SECONDARY, connect=False ) @@ -481,8 +553,10 @@ def test_gridfs_secondary_lazy(self): gfs = gridfs.GridFSBucket(client.gfsbucketreplica, "gfsbucketsecondarylazytest") # Connects, doesn't create index. - self.assertRaises(NoFile, gfs.open_download_stream_by_name, "test_filename") - self.assertRaises(NotPrimaryError, gfs.upload_from_stream, "test_filename", b"data") + with self.assertRaises(NoFile): + gfs.open_download_stream_by_name("test_filename") + with self.assertRaises(NotPrimaryError): + gfs.upload_from_stream("test_filename", b"data") if __name__ == "__main__": diff --git a/test/test_gridfs_spec.py b/test/test_gridfs_spec.py index d080c05c4d..e84e19725e 100644 --- a/test/test_gridfs_spec.py +++ b/test/test_gridfs_spec.py @@ -13,17 +13,24 @@ # limitations under the License. """Test the GridFS unified spec tests.""" +from __future__ import annotations import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "gridfs") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "gridfs") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "gridfs") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_heartbeat_monitoring.py b/test/test_heartbeat_monitoring.py index a14ab9a3a7..7864caf6e1 100644 --- a/test/test_heartbeat_monitoring.py +++ b/test/test_heartbeat_monitoring.py @@ -13,17 +13,21 @@ # limitations under the License. """Test the monitoring of the server heartbeats.""" +from __future__ import annotations import sys +from test.utils import MockPool sys.path[0:0] = [""] from test import IntegrationTest, client_knobs, unittest -from test.utils import HeartbeatEventListener, MockPool, single_client, wait_until +from test.utils_shared import HeartbeatEventListener, wait_until from pymongo.errors import ConnectionFailure from pymongo.hello import Hello, HelloCompat -from pymongo.monitor import Monitor +from pymongo.synchronous.monitor import Monitor + +_IS_SYNC = True class TestHeartbeatMonitoring(IntegrationTest): @@ -39,8 +43,12 @@ def _check_with_socket(self, *args, **kwargs): raise responses[1] return Hello(responses[1]), 99 - m = single_client( - h=uri, event_listeners=(listener,), _monitor_class=MockMonitor, _pool_class=MockPool + _ = self.single_client( + h=uri, + event_listeners=(listener,), + _monitor_class=MockMonitor, + _pool_class=MockPool, + connect=True, ) expected_len = len(expected_results) @@ -49,20 +57,16 @@ def _check_with_socket(self, *args, **kwargs): # of this test. wait_until(lambda: len(listener.events) >= expected_len, "publish all events") - try: - # zip gives us len(expected_results) pairs. - for expected, actual in zip(expected_results, listener.events): - self.assertEqual(expected, actual.__class__.__name__) - self.assertEqual(actual.connection_id, responses[0]) - if expected != "ServerHeartbeatStartedEvent": - if isinstance(actual.reply, Hello): - self.assertEqual(actual.duration, 99) - self.assertEqual(actual.reply._doc, responses[1]) - else: - self.assertEqual(actual.reply, responses[1]) - - finally: - m.close() + # zip gives us len(expected_results) pairs. 
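+        # Extra events beyond expected_len are ignored by zip's truncation.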
+        for expected, actual in zip(expected_results, listener.events):
+            self.assertEqual(expected, actual.__class__.__name__)
+            self.assertEqual(actual.connection_id, responses[0])
+            if expected != "ServerHeartbeatStartedEvent":
+                if isinstance(actual.reply, Hello):
+                    self.assertEqual(actual.duration, 99)
+                    self.assertEqual(actual.reply._doc, responses[1])
+                else:
+                    self.assertEqual(actual.reply, responses[1])

     def test_standalone(self):
         responses = (
diff --git a/test/test_index_management.py b/test/test_index_management.py
new file mode 100644
index 0000000000..dea8c0e2be
--- /dev/null
+++ b/test/test_index_management.py
@@ -0,0 +1,379 @@
+# Copyright 2023-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run the index management spec tests."""
+from __future__ import annotations
+
+import asyncio
+import os
+import pathlib
+import sys
+import time
+import uuid
+from typing import Any, Mapping
+
+import pytest
+
+sys.path[0:0] = [""]
+
+from test import IntegrationTest, PyMongoTestCase, unittest
+from test.unified_format import generate_test_classes
+from test.utils_shared import AllowListEventListener, OvertCommandListener
+
+from pymongo.errors import OperationFailure
+from pymongo.operations import SearchIndexModel
+from pymongo.read_concern import ReadConcern
+from pymongo.write_concern import WriteConcern
+
+_IS_SYNC = True
+
+pytestmark = pytest.mark.search_index
+
+# Location of JSON test specifications.
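+# (_IS_SYNC selects the synchronous spec path; the async variant resolves one directory up.)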
+if _IS_SYNC:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "index_management")
+else:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "index_management")
+
+_NAME = "test-search-index"
+
+
+class TestCreateSearchIndex(IntegrationTest):
+    def test_inputs(self):
+        listener = AllowListEventListener("createSearchIndexes")
+        client = self.simple_client(event_listeners=[listener])
+        coll = client.test.test
+        coll.drop()
+        definition = dict(mappings=dict(dynamic=True))
+        model_kwarg_list: list[Mapping[str, Any]] = [
+            dict(definition=definition, name=None),
+            dict(definition=definition, name="test"),
+        ]
+        for model_kwargs in model_kwarg_list:
+            model = SearchIndexModel(**model_kwargs)
+            with self.assertRaises(OperationFailure):
+                coll.create_search_index(model)
+            with self.assertRaises(OperationFailure):
+                coll.create_search_index(model_kwargs)
+
+        listener.reset()
+        with self.assertRaises(OperationFailure):
+            coll.create_search_index({"definition": definition, "arbitraryOption": 1})
+        self.assertEqual(
+            {"definition": definition, "arbitraryOption": 1},
+            listener.events[0].command["indexes"][0],
+        )
+
+        listener.reset()
+        with self.assertRaises(OperationFailure):
+            coll.create_search_index({"definition": definition, "type": "search"})
+        self.assertEqual(
+            {"definition": definition, "type": "search"}, listener.events[0].command["indexes"][0]
+        )
+
+
+class SearchIndexIntegrationBase(PyMongoTestCase):
+    db_name = "test_search_index_base"
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        cls.url = os.environ.get("MONGODB_URI")
+        cls.username = os.environ["DB_USER"]
+        cls.password = os.environ["DB_PASSWORD"]
+        cls.listener = OvertCommandListener()
+
+    def setUp(self) -> None:
+        self.client = self.simple_client(
+            self.url,
+            username=self.username,
+            password=self.password,
+            event_listeners=[self.listener],
+        )
+        self.client.drop_database(_NAME)
+        self.db = self.client[self.db_name]
+
+    def tearDown(self):
+        self.client.drop_database(_NAME)
+
+    def wait_for_ready(self, coll, name=_NAME, predicate=None):
+        """Wait for a search index to be ready."""
+        indices: list[Mapping[str, Any]] = []
+        if predicate is None:
+            predicate = lambda index: index.get("queryable") is True
+
+        while True:
+            indices = (coll.list_search_indexes(name)).to_list()
+            if len(indices) and predicate(indices[0]):
+                return indices[0]
+            time.sleep(5)
+
+
+class TestSearchIndexIntegration(SearchIndexIntegrationBase):
+    db_name = "test_search_index"
+
+    def test_comment_field(self):
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create a new search index on ``coll0`` that implicitly passes its type.
+        search_definition = {"mappings": {"dynamic": False}}
+        self.listener.reset()
+        implicit_search_resp = coll0.create_search_index(
+            model={"name": _NAME + "-implicit", "definition": search_definition}, comment="foo"
+        )
+        event = self.listener.events[0]
+        self.assertEqual(event.command["comment"], "foo")
+
+        # Get the index definition.
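+        # list_search_indexes should forward the same comment to the server command.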
+        self.listener.reset()
+        (coll0.list_search_indexes(name=implicit_search_resp, comment="foo")).next()
+        event = self.listener.events[0]
+        self.assertEqual(event.command["comment"], "foo")
+
+
+class TestSearchIndexProse(SearchIndexIntegrationBase):
+    db_name = "test_search_index_prose"
+
+    def test_case_1(self):
+        """Driver can successfully create and list search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. Use the following definition:
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        coll0.insert_one({})
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``:
+        # An index with the ``name`` of ``test-search-index`` is present and the index has a field ``queryable`` with a value of ``true``.
+        index = self.wait_for_ready(coll0)
+
+        # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }``.
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model["definition"])
+
+    def test_case_2(self):
+        """Driver can successfully create multiple indexes in batch."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create two new search indexes on ``coll0`` with the ``createSearchIndexes`` helper.
+        name1 = "test-search-index-1"
+        name2 = "test-search-index-2"
+        definition = {"mappings": {"dynamic": False}}
+        index_definitions: list[dict[str, Any]] = [
+            {"name": name1, "definition": definition},
+            {"name": name2, "definition": definition},
+        ]
+        coll0.create_search_indexes(
+            [SearchIndexModel(i["definition"], i["name"]) for i in index_definitions]
+        )
+
+        # Assert that the command returns an array containing the new indexes' names: ``["test-search-index-1", "test-search-index-2"]``.
+        indices = (coll0.list_search_indexes()).to_list()
+        names = [i["name"] for i in indices]
+        self.assertIn(name1, names)
+        self.assertIn(name2, names)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied.
+        # An index with the ``name`` of ``test-search-index-1`` is present and index has a field ``queryable`` with the value of ``true``. Store result in ``index1``.
+        # An index with the ``name`` of ``test-search-index-2`` is present and index has a field ``queryable`` with the value of ``true``. Store result in ``index2``.
+        index1 = self.wait_for_ready(coll0, name1)
+        index2 = self.wait_for_ready(coll0, name2)
+
+        # Assert that ``index1`` and ``index2`` have the property ``latestDefinition`` whose value is ``{ "mappings" : { "dynamic" : false } }``.
+        for index in [index1, index2]:
+            self.assertIn("latestDefinition", index)
+            self.assertEqual(index["latestDefinition"], definition)
+
+    def test_case_3(self):
+        """Driver can successfully drop search indexes."""
+
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, "test-search-index")
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``.
+        self.wait_for_ready(coll0)
+
+        # Run a ``dropSearchIndex`` on ``coll0``, using ``test-search-index`` for the name.
+        coll0.drop_search_index(_NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until ``listSearchIndexes`` returns an empty array.
+        t0 = time.time()
+        while True:
+            indices = (coll0.list_search_indexes()).to_list()
+            if not indices:
+                break
+            if (time.time() - t0) / 60 > 5:
+                raise TimeoutError("Timed out waiting for index deletion")
+            time.sleep(5)
+
+    def test_case_4(self):
+        """Driver can update a search index."""
+        # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``).
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+        coll0.insert_one({})
+
+        # Create a new search index on ``coll0``.
+        model = {"name": _NAME, "definition": {"mappings": {"dynamic": False}}}
+        resp = coll0.create_search_index(model)
+
+        # Assert that the command returns the name of the index: ``"test-search-index"``.
+        self.assertEqual(resp, _NAME)
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present and index has a field ``queryable`` with the value of ``true``.
+        self.wait_for_ready(coll0)
+
+        # Run an ``updateSearchIndex`` on ``coll0``.
+        # Assert that the command does not error and the server responds with a success.
+        model2: dict[str, Any] = {"name": _NAME, "definition": {"mappings": {"dynamic": True}}}
+        coll0.update_search_index(_NAME, model2["definition"])
+
+        # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied:
+        # An index with the ``name`` of ``test-search-index`` is present. This index is referred to as ``index``.
+        # The index has a field ``queryable`` with a value of ``true`` and has a field ``status`` with the value of ``READY``.
+        predicate = lambda index: index.get("queryable") is True and index.get("status") == "READY"
+        self.wait_for_ready(coll0, predicate=predicate)
+
+        # Assert that an index is present with the name ``test-search-index`` and the definition has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': true } }``.
+        index = ((coll0.list_search_indexes(_NAME)).to_list())[0]
+        self.assertIn("latestDefinition", index)
+        self.assertEqual(index["latestDefinition"], model2["definition"])
+
+    def test_case_5(self):
+        """``dropSearchIndex`` suppresses namespace not found errors."""
+        # Create a driver-side collection object for a randomly generated collection name. Do not create this collection on the server.
+        coll0 = self.db[f"col{uuid.uuid4()}"]
+
+        # Run a ``dropSearchIndex`` command and assert that no error is thrown.
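+        # (the helper suppresses the server's "namespace not found" error)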
+ coll0.drop_search_index("foo") + + def test_case_6(self): + """Driver can successfully create and list search indexes with non-default readConcern and writeConcern.""" + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + coll0.insert_one({}) + + # Apply a write concern ``WriteConcern(w=1)`` and a read concern with ``ReadConcern(level="majority")`` to ``coll0``. + coll0 = coll0.with_options( + write_concern=WriteConcern(w="1"), read_concern=ReadConcern(level="majority") + ) + + # Create a new search index on ``coll0`` with the ``createSearchIndex`` helper. + name = "test-search-index-case6" + model = {"name": name, "definition": {"mappings": {"dynamic": False}}} + resp = coll0.create_search_index(model) + + # Assert that the command returns the name of the index: ``"test-search-index-case6"``. + self.assertEqual(resp, name) + + # Run ``coll0.listSearchIndexes()`` repeatedly every 5 seconds until the following condition is satisfied and store the value in a variable ``index``: + # - An index with the ``name`` of ``test-search-index-case6`` is present and the index has a field ``queryable`` with a value of ``true``. + index = self.wait_for_ready(coll0, name) + + # Assert that ``index`` has a property ``latestDefinition`` whose value is ``{ 'mappings': { 'dynamic': false } }`` + self.assertIn("latestDefinition", index) + self.assertEqual(index["latestDefinition"], model["definition"]) + + def test_case_7(self): + """Driver handles index types.""" + + # Create a collection with the "create" command using a randomly generated name (referred to as ``coll0``). + coll0 = self.db[f"col{uuid.uuid4()}"] + coll0.insert_one({}) + + # Use these search and vector search definitions for indexes. + search_definition = {"mappings": {"dynamic": False}} + vector_search_definition = { + "fields": [ + { + "type": "vector", + "path": "plot_embedding", + "numDimensions": 1536, + "similarity": "euclidean", + }, + ] + } + + # Create a new search index on ``coll0`` that implicitly passes its type. + implicit_search_resp = coll0.create_search_index( + model={"name": _NAME + "-implicit", "definition": search_definition} + ) + + # Get the index definition. + resp = (coll0.list_search_indexes(name=implicit_search_resp)).next() + + # Assert that the index model contains the correct index type: ``"search"``. + self.assertEqual(resp["type"], "search") + + # Create a new search index on ``coll0`` that explicitly passes its type. + explicit_search_resp = coll0.create_search_index( + model={"name": _NAME + "-explicit", "type": "search", "definition": search_definition} + ) + + # Get the index definition. + resp = (coll0.list_search_indexes(name=explicit_search_resp)).next() + + # Assert that the index model contains the correct index type: ``"search"``. + self.assertEqual(resp["type"], "search") + + # Create a new vector search index on ``coll0`` that explicitly passes its type. + explicit_vector_resp = coll0.create_search_index( + model={ + "name": _NAME + "-vector", + "type": "vectorSearch", + "definition": vector_search_definition, + } + ) + + # Get the index definition. + resp = (coll0.list_search_indexes(name=explicit_vector_resp)).next() + + # Assert that the index model contains the correct index type: ``"vectorSearch"``. 
+ self.assertEqual(resp["type"], "vectorSearch") + + # Catch the error raised when trying to create a vector search index without specifying the type + with self.assertRaises(OperationFailure) as e: + coll0.create_search_index( + model={"name": _NAME + "-error", "definition": vector_search_definition} + ) + self.assertIn("Attribute mappings missing.", e.exception.details["errmsg"]) + + +globals().update( + generate_test_classes( + _TEST_PATH, + module=__name__, + ) +) + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_json_util.py b/test/test_json_util.py index ee5b7abb49..cf2c0efb93 100644 --- a/test/test_json_util.py +++ b/test/test_json_util.py @@ -13,19 +13,23 @@ # limitations under the License. """Test some utilities for working with JSON and PyMongo.""" +from __future__ import annotations import datetime import json import re import sys import uuid -from typing import Any, List, MutableMapping +from collections import OrderedDict +from typing import Any, Tuple, Type + +from bson.codec_options import CodecOptions, DatetimeConversion sys.path[0:0] = [""] -from test import IntegrationTest, unittest +from test import unittest -from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, json_util +from bson import EPOCH_AWARE, EPOCH_NAIVE, SON, DatetimeMS, json_util from bson.binary import ( ALL_UUID_REPRESENTATIONS, MD5_SUBTYPE, @@ -35,10 +39,14 @@ UuidRepresentation, ) from bson.code import Code +from bson.datetime_ms import _MAX_UTC_MS from bson.dbref import DBRef +from bson.decimal128 import Decimal128 from bson.int64 import Int64 from bson.json_util import ( + CANONICAL_JSON_OPTIONS, LEGACY_JSON_OPTIONS, + RELAXED_JSON_OPTIONS, DatetimeRepresentation, JSONMode, JSONOptions, @@ -68,6 +76,11 @@ def round_trip(self, doc, **kwargs): def test_basic(self): self.round_trip({"hello": "world"}) + def test_loads_bytes(self): + string = b'{"hello": "world"}' + self.assertEqual(json_util.loads(bytes(string)), {"hello": "world"}) + self.assertEqual(json_util.loads(bytearray(string)), {"hello": "world"}) + def test_json_options_with_options(self): opts = JSONOptions( datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY @@ -124,7 +137,7 @@ def test_datetime(self): '{"dt": { "$date" : "1970-01-01T00:00:00.000Z"}}', '{"dt": { "$date" : "1970-01-01T00:00:00.000000Z"}}', '{"dt": { "$date" : "1970-01-01T00:00:00Z"}}', - '{"dt": {"$date": "1970-01-01T00:00:00.000"}}', + '{"dt": { "$date" : "1970-01-01T00:00:00.000"}}', '{"dt": { "$date" : "1970-01-01T00:00:00"}}', '{"dt": { "$date" : "1970-01-01T00:00:00.000000"}}', '{"dt": { "$date" : "1969-12-31T16:00:00.000-0800"}}', @@ -241,12 +254,94 @@ def test_datetime(self): ), ) + def test_datetime_ms(self): + # Test ISO8601 in-range + dat_min: dict[str, Any] = {"x": DatetimeMS(0)} + dat_max: dict[str, Any] = {"x": DatetimeMS(_MAX_UTC_MS)} + opts = JSONOptions(datetime_representation=DatetimeRepresentation.ISO8601) + + self.assertEqual( + dat_min["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_min))["x"], + ) + self.assertEqual( + dat_max["x"].as_datetime(CodecOptions(tz_aware=False)), + json_util.loads(json_util.dumps(dat_max))["x"], + ) + + # Test ISO8601 out-of-range + dat_min = {"x": DatetimeMS(-1)} + dat_max = {"x": DatetimeMS(_MAX_UTC_MS + 1)} + + self.assertEqual('{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min)) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max), + ) + # Test legacy. 
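+        # Legacy mode renders $date as a bare millisecond integer.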
+ opts = JSONOptions( + datetime_representation=DatetimeRepresentation.LEGACY, json_mode=JSONMode.LEGACY + ) + self.assertEqual('{"x": {"$date": -1}}', json_util.dumps(dat_min, json_options=opts)) + self.assertEqual( + '{"x": {"$date": ' + str(int(dat_max["x"])) + "}}", + json_util.dumps(dat_max, json_options=opts), + ) + + # Test regular. + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.NUMBERLONG, json_mode=JSONMode.LEGACY + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "-1"}}}', json_util.dumps(dat_min, json_options=opts) + ) + self.assertEqual( + '{"x": {"$date": {"$numberLong": "' + str(int(dat_max["x"])) + '"}}}', + json_util.dumps(dat_max, json_options=opts), + ) + + # Test decode from datetime.datetime to DatetimeMS + dat_min = {"x": datetime.datetime.min} + dat_max = {"x": DatetimeMS(_MAX_UTC_MS).as_datetime(CodecOptions(tz_aware=False))} + opts = JSONOptions( + datetime_representation=DatetimeRepresentation.ISO8601, + datetime_conversion=DatetimeConversion.DATETIME_MS, + ) + + self.assertEqual( + DatetimeMS(dat_min["x"]), + json_util.loads(json_util.dumps(dat_min), json_options=opts)["x"], + ) + self.assertEqual( + DatetimeMS(dat_max["x"]), + json_util.loads(json_util.dumps(dat_max), json_options=opts)["x"], + ) + + def test_parse_invalid_date(self): + # These cases should raise ValueError, not IndexError. + for invalid in [ + '{"dt": { "$date" : "1970-01-01T00:00:"}}', + '{"dt": { "$date" : "1970-01-01T01:00"}}', + '{"dt": { "$date" : "1970-01-01T01:"}}', + '{"dt": { "$date" : "1970-01-01T01"}}', + '{"dt": { "$date" : "1970-01-01T"}}', + '{"dt": { "$date" : "1970-01-01"}}', + '{"dt": { "$date" : "1970-01-"}}', + '{"dt": { "$date" : "1970-01"}}', + '{"dt": { "$date" : "1970-"}}', + '{"dt": { "$date" : "1970"}}', + '{"dt": { "$date" : "1"}}', + '{"dt": { "$date" : ""}}', + ]: + with self.assertRaisesRegex(ValueError, "does not match"): + json_util.loads(invalid) + def test_regex_object_hook(self): # Extended JSON format regular expression. pat = "a*b" json_re = '{"$regex": "%s", "$options": "u"}' % pat loaded = json_util.object_hook(json.loads(json_re)) - self.assertTrue(isinstance(loaded, Regex)) + self.assertIsInstance(loaded, Regex) self.assertEqual(pat, loaded.pattern) self.assertEqual(re.U, loaded.flags) @@ -378,7 +473,7 @@ def test_uuid_uuid_rep_unspecified(self): ) # Cannot directly encode native UUIDs with UNSPECIFIED. - doc = {"uuid": _uuid} + doc: dict[str, Any] = {"uuid": _uuid} with self.assertRaises(ValueError): json_util.dumps(doc, json_options=options) @@ -473,36 +568,72 @@ def test_numberlong(self): self.assertEqual(json_util.dumps({"weight": Int64(65535)}), '{"weight": 65535}') json_options = JSONOptions(strict_number_long=True, json_mode=JSONMode.LEGACY) self.assertEqual(json_util.dumps({"weight": Int64(65535)}, json_options=json_options), jsn) - - def test_loads_document_class(self): - # document_class dict should always work + # Ensure json_util.default converts Int64 to int in non-strict mode. 
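+        # (strict_number_long=True instead emits {"$numberLong": ...}, as asserted below)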
+ converted = json_util.default(Int64(65535)) + self.assertEqual(converted, 65535) + self.assertNotIsInstance(converted, Int64) self.assertEqual( - {"foo": "bar"}, - json_util.loads('{"foo": "bar"}', json_options=JSONOptions(document_class=dict)), + json_util.default(Int64(65535), json_options=json_options), {"$numberLong": "65535"} ) - self.assertEqual( - SON([("foo", "bar"), ("b", 1)]), - json_util.loads('{"foo": "bar", "b": 1}', json_options=JSONOptions(document_class=SON)), - ) - -class TestJsonUtilRoundtrip(IntegrationTest): - def test_cursor(self): - db = self.db - - db.drop_collection("test") - docs: List[MutableMapping[str, Any]] = [ - {"foo": [1, 2]}, - {"bar": {"hello": "world"}}, - {"code": Code("function x() { return 1; }")}, - {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)}, - {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}}, + def test_loads_document_class(self): + json_doc = '{"foo": "bar", "b": 1, "d": {"a": 1}}' + expected_doc = {"foo": "bar", "b": 1, "d": {"a": 1}} + for cls in (dict, SON, OrderedDict): + doc = json_util.loads(json_doc, json_options=JSONOptions(document_class=cls)) + self.assertEqual(doc, expected_doc) + self.assertIsInstance(doc, cls) + self.assertIsInstance(doc["d"], cls) + + def test_encode_subclass(self): + cases: list[Tuple[Type, Any]] = [ + (int, (1,)), + (int, (2 << 60,)), + (float, (1.1,)), + (Int64, (64,)), + (Int64, (2 << 60,)), + (str, ("str",)), + (bytes, (b"bytes",)), + (datetime.datetime, (2024, 1, 16)), + (DatetimeMS, (1,)), + (uuid.UUID, ("f47ac10b-58cc-4372-a567-0e02b2c3d479",)), + (Binary, (b"1", USER_DEFINED_SUBTYPE)), + (Code, ("code",)), + (DBRef, ("coll", ObjectId())), + (ObjectId, ("65a6dab5f98bc03906ee3597",)), + (MaxKey, ()), + (MinKey, ()), + (Regex, ("pat",)), + (Timestamp, (1, 1)), + (Decimal128, ("0.5",)), ] - - db.test.insert_many(docs) - reloaded_docs = json_util.loads(json_util.dumps(db.test.find())) - for doc in docs: - self.assertTrue(doc in reloaded_docs) + allopts = [ + CANONICAL_JSON_OPTIONS.with_options(uuid_representation=STANDARD), + RELAXED_JSON_OPTIONS.with_options(uuid_representation=STANDARD), + LEGACY_JSON_OPTIONS.with_options(uuid_representation=STANDARD), + ] + for cls, args in cases: + basic_obj = cls(*args) + my_cls = type(f"My{cls.__name__}", (cls,), {}) + my_obj = my_cls(*args) + for opts in allopts: + expected_json = json_util.dumps(basic_obj, json_options=opts) + self.assertEqual(json_util.dumps(my_obj, json_options=opts), expected_json) + + def test_encode_type_marker(self): + # Assert that a custom subclass can be JSON encoded based on the _type_marker attribute. 
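+        # _type_marker 127 matches bson's MaxKey, so the encoder treats MyMaxKey as a MaxKey.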
+        class MyMaxKey:
+            _type_marker = 127
+
+        expected_json = json_util.dumps(MaxKey())
+        self.assertEqual(json_util.dumps(MyMaxKey()), expected_json)
+
+        # Test a subclass of a BSON type (Binary) that itself extends a built-in type (bytes).
+        class MyBinary(Binary):
+            pass
+
+        expected_json = json_util.dumps(Binary(b"bin", USER_DEFINED_SUBTYPE))
+        self.assertEqual(json_util.dumps(MyBinary(b"bin", USER_DEFINED_SUBTYPE)), expected_json)


 if __name__ == "__main__":
diff --git a/test/test_json_util_integration.py b/test/test_json_util_integration.py
new file mode 100644
index 0000000000..4ef5f10fe2
--- /dev/null
+++ b/test/test_json_util_integration.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from test import IntegrationTest
+from typing import Any, List, MutableMapping
+
+from bson import Binary, Code, DBRef, ObjectId, json_util
+from bson.binary import USER_DEFINED_SUBTYPE
+
+_IS_SYNC = True
+
+
+class TestJsonUtilRoundtrip(IntegrationTest):
+    def test_cursor(self):
+        db = self.db
+
+        db.drop_collection("test")
+        docs: List[MutableMapping[str, Any]] = [
+            {"foo": [1, 2]},
+            {"bar": {"hello": "world"}},
+            {"code": Code("function x() { return 1; }")},
+            {"bin": Binary(b"\x00\x01\x02\x03\x04", USER_DEFINED_SUBTYPE)},
+            {"dbref": {"_ref": DBRef("simple", ObjectId("509b8db456c02c5ab7e63c34"))}},
+        ]
+
+        db.test.insert_many(docs)
+        reloaded_docs = json_util.loads(json_util.dumps((db.test.find()).to_list()))
+        for doc in docs:
+            self.assertIn(doc, reloaded_docs)
diff --git a/test/test_load_balancer.py b/test/test_load_balancer.py
index 378ae33e03..472ef51da3 100644
--- a/test/test_load_balancer.py
+++ b/test/test_load_balancer.py
@@ -13,41 +13,60 @@
 # limitations under the License.

 """Test the Load Balancer unified spec tests."""
+from __future__ import annotations

+import asyncio
 import gc
 import os
+import pathlib
 import sys
 import threading
+from asyncio import Event
+from test.helpers import ConcurrentRunner, ExceptionCatchingTask
+from test.utils import get_pool
+
+import pytest

 sys.path[0:0] = [""]

 from test import IntegrationTest, client_context, unittest
 from test.unified_format import generate_test_classes
-from test.utils import ExceptionCatchingThread, get_pool, rs_client, wait_until
+from test.utils_shared import (
+    create_event,
+    wait_until,
+)
+
+_IS_SYNC = True
+
+pytestmark = pytest.mark.load_balancer

 # Location of JSON test specifications.
-TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "load_balancer")
+if _IS_SYNC:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "load_balancer")
+else:
+    _TEST_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "load_balancer")

 # Generate unified tests.
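+# (generate_test_classes builds unittest.TestCase subclasses from the JSON spec
+# files under _TEST_PATH and injects them into this module's namespace so test
+# discovery picks them up.)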
-globals().update(generate_test_classes(TEST_PATH, module=__name__)) +globals().update(generate_test_classes(_TEST_PATH, module=__name__)) class TestLB(IntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True def test_connections_are_only_returned_once(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3011 + self.skipTest("Test is flaky on PyPy") pool = get_pool(self.client) - nconns = len(pool.sockets) + n_conns = len(pool.conns) self.db.test.find_one({}) - self.assertEqual(len(pool.sockets), nconns) - list(self.db.test.aggregate([{"$limit": 1}])) - self.assertEqual(len(pool.sockets), nconns) + self.assertEqual(len(pool.conns), n_conns) + (self.db.test.aggregate([{"$limit": 1}])).to_list() + self.assertEqual(len(pool.conns), n_conns) @client_context.require_load_balancer def test_unpin_committed_transaction(self): - client = rs_client() - self.addCleanup(client.close) + client = self.rs_client() pool = get_pool(client) coll = client[self.db.name].test with client.start_session() as session: @@ -77,8 +96,7 @@ def create_resource(coll): self._test_no_gc_deadlock(create_resource) def _test_no_gc_deadlock(self, create_resource): - client = rs_client() - self.addCleanup(client.close) + client = self.rs_client() pool = get_pool(client) coll = client[self.db.name].test coll.insert_many([{} for _ in range(10)]) @@ -96,19 +114,19 @@ def _test_no_gc_deadlock(self, create_resource): if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. - thread = PoolLocker(pool) - thread.start() - self.assertTrue(thread.locked.wait(5), "timed out") + task = PoolLocker(pool) + task.start() + self.assertTrue(task.wait(task.locked, 5), "timed out") # Garbage collect the resource while the pool is locked to ensure we # don't deadlock. del resource # On PyPy it can take a few rounds to collect the cursor. for _ in range(3): gc.collect() - thread.unlock.set() - thread.join(5) - self.assertFalse(thread.is_alive()) - self.assertIsNone(thread.exc) + task.unlock.set() + task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) wait_until(lambda: pool.active_sockets == 0, "return socket") # Run another operation to ensure the socket still works. @@ -116,50 +134,61 @@ def _test_no_gc_deadlock(self, create_resource): @client_context.require_transactions def test_session_gc(self): - client = rs_client() - self.addCleanup(client.close) + client = self.rs_client() pool = get_pool(client) session = client.start_session() session.start_transaction() client.test_session_gc.test.find_one({}, session=session) + # Cleanup the transaction left open on the server + self.addCleanup(self.client.admin.command, "killSessions", [session.session_id]) if client_context.load_balancer: self.assertEqual(pool.active_sockets, 1) # Pinned. - thread = PoolLocker(pool) - thread.start() - self.assertTrue(thread.locked.wait(5), "timed out") + task = PoolLocker(pool) + task.start() + self.assertTrue(task.wait(task.locked, 5), "timed out") # Garbage collect the session while the pool is locked to ensure we # don't deadlock. del session # On PyPy it can take a few rounds to collect the session. for _ in range(3): gc.collect() - thread.unlock.set() - thread.join(5) - self.assertFalse(thread.is_alive()) - self.assertIsNone(thread.exc) + task.unlock.set() + task.join(5) + self.assertFalse(task.is_alive()) + self.assertIsNone(task.exc) wait_until(lambda: pool.active_sockets == 0, "return socket") # Run another operation to ensure the socket still works. 
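+        # (If the pinned connection had leaked or the pool were still locked,
+        # this follow-up operation would hang or fail.)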
client[self.db.name].test.delete_many({}) -class PoolLocker(ExceptionCatchingThread): +class PoolLocker(ExceptionCatchingTask): def __init__(self, pool): - super(PoolLocker, self).__init__(target=self.lock_pool) + super().__init__(target=self.lock_pool) self.pool = pool self.daemon = True - self.locked = threading.Event() - self.unlock = threading.Event() + self.locked = create_event() + self.unlock = create_event() def lock_pool(self): with self.pool.lock: self.locked.set() # Wait for the unlock flag. - unlock_pool = self.unlock.wait(10) + unlock_pool = self.wait(self.unlock, 10) if not unlock_pool: raise Exception("timed out waiting for unlock signal: deadlock?") + def wait(self, event: Event, timeout: int): + if _IS_SYNC: + return event.wait(timeout) # type: ignore[call-arg] + else: + try: + asyncio.wait_for(event.wait(), timeout=timeout) + except asyncio.TimeoutError: + return False + return True + if __name__ == "__main__": unittest.main() diff --git a/test/test_logger.py b/test/test_logger.py new file mode 100644 index 0000000000..a7d97927fa --- /dev/null +++ b/test/test_logger.py @@ -0,0 +1,144 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +from test import IntegrationTest, client_context, unittest +from unittest.mock import patch + +from bson import json_util +from pymongo.errors import OperationFailure +from pymongo.logger import _DEFAULT_DOCUMENT_LENGTH + +_IS_SYNC = True + + +# https://github.com/mongodb/specifications/tree/master/source/command-logging-and-monitoring/tests#prose-tests +class TestLogger(IntegrationTest): + def test_default_truncation_limit(self): + docs = [{"x": "y"} for _ in range(100)] + db = self.db + + with patch.dict("os.environ"): + os.environ.pop("MONGOB_LOG_MAX_DOCUMENT_LENGTH", None) + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + db.test.insert_many(docs) + + cmd_started_log = json_util.loads(cm.records[0].getMessage()) + self.assertEqual(len(cmd_started_log["command"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertLessEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + db.test.find({}).to_list() + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertEqual(len(cmd_succeeded_log["reply"]), _DEFAULT_DOCUMENT_LENGTH + 3) + + def test_configured_truncation_limit(self): + cmd = {"hello": True} + db = self.db + with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": "5"}): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + db.command(cmd) + + cmd_started_log = json_util.loads(cm.records[0].getMessage()) + self.assertEqual(len(cmd_started_log["command"]), 5 + 3) + + cmd_succeeded_log = json_util.loads(cm.records[1].getMessage()) + self.assertLessEqual(len(cmd_succeeded_log["reply"]), 5 + 3) + with self.assertRaises(OperationFailure): + 
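+                    # (The unknown command fails server-side; its failure document
+                    # is truncated to the same limit, where the extra 3 accounts
+                    # for the appended "..." marker.)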
db.command({"notARealCommand": True}) + cmd_failed_log = json_util.loads(cm.records[-1].getMessage()) + self.assertEqual(len(cmd_failed_log["failure"]), 5 + 3) + + def test_truncation_multi_byte_codepoints(self): + document_lengths = ["20000", "20001", "20002"] + multi_byte_char_str_len = 50_000 + str_to_repeat = "界" + + multi_byte_char_str = "" + for i in range(multi_byte_char_str_len): + multi_byte_char_str += str_to_repeat + + for length in document_lengths: + with patch.dict("os.environ", {"MONGOB_LOG_MAX_DOCUMENT_LENGTH": length}): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + self.db.test.insert_one({"x": multi_byte_char_str}) + cmd_started_log = json_util.loads(cm.records[0].getMessage())["command"] + + cmd_started_log = cmd_started_log[:-3] + last_3_bytes = cmd_started_log.encode()[-3:].decode() + + self.assertEqual(last_3_bytes, str_to_repeat) + + def test_logging_without_listeners(self): + c = self.single_client() + self.assertEqual(len(c._event_listeners.event_listeners()), 0) + with self.assertLogs("pymongo.connection", level="DEBUG") as cm: + c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + with self.assertLogs("pymongo.serverSelection", level="DEBUG") as cm: + c.db.test.insert_one({"x": "1"}) + self.assertGreater(len(cm.records), 0) + + @client_context.require_failCommand_fail_point + def test_logging_retry_read_attempts(self): + self.db.test.insert_one({"x": "1"}) + + with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "failCommands": ["find"], + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + self.db.test.find_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying read attempt" in r.getMessage() + ] + self.assertEqual(len(retry_messages), 1) + + @client_context.require_failCommand_fail_point + @client_context.require_retryable_writes + def test_logging_retry_write_attempts(self): + with self.fail_point( + { + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError"], + "failCommands": ["insert"], + }, + } + ): + with self.assertLogs("pymongo.command", level="DEBUG") as cm: + self.db.test.insert_one({"x": "1"}) + + retry_messages = [ + r.getMessage() for r in cm.records if "Retrying write attempt" in r.getMessage() + ] + self.assertEqual(len(retry_messages), 1) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_max_staleness.py b/test/test_max_staleness.py index 799083f3b4..56e047fd4b 100644 --- a/test/test_max_staleness.py +++ b/test/test_max_staleness.py @@ -13,78 +13,91 @@ # limitations under the License. """Test maxStalenessSeconds support.""" +from __future__ import annotations +import asyncio import os import sys import time import warnings +from pathlib import Path + +from pymongo import MongoClient +from pymongo.operations import _Op sys.path[0:0] = [""] -from test import client_context, unittest -from test.utils import rs_or_single_client +from test import PyMongoTestCase, client_context, unittest from test.utils_selection_tests import create_selection_tests -from pymongo import MongoClient from pymongo.errors import ConfigurationError from pymongo.server_selectors import writable_server_selector +_IS_SYNC = True + # Location of JSON test specifications. 
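+# (The asynchronous variant of this module is generated one directory deeper,
+# which is why the sync and async suites resolve the spec directory differently.)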
-_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "max_staleness") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "max_staleness") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "max_staleness") -class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore pass -class TestMaxStaleness(unittest.TestCase): +class TestMaxStaleness(PyMongoTestCase): def test_max_staleness(self): - client = MongoClient() + client = self.simple_client() self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary") + client = self.simple_client("mongodb://a/?readPreference=secondary") self.assertEqual(-1, client.read_preference.max_staleness) # These tests are specified in max-staleness-tests.rst. with self.assertRaises(ConfigurationError): # Default read pref "primary" can't be used with max staleness. - MongoClient("mongodb://a/?maxStalenessSeconds=120") + self.simple_client("mongodb://a/?maxStalenessSeconds=120") with self.assertRaises(ConfigurationError): # Read pref "primary" can't be used with max staleness. - MongoClient("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") + self.simple_client("mongodb://a/?readPreference=primary&maxStalenessSeconds=120") - client = MongoClient("mongodb://host/?maxStalenessSeconds=-1") + client = self.simple_client("mongodb://host/?maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") + client = self.simple_client("mongodb://host/?readPreference=primary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient("mongodb://host/?readPreference=secondary&maxStalenessSeconds=120") + client = self.simple_client( + "mongodb://host/?readPreference=secondary&maxStalenessSeconds=120" + ) self.assertEqual(120, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") + client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=1") self.assertEqual(1, client.read_preference.max_staleness) - client = MongoClient("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") + client = self.simple_client("mongodb://a/?readPreference=secondary&maxStalenessSeconds=-1") self.assertEqual(-1, client.read_preference.max_staleness) - client = MongoClient(maxStalenessSeconds=-1, readPreference="nearest") + client = self.simple_client(maxStalenessSeconds=-1, readPreference="nearest") self.assertEqual(-1, client.read_preference.max_staleness) with self.assertRaises(TypeError): # Prohibit None. - MongoClient(maxStalenessSeconds=None, readPreference="nearest") + self.simple_client(maxStalenessSeconds=None, readPreference="nearest") def test_max_staleness_float(self): with self.assertRaises(TypeError) as ctx: - rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") + self.rs_or_single_client(maxStalenessSeconds=1.5, readPreference="nearest") self.assertIn("must be an integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest") + client = self.simple_client( + "mongodb://host/?maxStalenessSeconds=1.5&readPreference=nearest" + ) # Option was ignored. 
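+            # (An unparsable URI value is dropped with a warning rather than
+            # raising, so max_staleness keeps its default of -1.)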
self.assertEqual(-1, client.read_preference.max_staleness) @@ -93,13 +106,15 @@ def test_max_staleness_float(self): def test_max_staleness_zero(self): # Zero is too small. with self.assertRaises(ValueError) as ctx: - rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") + self.rs_or_single_client(maxStalenessSeconds=0, readPreference="nearest") self.assertIn("must be a positive integer", str(ctx.exception)) with warnings.catch_warnings(record=True) as ctx: warnings.simplefilter("always") - client = MongoClient("mongodb://host/?maxStalenessSeconds=0&readPreference=nearest") + client = self.simple_client( + "mongodb://host/?maxStalenessSeconds=0&readPreference=nearest" + ) # Option was ignored. self.assertEqual(-1, client.read_preference.max_staleness) @@ -108,11 +123,11 @@ def test_max_staleness_zero(self): @client_context.require_replica_set def test_last_write_date(self): # From max-staleness-tests.rst, "Parse lastWriteDate". - client = rs_or_single_client(heartbeatFrequencyMS=500) + client = self.rs_or_single_client(heartbeatFrequencyMS=500) client.pymongo_test.test.insert_one({}) # Wait for the server description to be updated. time.sleep(1) - server = client._topology.select_server(writable_server_selector) + server = client._topology.select_server(writable_server_selector, _Op.TEST) first = server.description.last_write_date self.assertTrue(first) # The first last_write_date may correspond to a internal server write, @@ -121,8 +136,11 @@ def test_last_write_date(self): client.pymongo_test.test.insert_one({}) # Wait for the server description to be updated. time.sleep(1) - server = client._topology.select_server(writable_server_selector) + server = client._topology.select_server(writable_server_selector, _Op.TEST) second = server.description.last_write_date + assert first is not None + + assert second is not None self.assertGreater(second, first) self.assertLess(second, first + 10) diff --git a/test/test_mongos_load_balancing.py b/test/test_mongos_load_balancing.py index e39940f56b..8c31854343 100644 --- a/test/test_mongos_load_balancing.py +++ b/test/test_mongos_load_balancing.py @@ -13,30 +13,31 @@ # limitations under the License. """Test MongoClient's mongos load balancing using a mock.""" +from __future__ import annotations +import asyncio import sys import threading +from test.helpers import ConcurrentRunner + +from pymongo.operations import _Op sys.path[0:0] = [""] -from test import MockClientTest, client_context, unittest +from test import MockClientTest, client_context, connected, unittest from test.pymongo_mocks import MockClient -from test.utils import connected, wait_until +from test.utils_shared import wait_until from pymongo.errors import AutoReconnect, InvalidOperation from pymongo.server_selectors import writable_server_selector from pymongo.topology_description import TOPOLOGY_TYPE - -@client_context.require_connection -@client_context.require_no_load_balancer -def setUpModule(): - pass +_IS_SYNC = True -class SimpleOp(threading.Thread): +class SimpleOp(ConcurrentRunner): def __init__(self, client): - super(SimpleOp, self).__init__() + super().__init__() self.client = client self.passed = False @@ -45,25 +46,31 @@ def run(self): self.passed = True # No exception raised. 
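+# (ConcurrentRunner is the suite's helper that runs the same body on a thread
+# in the synchronous tests and on an asyncio task in the asynchronous ones.)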
-def do_simple_op(client, nthreads): - threads = [SimpleOp(client) for _ in range(nthreads)] - for t in threads: +def do_simple_op(client, ntasks): + tasks = [SimpleOp(client) for _ in range(ntasks)] + for t in tasks: t.start() - for t in threads: + for t in tasks: t.join() - for t in threads: + for t in tasks: assert t.passed def writable_addresses(topology): - return set( - server.description.address for server in topology.select_servers(writable_server_selector) - ) + return { + server.description.address + for server in topology.select_servers(writable_server_selector, _Op.TEST) + } class TestMongosLoadBalancing(MockClientTest): + @client_context.require_connection + @client_context.require_no_load_balancer + def setUp(self): + super().setUp() + def mock_client(self, **kwargs): mock_client = MockClient( standalones=[], @@ -71,7 +78,7 @@ def mock_client(self, **kwargs): mongoses=["a:1", "b:2", "c:3"], host="a:1,b:2,c:3", connect=False, - **kwargs + **kwargs, ) self.addCleanup(mock_client.close) @@ -94,7 +101,7 @@ def test_lazy_connect(self): wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") def test_failover(self): - nthreads = 10 + ntasks = 10 client = connected(self.mock_client(localThresholdMS=0.001)) wait_until(lambda: len(client.nodes) == 3, "connect to all mongoses") @@ -114,14 +121,14 @@ def f(): passed.append(True) - threads = [threading.Thread(target=f) for _ in range(nthreads)] - for t in threads: + tasks = [ConcurrentRunner(target=f) for _ in range(ntasks)] + for t in tasks: t.start() - for t in threads: + for t in tasks: t.join() - self.assertEqual(nthreads, len(passed)) + self.assertEqual(ntasks, len(passed)) # Down host removed from list. self.assertEqual(2, len(client.nodes)) @@ -133,7 +140,7 @@ def test_local_threshold(self): topology = client._topology # All are within a 30-ms latency window, see self.mock_client(). - self.assertEqual(set([("a", 1), ("b", 2), ("c", 3)]), writable_addresses(topology)) + self.assertEqual({("a", 1), ("b", 2), ("c", 3)}, writable_addresses(topology)) # No error client.admin.command("ping") @@ -143,7 +150,7 @@ def test_local_threshold(self): # No error client.db.command("ping") # Our chosen mongos goes down. - client.kill_host("%s:%s" % next(iter(client.nodes))) + client.kill_host("{}:{}".format(*next(iter(client.nodes)))) try: client.db.command("ping") except: @@ -174,13 +181,16 @@ def test_load_balancing(self): self.assertEqual(TOPOLOGY_TYPE.Sharded, topology.description.topology_type) # a and b are within the 15-ms latency window, see self.mock_client(). - self.assertEqual(set([("a", 1), ("b", 2)]), writable_addresses(topology)) + self.assertEqual({("a", 1), ("b", 2)}, writable_addresses(topology)) client.mock_rtts["a:1"] = 0.045 # Discover only b is within latency window. + def predicate(): + return {("b", 2)} == writable_addresses(topology) + wait_until( - lambda: set([("b", 2)]) == writable_addresses(topology), + predicate, 'discover server "a" is too far', ) diff --git a/test/test_monitor.py b/test/test_monitor.py index 85cfb0bc40..c10662c893 100644 --- a/test/test_monitor.py +++ b/test/test_monitor.py @@ -13,23 +13,27 @@ # limitations under the License. 
"""Test the monitor module.""" +from __future__ import annotations +import asyncio import gc +import subprocess import sys +import warnings from functools import partial sys.path[0:0] = [""] -from test import IntegrationTest, unittest +from test import IntegrationTest, client_context, connected, unittest from test.utils import ( - ServerAndTopologyEventListener, - connected, - single_client, wait_until, ) +from test.utils_shared import ServerAndTopologyEventListener, gevent_monkey_patched from pymongo.periodic_executor import _EXECUTORS +_IS_SYNC = True + def unregistered(ref): gc.collect() @@ -46,39 +50,71 @@ def get_executors(client): return [e for e in executors if e is not None] -def create_client(): - listener = ServerAndTopologyEventListener() - client = single_client(event_listeners=[listener]) - connected(client) - return client - - class TestMonitor(IntegrationTest): + def create_client(self): + listener = ServerAndTopologyEventListener() + client = self.unmanaged_single_client(event_listeners=[listener]) + connected(client) + return client + + @unittest.skipIf("PyPy" in sys.version, "PYTHON-5283 fails often on PyPy") + @unittest.skipIf( + gevent_monkey_patched(), "PYTHON-5516 Resources are not cleared when using gevent" + ) def test_cleanup_executors_on_client_del(self): - client = create_client() - executors = get_executors(client) - self.assertEqual(len(executors), 4) - - # Each executor stores a weakref to itself in _EXECUTORS. - executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] - - del executors - del client - - for ref, name in executor_refs: - wait_until(partial(unregistered, ref), "unregister executor: %s" % (name,), timeout=5) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + client = self.create_client() + executors = get_executors(client) + self.assertEqual(len(executors), 4) + + # Each executor stores a weakref to itself in _EXECUTORS. + executor_refs = [(r, r()._name) for r in _EXECUTORS.copy() if r() in executors] + + del executors + del client + + for ref, name in executor_refs: + wait_until(partial(unregistered, ref), f"unregister executor: {name}", timeout=5) + + def resource_warning_caught(): + gc.collect() + for warning in w: + if ( + issubclass(warning.category, ResourceWarning) + and "Call MongoClient.close() to safely shut down your client and free up resources." 
+ in str(warning.message) + ): + return True + return False + + wait_until(resource_warning_caught, "catch resource warning") def test_cleanup_executors_on_client_close(self): - client = create_client() + client = self.create_client() executors = get_executors(client) self.assertEqual(len(executors), 4) client.close() for executor in executors: - wait_until( - lambda: executor._stopped, "closed executor: %s" % (executor._name,), timeout=5 - ) + wait_until(lambda: executor._stopped, f"closed executor: {executor._name}", timeout=5) + + @client_context.require_sync + def test_no_thread_start_runtime_err_on_shutdown(self): + """Test we silence noisy runtime errors fired when the MongoClient spawns a new thread + on process shutdown.""" + command = [ + sys.executable, + "-c", + "from pymongo import MongoClient; c = MongoClient()", + ] + completed_process: subprocess.CompletedProcess = subprocess.run( + command, capture_output=True + ) + + self.assertFalse(completed_process.stderr) + self.assertFalse(completed_process.stdout) if __name__ == "__main__": diff --git a/test/test_monitoring.py b/test/test_monitoring.py index 0b8200c019..f5a18af9ed 100644 --- a/test/test_monitoring.py +++ b/test/test_monitoring.py @@ -11,7 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +import asyncio import copy import datetime import sys @@ -20,91 +22,93 @@ sys.path[0:0] = [""] -from test import IntegrationTest, client_context, client_knobs, sanitize_cmd, unittest -from test.utils import EventListener, rs_or_single_client, single_client, wait_until +from test import ( + IntegrationTest, + client_context, + client_knobs, + sanitize_cmd, + unittest, +) +from test.utils_shared import ( + EventListener, + OvertCommandListener, + wait_until, +) from bson.int64 import Int64 from bson.objectid import ObjectId from bson.son import SON from pymongo import CursorType, DeleteOne, InsertOne, UpdateOne, monitoring -from pymongo.command_cursor import CommandCursor from pymongo.errors import AutoReconnect, NotPrimaryError, OperationFailure from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.command_cursor import CommandCursor from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestCommandMonitoring(IntegrationTest): listener: EventListener @classmethod - @client_context.require_connection - def setUpClass(cls): - super(TestCommandMonitoring, cls).setUpClass() - cls.listener = EventListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener], retryWrites=False) + def setUpClass(cls) -> None: + cls.listener = OvertCommandListener() - @classmethod - def tearDownClass(cls): - cls.client.close() - super(TestCommandMonitoring, cls).tearDownClass() - - def tearDown(self): - self.listener.results.clear() - super(TestCommandMonitoring, self).tearDown() + @client_context.require_connection + def setUp(self) -> None: + super().setUp() + self.listener.reset() + self.client = self.rs_or_single_client(event_listeners=[self.listener], retryWrites=False) def test_started_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, 
monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(SON([("ping", 1)]), started.command) self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) def test_succeeded_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) self.assertEqual("ping", succeeded.command_name) self.assertEqual(self.client.address, succeeded.connection_id) self.assertEqual(1, succeeded.reply.get("ok")) - self.assertTrue(isinstance(succeeded.request_id, int)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(succeeded.request_id, int) + self.assertIsInstance(succeeded.duration_micros, int) def test_failed_simple(self): try: self.client.pymongo_test.command("oops!") except OperationFailure: pass - results = self.listener.results - started = results["started"][0] - failed = results["failed"][0] - self.assertEqual(0, len(results["succeeded"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) self.assertEqual("oops!", failed.command_name) self.assertEqual(self.client.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) - self.assertTrue(isinstance(failed.request_id, int)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) def test_find_one(self): self.client.pymongo_test.test.find_one() - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("find", "test"), ("filter", {}), ("limit", 1), ("singleBatch", True)]), started.command, @@ -112,21 +116,20 @@ def test_find_one(self): 
self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) def test_find_and_get_more(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(10)]) - self.listener.results.clear() + self.listener.reset() cursor = self.client.pymongo_test.test.find(projection={"_id": False}, batch_size=4) for _ in range(4): next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 4)] @@ -136,27 +139,26 @@ def test_find_and_get_more(self): self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] self.assertEqual(csr["id"], cursor_id) self.assertEqual(csr["ns"], "pymongo_test.test") self.assertEqual(csr["firstBatch"], [{} for _ in range(4)]) - self.listener.results.clear() + self.listener.reset() # Next batch. Exhausting the cursor could cause a getMore # that returns id of 0 and no results. 
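+        # (Fetching one more document here guarantees a getMore is observed
+        # while the cursor is still open; the finally block drains the rest.)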
next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), started.command, @@ -164,11 +166,11 @@ def test_find_and_get_more(self): self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("getMore", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) csr = succeeded.reply["cursor"] self.assertEqual(csr["id"], cursor_id) @@ -176,32 +178,31 @@ def test_find_and_get_more(self): self.assertEqual(csr["nextBatch"], [{} for _ in range(4)]) finally: # Exhaust the cursor to avoid kill cursors. - tuple(cursor) + tuple(cursor.to_list()) def test_find_with_explain(self): cmd = SON([("explain", SON([("find", "test"), ("filter", {})]))]) self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) res = coll.find().explain() - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(cmd, started.command) self.assertEqual("explain", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("explain", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(self.client.address, succeeded.connection_id) self.assertEqual(res, succeeded.reply) @@ -212,7 +213,7 @@ def _test_find_options(self, query, expected_cmd): coll.insert_many([{"x": i} for i in range(5)]) # Test that we publish the unwrapped command. 
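+        # (Against mongos with a non-primary read preference the driver wraps
+        # the find in $readPreference; monitoring must still report the bare
+        # command.)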
- self.listener.results.clear() + self.listener.reset() if self.client.is_mongos: coll = coll.with_options(read_preference=ReadPreference.PRIMARY_PREFERRED) @@ -220,60 +221,59 @@ def _test_find_options(self, query, expected_cmd): next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(expected_cmd, started.command) self.assertEqual("find", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(self.client.address, succeeded.connection_id) finally: # Exhaust the cursor to avoid kill cursors. - tuple(cursor) + tuple(cursor.to_list()) def test_find_options(self): - query = dict( - filter={}, - hint=[("x", 1)], - max_time_ms=10000, - max={"x": 10}, - min={"x": -10}, - return_key=True, - show_record_id=True, - projection={"x": False}, - skip=1, - no_cursor_timeout=True, - sort=[("_id", 1)], - allow_partial_results=True, - comment="this is a test", - batch_size=2, - ) + query = { + "filter": {}, + "hint": [("x", 1)], + "max_time_ms": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "return_key": True, + "show_record_id": True, + "projection": {"x": False}, + "skip": 1, + "no_cursor_timeout": True, + "sort": [("_id", 1)], + "allow_partial_results": True, + "comment": "this is a test", + "batch_size": 2, + } - cmd = dict( - find="test", - filter={}, - hint=SON([("x", 1)]), - comment="this is a test", - maxTimeMS=10000, - max={"x": 10}, - min={"x": -10}, - returnKey=True, - showRecordId=True, - sort=SON([("_id", 1)]), - projection={"x": False}, - skip=1, - batchSize=2, - noCursorTimeout=True, - allowPartialResults=True, - ) + cmd = { + "find": "test", + "filter": {}, + "hint": SON([("x", 1)]), + "comment": "this is a test", + "maxTimeMS": 10000, + "max": {"x": 10}, + "min": {"x": -10}, + "returnKey": True, + "showRecordId": True, + "sort": SON([("_id", 1)]), + "projection": {"x": False}, + "skip": 1, + "batchSize": 2, + "noCursorTimeout": True, + "allowPartialResults": True, + } if client_context.version < (4, 1, 0, -1): query["max_scan"] = 10 @@ -284,16 +284,16 @@ def test_find_options(self): @client_context.require_version_max(3, 7, 2) def test_find_snapshot(self): # Test "snapshot" parameter separately, can't combine with "sort". 
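+        # ("snapshot" only exists on older servers, hence the
+        # require_version_max(3, 7, 2) gate above.)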
- query = dict(filter={}, snapshot=True) + query = {"filter": {}, "snapshot": True} - cmd = dict(find="test", filter={}, snapshot=True) + cmd = {"find": "test", "filter": {}, "snapshot": True} self._test_find_options(query, cmd) def test_command_and_get_more(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{"x": 1} for _ in range(10)]) - self.listener.results.clear() + self.listener.reset() coll = self.client.pymongo_test.test # Test that we publish the unwrapped command. if self.client.is_mongos: @@ -302,11 +302,10 @@ def test_command_and_get_more(self): for _ in range(4): next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [ @@ -320,11 +319,11 @@ def test_command_and_get_more(self): self.assertEqual("aggregate", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("aggregate", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) expected_cursor = { "id": cursor_id, @@ -333,14 +332,13 @@ def test_command_and_get_more(self): } self.assertEqualCommand(expected_cursor, succeeded.reply.get("cursor")) - self.listener.results.clear() + self.listener.reset() next(cursor) try: - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), ("batchSize", 4)]), started.command, @@ -348,11 +346,11 @@ def test_command_and_get_more(self): self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("getMore", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) 
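+        # (The ping forces the connection handshake and authentication to
+        # complete first, so the reset below leaves only this test's events.)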
expected_result = { "cursor": { @@ -365,7 +363,7 @@ def test_command_and_get_more(self): self.assertEqualReply(expected_result, succeeded.reply) finally: # Exhaust the cursor to avoid kill cursors. - tuple(cursor) + tuple(cursor.to_list()) def test_get_more_failure(self): address = self.client.address @@ -377,22 +375,21 @@ def test_get_more_failure(self): next(cursor) except Exception: pass - results = self.listener.results - started = results["started"][0] - self.assertEqual(0, len(results["succeeded"])) - failed = results["failed"][0] - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + failed = self.listener.failed_events[0] + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test")]), started.command ) self.assertEqual("getMore", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) + self.assertIsInstance(failed.duration_micros, int) self.assertEqual("getMore", failed.command_name) - self.assertTrue(isinstance(failed.request_id, int)) + self.assertIsInstance(failed.request_id, int) self.assertEqual(cursor.address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) @@ -400,43 +397,41 @@ def test_get_more_failure(self): @client_context.require_secondaries_count(1) def test_not_primary_error(self): address = next(iter(client_context.client.secondaries)) - client = single_client(*address, event_listeners=[self.listener]) + client = self.single_client(*address, event_listeners=[self.listener]) # Clear authentication command results from the listener. 
client.admin.command("ping") - self.listener.results.clear() + self.listener.reset() error = None try: client.pymongo_test.test.find_one_and_delete({}) except NotPrimaryError as exc: error = exc.errors - results = self.listener.results - started = results["started"][0] - failed = results["failed"][0] - self.assertEqual(0, len(results["succeeded"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) - self.assertTrue(isinstance(failed, monitoring.CommandFailedEvent)) + started = self.listener.started_events[0] + failed = self.listener.failed_events[0] + self.assertEqual(0, len(self.listener.succeeded_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) + self.assertIsInstance(failed, monitoring.CommandFailedEvent) self.assertEqual("findAndModify", failed.command_name) self.assertEqual(address, failed.connection_id) self.assertEqual(0, failed.failure.get("ok")) - self.assertTrue(isinstance(failed.request_id, int)) - self.assertTrue(isinstance(failed.duration_micros, int)) + self.assertIsInstance(failed.request_id, int) + self.assertIsInstance(failed.duration_micros, int) self.assertEqual(error, failed.failure) @client_context.require_no_mongos def test_exhaust(self): self.client.pymongo_test.test.drop() self.client.pymongo_test.test.insert_many([{} for _ in range(11)]) - self.listener.results.clear() + self.listener.reset() cursor = self.client.pymongo_test.test.find( projection={"_id": False}, batch_size=5, cursor_type=CursorType.EXHAUST ) next(cursor) cursor_id = cursor.cursor_id - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand( SON( [("find", "test"), ("filter", {}), ("projection", {"_id": False}), ("batchSize", 5)] @@ -446,11 +441,11 @@ def test_exhaust(self): self.assertEqual("find", started.command_name) self.assertEqual(cursor.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("find", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertEqual(cursor.address, succeeded.connection_id) expected_result = { "cursor": { @@ -462,12 +457,11 @@ def test_exhaust(self): } self.assertEqualReply(expected_result, succeeded.reply) - self.listener.results.clear() - tuple(cursor) - results = self.listener.results - self.assertEqual(0, len(results["failed"])) - for event in results["started"]: - self.assertTrue(isinstance(event, monitoring.CommandStartedEvent)) + self.listener.reset() + tuple(cursor.to_list()) + self.assertEqual(0, len(self.listener.failed_events)) + for event in self.listener.started_events: + self.assertIsInstance(event, monitoring.CommandStartedEvent) self.assertEqualCommand( SON([("getMore", cursor_id), ("collection", "test"), 
("batchSize", 5)]), event.command, @@ -475,15 +469,15 @@ def test_exhaust(self): self.assertEqual("getMore", event.command_name) self.assertEqual(cursor.address, event.connection_id) self.assertEqual("pymongo_test", event.database_name) - self.assertTrue(isinstance(event.request_id, int)) - for event in results["succeeded"]: - self.assertTrue(isinstance(event, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(event.duration_micros, int)) + self.assertIsInstance(event.request_id, int) + for event in self.listener.succeeded_events: + self.assertIsInstance(event, monitoring.CommandSucceededEvent) + self.assertIsInstance(event.duration_micros, int) self.assertEqual("getMore", event.command_name) - self.assertTrue(isinstance(event.request_id, int)) + self.assertIsInstance(event.request_id, int) self.assertEqual(cursor.address, event.connection_id) # Last getMore receives a response with cursor id 0. - self.assertEqual(0, results["succeeded"][-1].reply["cursor"]["id"]) + self.assertEqual(0, self.listener.succeeded_events[-1].reply["cursor"]["id"]) def test_kill_cursors(self): with client_knobs(kill_cursor_frequency=0.01): @@ -492,14 +486,13 @@ def test_kill_cursors(self): cursor = self.client.pymongo_test.test.find().batch_size(5) next(cursor) cursor_id = cursor.cursor_id - self.listener.results.clear() + self.listener.reset() cursor.close() time.sleep(2) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(started, monitoring.CommandStartedEvent) # There could be more than one cursor_id here depending on # when the thread last ran. self.assertIn(cursor_id, started.command["cursors"]) @@ -507,31 +500,29 @@ def test_kill_cursors(self): self.assertIs(type(started.connection_id), tuple) self.assertEqual(cursor.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(succeeded.duration_micros, int)) + self.assertIsInstance(started.request_id, int) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(succeeded.duration_micros, int) self.assertEqual("killCursors", succeeded.command_name) - self.assertTrue(isinstance(succeeded.request_id, int)) + self.assertIsInstance(succeeded.request_id, int) self.assertIs(type(succeeded.connection_id), tuple) self.assertEqual(cursor.address, succeeded.connection_id) # There could be more than one cursor_id here depending on # when the thread last ran. 
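+            # (The periodic kill-cursors task may batch ids from other dead
+            # cursors, so assert membership rather than an exact reply.)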
- self.assertTrue( - cursor_id in succeeded.reply["cursorsUnknown"] - or cursor_id in succeeded.reply["cursorsKilled"] + self.assertIn( + cursor_id, succeeded.reply["cursorsUnknown"] + succeeded.reply["cursorsKilled"] ) def test_non_bulk_writes(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() # Implied write concern insert_one res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -555,13 +546,12 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # Unacknowledged insert_one - self.listener.results.clear() + self.listener.reset() coll = coll.with_options(write_concern=WriteConcern(w=0)) res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -584,13 +574,12 @@ def test_non_bulk_writes(self): self.assertEqualReply(succeeded.reply, {"ok": 1}) # Explicit write concern insert_one - self.listener.results.clear() + self.listener.reset() coll = coll.with_options(write_concern=WriteConcern(w=1)) res = coll.insert_one({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -615,12 +604,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # delete_many - self.listener.results.clear() + self.listener.reset() res = coll.delete_many({"x": 1}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -645,13 +633,12 @@ def test_non_bulk_writes(self): self.assertEqual(res.deleted_count, reply.get("n")) # replace_one - self.listener.results.clear() + self.listener.reset() oid = ObjectId() res = coll.replace_one({"_id": oid}, {"_id": oid, "x": 1}, upsert=True) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -689,12 +676,11 @@ def test_non_bulk_writes(self): self.assertEqual([{"index": 0, "_id": oid}], reply.get("upserted")) # update_one - self.listener.results.clear() + self.listener.reset() res = 
coll.update_one({"x": 1}, {"$inc": {"x": 1}}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -731,12 +717,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # update_many - self.listener.results.clear() + self.listener.reset() res = coll.update_many({"x": 2}, {"$inc": {"x": 1}}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -773,12 +758,11 @@ def test_non_bulk_writes(self): self.assertEqual(1, reply.get("n")) # delete_one - self.listener.results.clear() + self.listener.reset() _ = coll.delete_one({"x": 3}) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -807,14 +791,13 @@ def test_non_bulk_writes(self): # write errors coll.insert_one({"_id": 1}) try: - self.listener.results.clear() + self.listener.reset() coll.insert_one({"_id": 1}) except OperationFailure: pass - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON( [ @@ -848,15 +831,14 @@ def test_insert_many(self): # This always uses the bulk API. coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() big = "x" * (1024 * 1024 * 4) docs = [{"_id": i, "big": big} for i in range(6)] coll.insert_many(docs) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) documents = [] count = 0 operation_id = started[0].operation_id @@ -889,16 +871,15 @@ def test_insert_many_unacknowledged(self): coll = self.client.pymongo_test.test coll.drop() unack_coll = coll.with_options(write_concern=WriteConcern(w=0)) - self.listener.results.clear() + self.listener.reset() # Force two batches on legacy servers. 
big = "x" * (1024 * 1024 * 12) docs = [{"_id": i, "big": big} for i in range(6)] unack_coll.insert_many(docs) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) documents = [] operation_id = started[0].operation_id self.assertIsInstance(operation_id, int) @@ -923,12 +904,16 @@ def test_insert_many_unacknowledged(self): self.assertEqual(succeed.operation_id, operation_id) self.assertEqual(1, succeed.reply.get("ok")) self.assertEqual(documents, docs) - wait_until(lambda: coll.count_documents({}) == 6, "insert documents with w=0") + + def check(): + return coll.count_documents({}) == 6 + + wait_until(check, "insert documents with w=0") def test_bulk_write(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() coll.bulk_write( [ @@ -937,10 +922,9 @@ def test_bulk_write(self): DeleteOne({"_id": 1}), ] ) - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) self.assertEqual(3, len(pairs)) @@ -991,7 +975,7 @@ def test_bulk_write(self): @client_context.require_failCommand_fail_point def test_bulk_write_command_network_error(self): coll = self.client.pymongo_test.test - self.listener.results.clear() + self.listener.reset() insert_network_error = { "configureFailPoint": "failCommand", @@ -1004,7 +988,7 @@ def test_bulk_write_command_network_error(self): with self.fail_point(insert_network_error): with self.assertRaises(AutoReconnect): coll.bulk_write([InsertOne({"_id": 1})]) - failed = self.listener.results["failed"] + failed = self.listener.failed_events self.assertEqual(1, len(failed)) event = failed[0] self.assertEqual(event.command_name, "insert") @@ -1015,7 +999,7 @@ def test_bulk_write_command_network_error(self): @client_context.require_failCommand_fail_point def test_bulk_write_command_error(self): coll = self.client.pymongo_test.test - self.listener.results.clear() + self.listener.reset() insert_command_error = { "configureFailPoint": "failCommand", @@ -1029,7 +1013,7 @@ def test_bulk_write_command_error(self): with self.fail_point(insert_command_error): with self.assertRaises(NotPrimaryError): coll.bulk_write([InsertOne({"_id": 1})]) - failed = self.listener.results["failed"] + failed = self.listener.failed_events self.assertEqual(1, len(failed)) event = failed[0] self.assertEqual(event.command_name, "insert") @@ -1040,7 +1024,7 @@ def test_bulk_write_command_error(self): def test_write_errors(self): coll = self.client.pymongo_test.test coll.drop() - self.listener.results.clear() + self.listener.reset() try: coll.bulk_write( @@ -1054,10 +1038,9 @@ def test_write_errors(self): ) except OperationFailure: pass - results = self.listener.results - started = results["started"] - succeeded = results["succeeded"] - self.assertEqual(0, len(results["failed"])) + started = self.listener.started_events + succeeded = self.listener.succeeded_events + self.assertEqual(0, len(self.listener.failed_events)) operation_id = started[0].operation_id pairs = list(zip(started, succeeded)) errors = [] @@ -1077,19 
+1060,18 @@ def test_write_errors(self): errors.extend(succeed.reply["writeErrors"]) self.assertEqual(2, len(errors)) - fields = set(["index", "code", "errmsg"]) + fields = {"index", "code", "errmsg"} for error in errors: - self.assertTrue(fields.issubset(set(error))) + self.assertLessEqual(fields, set(error)) def test_first_batch_helper(self): # Regardless of server version and use of helpers._first_batch # this test should still pass. - self.listener.results.clear() - tuple(self.client.pymongo_test.test.list_indexes()) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + self.listener.reset() + tuple((self.client.pymongo_test.test.list_indexes()).to_list()) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) expected = SON([("listIndexes", "test"), ("cursor", {})]) self.assertEqualCommand(expected, started.command) @@ -1102,31 +1084,39 @@ def test_first_batch_helper(self): self.assertEqual(started.command_name, succeeded.command_name) self.assertEqual(started.request_id, succeeded.request_id) self.assertEqual(started.connection_id, succeeded.connection_id) - self.assertTrue("cursor" in succeeded.reply) - self.assertTrue("ok" in succeeded.reply) + self.assertIn("cursor", succeeded.reply) + self.assertIn("ok", succeeded.reply) - self.listener.results.clear() + self.listener.reset() + @client_context.require_version_max(6, 1, 99) def test_sensitive_commands(self): - listeners = self.client._event_listeners + listener = EventListener() + client = self.rs_or_single_client(event_listeners=[listener]) + listeners = client._event_listeners - self.listener.results.clear() + listener.reset() cmd = SON([("getnonce", 1)]) - listeners.publish_command_start(cmd, "pymongo_test", 12345, self.client.address) + listeners.publish_command_start(cmd, "pymongo_test", 12345, client.address, None) # type: ignore[arg-type] delta = datetime.timedelta(milliseconds=100) listeners.publish_command_success( - delta, {"nonce": "e474f4561c5eb40b", "ok": 1.0}, "getnonce", 12345, self.client.address + delta, + {"nonce": "e474f4561c5eb40b", "ok": 1.0}, + "getnonce", + 12345, + self.client.address, # type: ignore[arg-type] + None, + database_name="pymongo_test", ) - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) + started = listener.started_events[0] + succeeded = listener.succeeded_events[0] + self.assertEqual(0, len(listener.failed_events)) self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqual({}, started.command) self.assertEqual("pymongo_test", started.database_name) self.assertEqual("getnonce", started.command_name) self.assertIsInstance(started.request_id, int) - self.assertEqual(self.client.address, started.connection_id) + self.assertEqual(client.address, started.connection_id) self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) self.assertEqual(succeeded.duration_micros, 100000) self.assertEqual(started.command_name, succeeded.command_name) @@ -1140,80 +1130,80 @@ class TestGlobalListener(IntegrationTest): saved_listeners: Any @classmethod - @client_context.require_connection - def setUpClass(cls): - super(TestGlobalListener, cls).setUpClass() - cls.listener = EventListener() + def setUpClass(cls) -> None: + 
cls.listener = OvertCommandListener() # We plan to call register(), which internally modifies _LISTENERS. cls.saved_listeners = copy.deepcopy(monitoring._LISTENERS) monitoring.register(cls.listener) - cls.client = single_client() + + @client_context.require_connection + def setUp(self): + super().setUp() + self.listener.reset() + self.client = self.single_client() # Get one (authenticated) socket in the pool. - cls.client.pymongo_test.command("ping") + self.client.pymongo_test.command("ping") @classmethod def tearDownClass(cls): monitoring._LISTENERS = cls.saved_listeners - cls.client.close() - super(TestGlobalListener, cls).tearDownClass() - - def setUp(self): - super(TestGlobalListener, self).setUp() - self.listener.results.clear() def test_simple(self): self.client.pymongo_test.command("ping") - results = self.listener.results - started = results["started"][0] - succeeded = results["succeeded"][0] - self.assertEqual(0, len(results["failed"])) - self.assertTrue(isinstance(succeeded, monitoring.CommandSucceededEvent)) - self.assertTrue(isinstance(started, monitoring.CommandStartedEvent)) + started = self.listener.started_events[0] + succeeded = self.listener.succeeded_events[0] + self.assertEqual(0, len(self.listener.failed_events)) + self.assertIsInstance(succeeded, monitoring.CommandSucceededEvent) + self.assertIsInstance(started, monitoring.CommandStartedEvent) self.assertEqualCommand(SON([("ping", 1)]), started.command) self.assertEqual("ping", started.command_name) self.assertEqual(self.client.address, started.connection_id) self.assertEqual("pymongo_test", started.database_name) - self.assertTrue(isinstance(started.request_id, int)) + self.assertIsInstance(started.request_id, int) class TestEventClasses(unittest.TestCase): def test_command_event_repr(self): - request_id, connection_id, operation_id = 1, ("localhost", 27017), 2 + request_id, connection_id, operation_id, db_name = 1, ("localhost", 27017), 2, "admin" event = monitoring.CommandStartedEvent( - {"ping": 1}, "admin", request_id, connection_id, operation_id + {"ping": 1}, db_name, request_id, connection_id, operation_id ) self.assertEqual( repr(event), "<CommandStartedEvent ('localhost', 27017) db: 'admin', " - "command: 'ping', operation_id: 2, service_id: None>", + "command: 'ping', operation_id: 2, service_id: None, server_connection_id: None>", ) delta = datetime.timedelta(milliseconds=100) event = monitoring.CommandSucceededEvent( - delta, {"ok": 1}, "ping", request_id, connection_id, operation_id + delta, {"ok": 1}, "ping", request_id, connection_id, operation_id, database_name=db_name ) self.assertEqual( repr(event), - "<CommandSucceededEvent ('localhost', 27017) " - "command: 'ping', operation_id: 2, duration_micros: 100000, " - "service_id: None>", + "<CommandSucceededEvent ('localhost', 27017) db: 'admin', " + "command: 'ping', operation_id: 2, duration_micros: 100000, " + "service_id: None, server_connection_id: None>", ) event = monitoring.CommandFailedEvent( - delta, {"ok": 0}, "ping", request_id, connection_id, operation_id + delta, {"ok": 0}, "ping", request_id, connection_id, operation_id, database_name=db_name ) self.assertEqual( repr(event), - "<CommandFailedEvent ('localhost', 27017) " - "command: 'ping', operation_id: 2, duration_micros: 100000, " - "failure: {'ok': 0}, service_id: None>", + "<CommandFailedEvent ('localhost', 27017) db: 'admin', " + "command: 'ping', operation_id: 2, duration_micros: 100000, " + "failure: {'ok': 0}, service_id: None, server_connection_id: None>", ) def test_server_heartbeat_event_repr(self): connection_id = ("localhost", 27017) event = monitoring.ServerHeartbeatStartedEvent(connection_id) - self.assertEqual(repr(event), "<ServerHeartbeatStartedEvent ('localhost', 27017)>") + self.assertEqual( + repr(event), "<ServerHeartbeatStartedEvent ('localhost', 27017) awaited: False>" + ) delta = 0.1 event = monitoring.ServerHeartbeatSucceededEvent( - delta, {"ok": 1}, connection_id # type: ignore[arg-type] + delta, + {"ok": 1}, # type: ignore[arg-type] + connection_id, ) self.assertEqual( repr(event), @@ -1221,7 +1211,9 @@ def test_server_heartbeat_event_repr(self): "duration: 0.1, awaited: False, reply: {'ok': 1}>", ) event = monitoring.ServerHeartbeatFailedEvent( - delta, "ERROR", connection_id # type:
ignore[arg-type] + delta, + "ERROR", # type: ignore[arg-type] + connection_id, ) self.assertEqual( repr(event), @@ -1238,7 +1230,10 @@ def test_server_event_repr(self): "<ServerOpeningEvent ('localhost', 27017) topology_id: 000000000000000000000001>", ) event = monitoring.ServerDescriptionChangedEvent( - "PREV", "NEW", server_address, topology_id # type: ignore[arg-type] + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + server_address, + topology_id, ) self.assertEqual( repr(event), @@ -1255,7 +1250,9 @@ def test_topology_event_repr(self): event = monitoring.TopologyOpenedEvent(topology_id) self.assertEqual(repr(event), "<TopologyOpenedEvent topology_id: 000000000000000000000001>") event = monitoring.TopologyDescriptionChangedEvent( - "PREV", "NEW", topology_id # type: ignore[arg-type] + "PREV", # type: ignore[arg-type] + "NEW", # type: ignore[arg-type] + topology_id, ) self.assertEqual( repr(event), diff --git a/test/test_mypy.py b/test/test_mypy.py deleted file mode 100644 index c692c70789..0000000000 --- a/test/test_mypy.py +++ /dev/null @@ -1,403 +0,0 @@ -# Copyright 2020-present MongoDB, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License.
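# [editor's note: illustrative sketch, not part of the patch] The deleted test
# below drove mypy programmatically through mypy.api.run(), which returns a
# (report, errors, exit_status) tuple. A minimal "this file must fail type
# checking" helper, assuming mypy is installed:
from mypy import api

def fails_mypy(filename: str) -> bool:
    report, _errors, exit_status = api.run([filename])
    # A non-zero exit status means mypy reported at least one error.
    return exit_status != 0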
super().setUpClass() - cls.coll = cls.client.test.test - - def test_insert_find(self) -> None: - doc = {"my": "doc"} - coll2 = self.client.test.test2 - result = self.coll.insert_one(doc) - self.assertEqual(result.inserted_id, doc["_id"]) - retreived = self.coll.find_one({"_id": doc["_id"]}) - if retreived: - # Documents returned from find are mutable. - retreived["new_field"] = 1 - result2 = coll2.insert_one(retreived) - self.assertEqual(result2.inserted_id, result.inserted_id) - - def test_cursor_iterable(self) -> None: - def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: - return list(iterable) - - self.coll.insert_one({}) - cursor = self.coll.find() - docs = to_list(cursor) - self.assertTrue(docs) - - def test_bulk_write(self) -> None: - self.coll.insert_one({}) - requests = [InsertOne({})] - result = self.coll.bulk_write(requests) - self.assertTrue(result.acknowledged) - - def test_command(self) -> None: - result: Dict = self.client.admin.command("ping") - items = result.items() - - def test_list_collections(self) -> None: - cursor = self.client.test.list_collections() - value = cursor.next() - items = value.items() - - def test_list_databases(self) -> None: - cursor = self.client.list_databases() - value = cursor.next() - value.items() - - def test_default_document_type(self) -> None: - client = rs_or_single_client() - self.addCleanup(client.close) - coll = client.test.test - doc = {"my": "doc"} - coll.insert_one(doc) - retreived = coll.find_one({"_id": doc["_id"]}) - assert retreived is not None - retreived["a"] = 1 - - def test_aggregate_pipeline(self) -> None: - coll3 = self.client.test.test3 - coll3.insert_many( - [ - {"x": 1, "tags": ["dog", "cat"]}, - {"x": 2, "tags": ["cat"]}, - {"x": 2, "tags": ["mouse", "cat", "dog"]}, - {"x": 3, "tags": []}, - ] - ) - - class mydict(Dict[str, Any]): - pass - - result = coll3.aggregate( - [ - mydict({"$unwind": "$tags"}), - {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, - {"$sort": SON([("count", -1), ("_id", -1)])}, - ] - ) - self.assertTrue(len(list(result))) - - def test_with_transaction(self) -> None: - def execute_transaction(session): - pass - - with self.client.start_session() as session: - return session.with_transaction( - execute_transaction, read_preference=ReadPreference.PRIMARY - ) - - -class TestDecode(unittest.TestCase): - def test_bson_decode(self) -> None: - doc = {"_id": 1} - bsonbytes = encode(doc) - rt_document: Dict[str, Any] = decode(bsonbytes) - assert rt_document["_id"] == 1 - rt_document["foo"] = "bar" - - class MyDict(Dict[str, Any]): - def foo(self): - return "bar" - - codec_options = CodecOptions(document_class=MyDict) - bsonbytes2 = encode(doc, codec_options=codec_options) - rt_document2 = decode(bsonbytes2, codec_options=codec_options) - assert rt_document2.foo() == "bar" - - codec_options2 = CodecOptions(document_class=RawBSONDocument) - bsonbytes3 = encode(doc, codec_options=codec_options2) - rt_document3 = decode(bsonbytes2, codec_options=codec_options2) - assert rt_document3.raw - - def test_bson_decode_all(self) -> None: - doc = {"_id": 1} - bsonbytes = encode(doc) - bsonbytes += encode(doc) - rt_documents: List[Dict[str, Any]] = decode_all(bsonbytes) - assert rt_documents[0]["_id"] == 1 - rt_documents[0]["foo"] = "bar" - - class MyDict(Dict[str, Any]): - def foo(self): - return "bar" - - codec_options2 = CodecOptions(MyDict) - bsonbytes2 = encode(doc, codec_options=codec_options2) - bsonbytes2 += encode(doc, codec_options=codec_options2) - rt_documents2 = decode_all(bsonbytes2, 
codec_options2) - assert rt_documents2[0].foo() == "bar" - - codec_options3 = CodecOptions(RawBSONDocument) - bsonbytes3 = encode(doc, codec_options=codec_options3) - bsonbytes3 += encode(doc, codec_options=codec_options3) - rt_documents3 = decode_all(bsonbytes3, codec_options3) - assert rt_documents3[0].raw - - def test_bson_decode_iter(self) -> None: - doc = {"_id": 1} - bsonbytes = encode(doc) - bsonbytes += encode(doc) - rt_documents: Iterator[Dict[str, Any]] = decode_iter(bsonbytes) - assert next(rt_documents)["_id"] == 1 - next(rt_documents)["foo"] = "bar" - - class MyDict(Dict[str, Any]): - def foo(self): - return "bar" - - codec_options2 = CodecOptions(MyDict) - bsonbytes2 = encode(doc, codec_options=codec_options2) - bsonbytes2 += encode(doc, codec_options=codec_options2) - rt_documents2 = decode_iter(bsonbytes2, codec_options2) - assert next(rt_documents2).foo() == "bar" - - codec_options3 = CodecOptions(RawBSONDocument) - bsonbytes3 = encode(doc, codec_options=codec_options3) - bsonbytes3 += encode(doc, codec_options=codec_options3) - rt_documents3 = decode_iter(bsonbytes3, codec_options3) - assert next(rt_documents3).raw - - def make_tempfile(self, content: bytes) -> Any: - fileobj = tempfile.TemporaryFile() - fileobj.write(content) - fileobj.seek(0) - self.addCleanup(fileobj.close) - return fileobj - - def test_bson_decode_file_iter(self) -> None: - doc = {"_id": 1} - bsonbytes = encode(doc) - bsonbytes += encode(doc) - fileobj = self.make_tempfile(bsonbytes) - rt_documents: Iterator[Dict[str, Any]] = decode_file_iter(fileobj) - assert next(rt_documents)["_id"] == 1 - next(rt_documents)["foo"] = "bar" - - class MyDict(Dict[str, Any]): - def foo(self): - return "bar" - - codec_options2 = CodecOptions(MyDict) - bsonbytes2 = encode(doc, codec_options=codec_options2) - bsonbytes2 += encode(doc, codec_options=codec_options2) - fileobj2 = self.make_tempfile(bsonbytes2) - rt_documents2 = decode_file_iter(fileobj2, codec_options2) - assert next(rt_documents2).foo() == "bar" - - codec_options3 = CodecOptions(RawBSONDocument) - bsonbytes3 = encode(doc, codec_options=codec_options3) - bsonbytes3 += encode(doc, codec_options=codec_options3) - fileobj3 = self.make_tempfile(bsonbytes3) - rt_documents3 = decode_file_iter(fileobj3, codec_options3) - assert next(rt_documents3).raw - - -class TestDocumentType(unittest.TestCase): - @only_type_check - def test_default(self) -> None: - client: MongoClient = MongoClient() - coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - retreived["a"] = 1 - - @only_type_check - def test_explicit_document_type(self) -> None: - client: MongoClient[Dict[str, Any]] = MongoClient() - coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - retreived["a"] = 1 - - @only_type_check - def test_typeddict_document_type(self) -> None: - client: MongoClient[Movie] = MongoClient() - coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - assert retreived["year"] == 1 - assert retreived["name"] == "a" - - @only_type_check - def test_raw_bson_document_type(self) -> None: - client = MongoClient(document_class=RawBSONDocument) - coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) - assert retreived is not None - assert len(retreived.raw) > 0 - - @only_type_check - def test_son_document_type(self) -> None: - client = MongoClient(document_class=SON[str, Any]) - coll = client.test.test - retreived = coll.find_one({"_id": "foo"}) 
- assert retreived is not None - retreived["a"] = 1 - - def test_son_document_type_runtime(self) -> None: - client = MongoClient(document_class=SON[str, Any], connect=False) - - @only_type_check - def test_create_index(self) -> None: - client: MongoClient[Dict[str, str]] = MongoClient("test") - db = client.test - with client.start_session() as session: - index = db.test.create_index([("user_id", ASCENDING)], unique=True, session=session) - assert isinstance(index, str) - - -class TestCommandDocumentType(unittest.TestCase): - @only_type_check - def test_default(self) -> None: - client: MongoClient = MongoClient() - result: Dict = client.admin.command("ping") - result["a"] = 1 - - @only_type_check - def test_explicit_document_type(self) -> None: - client: MongoClient = MongoClient() - codec_options: CodecOptions[Dict[str, Any]] = CodecOptions() - result = client.admin.command("ping", codec_options=codec_options) - result["a"] = 1 - - @only_type_check - def test_typeddict_document_type(self) -> None: - client: MongoClient = MongoClient() - codec_options: CodecOptions[Movie] = CodecOptions() - result = client.admin.command("ping", codec_options=codec_options) - assert result["year"] == 1 - assert result["name"] == "a" - - @only_type_check - def test_raw_bson_document_type(self) -> None: - client: MongoClient = MongoClient() - codec_options = CodecOptions(RawBSONDocument) - result = client.admin.command("ping", codec_options=codec_options) - assert len(result.raw) > 0 - - @only_type_check - def test_son_document_type(self) -> None: - client = MongoClient(document_class=SON[str, Any]) - codec_options = CodecOptions(SON[str, Any]) - result = client.admin.command("ping", codec_options=codec_options) - result["a"] = 1 - - -class TestCodecOptionsDocumentType(unittest.TestCase): - def test_default(self) -> None: - options: CodecOptions = CodecOptions() - obj = options.document_class() - obj["a"] = 1 - - def test_explicit_document_type(self) -> None: - options: CodecOptions[Dict[str, Any]] = CodecOptions() - obj = options.document_class() - obj["a"] = 1 - - def test_typeddict_document_type(self) -> None: - options: CodecOptions[Movie] = CodecOptions() - # Suppress: Cannot instantiate type "Type[Movie]". - obj = options.document_class(name="a", year=1) # type: ignore[misc] - assert obj["year"] == 1 - assert obj["name"] == "a" - - def test_raw_bson_document_type(self) -> None: - options = CodecOptions(RawBSONDocument) - doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" - obj = options.document_class(doc_bson) - assert len(obj.raw) > 0 - - def test_son_document_type(self) -> None: - options = CodecOptions(SON[str, Any]) - obj = options.document_class() - obj["a"] = 1 - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_objectid.py b/test/test_objectid.py index bb1af865c0..dbc61951d1 100644 --- a/test/test_objectid.py +++ b/test/test_objectid.py @@ -13,6 +13,7 @@ # limitations under the License. 
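# [editor's note: illustrative sketch, not part of the patch] The datetime
# edits below (here and in test_ocsp_cache.py) swap the deprecated
# datetime.utcnow() for an equivalent timezone-aware expression; both yield a
# naive datetime holding the current UTC wall-clock time:
import datetime

old_style = datetime.datetime.utcnow()  # deprecated since Python 3.12
new_style = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None)
assert abs(new_style - old_style) < datetime.timedelta(seconds=1)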
"""Tests for the objectid module.""" +from __future__ import annotations import datetime import pickle @@ -22,7 +23,7 @@ sys.path[0:0] = [""] from test import SkipTest, unittest -from test.utils import oid_generated_on_process +from test.utils_shared import oid_generated_on_process from bson.errors import InvalidId from bson.objectid import _MAX_COUNTER_VALUE, ObjectId @@ -86,18 +87,15 @@ def test_binary_str_equivalence(self): self.assertEqual(a, ObjectId(str(a))) def test_generation_time(self): - d1 = datetime.datetime.utcnow() + d1 = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d2 = ObjectId().generation_time self.assertEqual(utc, d2.tzinfo) d2 = d2.replace(tzinfo=None) - self.assertTrue(d2 - d1 < datetime.timedelta(seconds=2)) + self.assertLess(d2 - d1, datetime.timedelta(seconds=2)) def test_from_datetime(self): - if "PyPy 1.8.0" in sys.version: - # See https://bugs.pypy.org/issue1092 - raise SkipTest("datetime.timedelta is broken in pypy 1.8.0") - d = datetime.datetime.utcnow() + d = datetime.datetime.now(tz=datetime.timezone.utc).replace(tzinfo=None) d = d - datetime.timedelta(microseconds=d.microsecond) oid = ObjectId.from_datetime(d) self.assertEqual(d, oid.generation_time.replace(tzinfo=None)) diff --git a/test/test_ocsp_cache.py b/test/test_ocsp_cache.py index 0e6777a9f9..1cc025ccb2 100644 --- a/test/test_ocsp_cache.py +++ b/test/test_ocsp_cache.py @@ -13,11 +13,12 @@ # limitations under the License. """Test the pymongo ocsp_support module.""" +from __future__ import annotations import random import sys from collections import namedtuple -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from os import urandom from time import sleep from typing import Any @@ -61,7 +62,7 @@ def _create_mock_request(self): ) def _create_mock_response(self, this_update_delta_seconds, next_update_delta_seconds): - now = datetime.utcnow() + now = datetime.now(tz=timezone.utc).replace(tzinfo=None) this_update = now + timedelta(seconds=this_update_delta_seconds) if next_update_delta_seconds is not None: next_update = now + timedelta(seconds=next_update_delta_seconds) diff --git a/test/test_on_demand_csfle.py b/test/test_on_demand_csfle.py new file mode 100644 index 0000000000..648e46815a --- /dev/null +++ b/test/test_on_demand_csfle.py @@ -0,0 +1,115 @@ +# Copyright 2022-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test client side encryption with on demand credentials.""" +from __future__ import annotations + +import os +import sys +import unittest + +import pytest + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context + +from bson.codec_options import CodecOptions +from pymongo.synchronous.encryption import ( + _HAVE_PYMONGOCRYPT, + ClientEncryption, + EncryptionError, +) + +_IS_SYNC = True + +pytestmark = pytest.mark.kms + + +class TestonDemandGCPCredentials(IntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUp(self): + super().setUp() + self.master_key = { + "projectId": "devprod-drivers", + "location": "global", + "keyRing": "key-ring-csfle", + "keyName": "key-name-csfle", + } + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("gcp", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_GCP_AUTO"), "Not testing FLE GCP auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"gcp": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("gcp", self.master_key) + + +class TestonDemandAzureCredentials(IntegrationTest): + @unittest.skipUnless(_HAVE_PYMONGOCRYPT, "pymongocrypt is not installed") + @client_context.require_version_min(4, 2, -1) + def setUp(self): + super().setUp() + self.master_key = { + "keyVaultEndpoint": os.environ["KEY_VAULT_ENDPOINT"], + "keyName": os.environ["KEY_NAME"], + } + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_01_failure(self): + if os.environ["SUCCESS"].lower() == "true": + self.skipTest("Expecting success") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + with self.assertRaises(EncryptionError): + self.client_encryption.create_data_key("azure", self.master_key) + + @unittest.skipIf(not os.getenv("TEST_FLE_AZURE_AUTO"), "Not testing FLE Azure auto") + def test_02_success(self): + if os.environ["SUCCESS"].lower() == "false": + self.skipTest("Expecting failure") + self.client_encryption = ClientEncryption( + kms_providers={"azure": {}}, + key_vault_namespace="keyvault.datakeys", + key_vault_client=client_context.client, + codec_options=CodecOptions(), + ) + self.client_encryption.create_data_key("azure", self.master_key) + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/test/test_operations.py b/test/test_operations.py new file mode 100644 index 0000000000..3ee6677735 --- /dev/null +++ b/test/test_operations.py @@ -0,0 +1,80 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test the operations module.""" +from __future__ import annotations + +from test import UnitTest, unittest + +from pymongo import ASCENDING, DESCENDING +from pymongo.collation import Collation +from pymongo.errors import OperationFailure +from pymongo.operations import IndexModel, SearchIndexModel + + +class TestOperationsBase(UnitTest): + """Base class for testing operations module.""" + + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + +class TestIndexModel(TestOperationsBase): + """Test IndexModel features.""" + + def test_repr(self): + # Based on examples in test_collection.py + self.assertRepr(IndexModel("hello")) + self.assertRepr(IndexModel([("hello", DESCENDING), ("world", ASCENDING)])) + self.assertRepr( + IndexModel([("hello", DESCENDING), ("world", ASCENDING)], name="hello_world") + ) + # Test all the kwargs + self.assertRepr(IndexModel("name", name="name")) + self.assertRepr(IndexModel("unique", unique=False)) + self.assertRepr(IndexModel("background", background=True)) + self.assertRepr(IndexModel("sparse", sparse=True)) + self.assertRepr(IndexModel("bucketSize", bucketSize=1)) + self.assertRepr(IndexModel("min", min=1)) + self.assertRepr(IndexModel("max", max=1)) + self.assertRepr(IndexModel("expireAfterSeconds", expireAfterSeconds=1)) + self.assertRepr( + IndexModel("partialFilterExpression", partialFilterExpression={"hello": "world"}) + ) + self.assertRepr(IndexModel("collation", collation=Collation(locale="en_US"))) + self.assertRepr(IndexModel("wildcardProjection", wildcardProjection={"$**": 1})) + self.assertRepr(IndexModel("hidden", hidden=False)) + # Test string literal + self.assertEqual(repr(IndexModel("hello")), "IndexModel({'hello': 1}, name='hello_1')") + self.assertEqual( + repr(IndexModel({"hello": 1, "world": -1})), + "IndexModel({'hello': 1, 'world': -1}, name='hello_1_world_-1')", + ) + + +class TestSearchIndexModel(TestOperationsBase): + """Test SearchIndexModel features.""" + + def test_repr(self): + self.assertRepr(SearchIndexModel({"hello": "hello"}, key=1)) + self.assertEqual( + repr(SearchIndexModel({"hello": "hello"}, key=1)), + "SearchIndexModel(definition={'hello': 'hello'}, key=1)", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_pooling.py b/test/test_pooling.py index 923c89d83b..cb5b206996 100644 --- a/test/test_pooling.py +++ b/test/test_pooling.py @@ -13,42 +13,44 @@ # limitations under the License. 
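# [editor's note: illustrative sketch, not part of the patch] The round-trip
# that TestOperationsBase.assertRepr above relies on: these reprs are valid
# Python expressions, so repr -> eval -> repr is a fixed point (assuming a
# PyMongo version with the eval()-able reprs this new test file pins down):
from pymongo.operations import IndexModel

model = IndexModel("hello")
assert repr(model) == "IndexModel({'hello': 1}, name='hello_1')"  # literal from the test
assert repr(eval(repr(model))) == repr(model)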
"""Test built in connection-pooling with threads.""" +from __future__ import annotations +import asyncio import gc import random import socket import sys -import threading import time +from test.utils import flaky, get_pool, joinall from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.son import SON -from pymongo import MongoClient, message +from pymongo import MongoClient, message, timeout from pymongo.errors import AutoReconnect, ConnectionFailure, DuplicateKeyError +from pymongo.hello import HelloCompat +from pymongo.lock import _create_lock sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import delay, get_pool, joinall, rs_or_single_client +from test.helpers import ConcurrentRunner +from test.utils_shared import delay -from pymongo.pool import Pool, PoolOptions from pymongo.socket_checker import SocketChecker +from pymongo.synchronous.pool import Pool, PoolOptions - -@client_context.require_connection -def setUpModule(): - pass +_IS_SYNC = True N = 10 DB = "pymongo-pooling-tests" -def gc_collect_until_done(threads, timeout=60): +def gc_collect_until_done(tasks, timeout=60): start = time.time() - running = list(threads) + running = list(tasks) while running: - assert (time.time() - start) < timeout, "Threads timed out" + assert (time.time() - start) < timeout, "Tasks timed out" for t in running: t.join(0.1) if not t.is_alive(): @@ -56,12 +58,12 @@ def gc_collect_until_done(threads, timeout=60): gc.collect() -class MongoThread(threading.Thread): - """A thread that uses a MongoClient.""" +class MongoTask(ConcurrentRunner): + """A thread/Task that uses a MongoClient.""" def __init__(self, client): - super(MongoThread, self).__init__() - self.daemon = True # Don't hang whole test if thread hangs. + super().__init__() + self.daemon = True # Don't hang whole test if task hangs. self.client = client self.db = self.client[DB] self.passed = False @@ -74,21 +76,21 @@ def run_mongo_thread(self): raise NotImplementedError -class InsertOneAndFind(MongoThread): +class InsertOneAndFind(MongoTask): def run_mongo_thread(self): for _ in range(N): rand = random.randint(0, N) - _id = self.db.sf.insert_one({"x": rand}).inserted_id - assert rand == self.db.sf.find_one(_id)["x"] + _id = (self.db.sf.insert_one({"x": rand})).inserted_id + assert rand == (self.db.sf.find_one(_id))["x"] -class Unique(MongoThread): +class Unique(MongoTask): def run_mongo_thread(self): for _ in range(N): self.db.unique.insert_one({}) # no error -class NonUnique(MongoThread): +class NonUnique(MongoTask): def run_mongo_thread(self): for _ in range(N): try: @@ -99,7 +101,7 @@ def run_mongo_thread(self): raise AssertionError("Should have raised DuplicateKeyError") -class SocketGetter(MongoThread): +class SocketGetter(MongoTask): """Utility for TestPooling. Checks out a socket and holds it forever. Used in @@ -107,7 +109,7 @@ class SocketGetter(MongoThread): """ def __init__(self, client, pool): - super(SocketGetter, self).__init__(client) + super().__init__(client) self.state = "init" self.pool = pool self.sock = None @@ -116,51 +118,53 @@ def run_mongo_thread(self): self.state = "get_socket" # Call 'pin_cursor' so we can hold the socket. 
- with self.pool.get_socket() as sock: + with self.pool.checkout() as sock: sock.pin_cursor() self.sock = sock - self.state = "sock" + self.state = "connection" - def __del__(self): + def release_conn(self): if self.sock: - self.sock.close_socket(None) + self.sock.unpin() + self.sock = None + return True + return False def run_cases(client, cases): - threads = [] + tasks = [] n_runs = 5 for case in cases: - for i in range(n_runs): + for _i in range(n_runs): t = case(client) t.start() - threads.append(t) + tasks.append(t) - for t in threads: + for t in tasks: t.join() - for t in threads: + for t in tasks: assert t.passed, "%s.run() threw an exception" % repr(t) class _TestPoolingBase(IntegrationTest): """Base class for all connection-pool tests.""" + @client_context.require_connection def setUp(self): - super(_TestPoolingBase, self).setUp() - self.c = rs_or_single_client() + super().setUp() + self.c = self.rs_or_single_client() db = self.c[DB] db.unique.drop() db.test.drop() db.unique.insert_one({"_id": "jesse"}) db.test.insert_many([{} for _ in range(10)]) - def tearDown(self): - self.c.close() - super(_TestPoolingBase, self).tearDown() - - def create_pool(self, pair=(client_context.host, client_context.port), *args, **kwargs): + def create_pool(self, pair=None, *args, **kwargs): + if pair is None: + pair = (client_context.host, client_context.port) # Start the pool with the correct ssl options. pool_options = client_context.client._topology_settings.pool_options kwargs["ssl_context"] = pool_options._ssl_context @@ -188,36 +192,36 @@ def test_pool_reuses_open_socket(self): # Test Pool's _check_closed() method doesn't close a healthy socket. cx_pool = self.create_pool(max_pool_size=10) cx_pool._check_interval_seconds = 0 # Always check. - with cx_pool.get_socket() as sock_info: + with cx_pool.checkout() as conn: pass - with cx_pool.get_socket() as new_sock_info: - self.assertEqual(sock_info, new_sock_info) + with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) - self.assertEqual(1, len(cx_pool.sockets)) + self.assertEqual(1, len(cx_pool.conns)) def test_get_socket_and_exception(self): # get_socket() returns socket after a non-network error. cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1) with self.assertRaises(ZeroDivisionError): - with cx_pool.get_socket() as sock_info: + with cx_pool.checkout() as conn: 1 / 0 # Socket was returned, not closed. - with cx_pool.get_socket() as new_sock_info: - self.assertEqual(sock_info, new_sock_info) + with cx_pool.checkout() as new_connection: + self.assertEqual(conn, new_connection) - self.assertEqual(1, len(cx_pool.sockets)) + self.assertEqual(1, len(cx_pool.conns)) def test_pool_removes_closed_socket(self): # Test that Pool removes explicitly closed socket. cx_pool = self.create_pool() - with cx_pool.get_socket() as sock_info: - # Use SocketInfo's API to close the socket. - sock_info.close_socket(None) + with cx_pool.checkout() as conn: + # Use Connection's API to close the socket. + conn.close_conn(None) - self.assertEqual(0, len(cx_pool.sockets)) + self.assertEqual(0, len(cx_pool.conns)) def test_pool_removes_dead_socket(self): # Test that Pool removes dead socket and the socket doesn't return @@ -225,20 +229,20 @@ def test_pool_removes_dead_socket(self): cx_pool = self.create_pool(max_pool_size=1, wait_queue_timeout=1) cx_pool._check_interval_seconds = 0 # Always check. 
- with cx_pool.get_socket() as sock_info: - # Simulate a closed socket without telling the SocketInfo it's + with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's # closed. - sock_info.sock.close() - self.assertTrue(sock_info.socket_closed()) + conn.conn.close() + self.assertTrue(conn.conn_closed()) - with cx_pool.get_socket() as new_sock_info: - self.assertEqual(0, len(cx_pool.sockets)) - self.assertNotEqual(sock_info, new_sock_info) + with cx_pool.checkout() as new_connection: + self.assertEqual(0, len(cx_pool.conns)) + self.assertNotEqual(conn, new_connection) - self.assertEqual(1, len(cx_pool.sockets)) + self.assertEqual(1, len(cx_pool.conns)) # Semaphore was released. - with cx_pool.get_socket(): + with cx_pool.checkout(): pass def test_socket_closed(self): @@ -282,13 +286,13 @@ def test_socket_checker(self): def test_return_socket_after_reset(self): pool = self.create_pool() - with pool.get_socket() as sock: + with pool.checkout() as sock: self.assertEqual(pool.active_sockets, 1) self.assertEqual(pool.operation_count, 1) pool.reset() self.assertTrue(sock.closed) - self.assertEqual(0, len(pool.sockets)) + self.assertEqual(0, len(pool.conns)) self.assertEqual(pool.active_sockets, 0) self.assertEqual(pool.operation_count, 0) @@ -299,20 +303,20 @@ def test_pool_check(self): cx_pool._check_interval_seconds = 0 # Always check. self.addCleanup(cx_pool.close) - with cx_pool.get_socket() as sock_info: - # Simulate a closed socket without telling the SocketInfo it's + with cx_pool.checkout() as conn: + # Simulate a closed socket without telling the Connection it's # closed. - sock_info.sock.close() + conn.conn.close() # Swap pool's address with a bad one. address, cx_pool.address = cx_pool.address, ("foo.com", 1234) with self.assertRaises(AutoReconnect): - with cx_pool.get_socket(): + with cx_pool.checkout(): pass # Back to normal, semaphore was correctly released. cx_pool.address = address - with cx_pool.get_socket(): + with cx_pool.checkout(): pass def test_wait_queue_timeout(self): @@ -320,16 +324,17 @@ def test_wait_queue_timeout(self): pool = self.create_pool(max_pool_size=1, wait_queue_timeout=wait_queue_timeout) self.addCleanup(pool.close) - with pool.get_socket(): + with pool.checkout(): start = time.time() with self.assertRaises(ConnectionFailure): - with pool.get_socket(): + with pool.checkout(): pass duration = time.time() - start - self.assertTrue( - abs(wait_queue_timeout - duration) < 1, - "Waited %.2f seconds for a socket, expected %f" % (duration, wait_queue_timeout), + self.assertLess( + abs(wait_queue_timeout - duration), + 1, + f"Waited {duration:.2f} seconds for a socket, expected {wait_queue_timeout:f}", ) def test_no_wait_queue_timeout(self): @@ -338,7 +343,7 @@ def test_no_wait_queue_timeout(self): self.addCleanup(pool.close) # Reach max_size. 
- with pool.get_socket() as s1: + with pool.checkout() as s1: t = SocketGetter(self.c, pool) t.start() while t.state != "get_socket": @@ -347,11 +352,15 @@ def test_no_wait_queue_timeout(self): time.sleep(1) self.assertEqual(t.state, "get_socket") - while t.state != "sock": + while t.state != "connection": time.sleep(0.1) - self.assertEqual(t.state, "sock") + self.assertEqual(t.state, "connection") self.assertEqual(t.sock, s1) + # Cleanup + t.release_conn() + t.join() + pool.close() def test_checkout_more_than_max_pool_size(self): pool = self.create_pool(max_pool_size=2) @@ -359,25 +368,34 @@ def test_checkout_more_than_max_pool_size(self): socks = [] for _ in range(2): # Call 'pin_cursor' so we can hold the socket. - with pool.get_socket() as sock: + with pool.checkout() as sock: sock.pin_cursor() socks.append(sock) - threads = [] - for _ in range(30): + tasks = [] + for _ in range(10): t = SocketGetter(self.c, pool) t.start() - threads.append(t) + tasks.append(t) time.sleep(1) - for t in threads: + for t in tasks: self.assertEqual(t.state, "get_socket") - + # Cleanup for socket_info in socks: - socket_info.close_socket(None) + socket_info.unpin() + while tasks: + to_remove = [] + for t in tasks: + if t.release_conn(): + to_remove.append(t) + t.join() + for t in to_remove: + tasks.remove(t) + time.sleep(0.05) + pool.close() def test_maxConnecting(self): - client = rs_or_single_client() - self.addCleanup(client.close) + client = self.rs_or_single_client() self.client.test.test.insert_one({}) self.addCleanup(self.client.test.test.delete_many, {}) pool = get_pool(client) @@ -387,21 +405,21 @@ def test_maxConnecting(self): def find_one(): docs.append(client.test.test.find_one({})) - threads = [threading.Thread(target=find_one) for _ in range(50)] - for thread in threads: - thread.start() - for thread in threads: - thread.join(10) + tasks = [ConcurrentRunner(target=find_one) for _ in range(50)] + for task in tasks: + task.start() + for task in tasks: + task.join(10) self.assertEqual(len(docs), 50) - self.assertLessEqual(len(pool.sockets), 50) + self.assertLessEqual(len(pool.conns), 50) # TLS and auth make connection establishment more expensive than # the query which leads to more threads hitting maxConnecting. # The end result is fewer total connections and better latency. if client_context.tls and client_context.auth_enabled: - self.assertLessEqual(len(pool.sockets), 30) + self.assertLessEqual(len(pool.conns), 30) else: - self.assertLessEqual(len(pool.sockets), 50) + self.assertLessEqual(len(pool.conns), 50) # MongoDB 4.4.1 with auth + ssl: # maxConnecting = 2: 6 connections in ~0.231+ seconds # maxConnecting = unbounded: 50 connections in ~0.642+ seconds @@ -409,50 +427,130 @@ def find_one(): # MongoDB 4.4.1 with no-auth no-ssl Python 3.8: # maxConnecting = 2: 15-22 connections in ~0.108+ seconds # maxConnecting = unbounded: 30+ connections in ~0.140+ seconds - print(len(pool.sockets)) + print(len(pool.conns)) + + @client_context.require_failCommand_appName + def test_csot_timeout_message(self): + client = self.rs_or_single_client(appName="connectionTimeoutApp") + # Mock an operation failing due to pymongo.timeout(). 
+ mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + client.db.t.insert_one({"x": 1}) + + with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + with timeout(0.5): + client.db.t.find_one({"$where": delay(2)}) + + self.assertIn("(configured timeouts: timeoutMS: 500.0ms", str(error.exception)) + + @client_context.require_failCommand_appName + def test_socket_timeout_message(self): + client = self.rs_or_single_client(socketTimeoutMS=500, appName="connectionTimeoutApp") + # Mock an operation failing due to socketTimeoutMS. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": ["find"], + "appName": "connectionTimeoutApp", + }, + } + + client.db.t.insert_one({"x": 1}) + + with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + client.db.t.find_one({"$where": delay(2)}) + + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 20000.0ms)", + str(error.exception), + ) + + @client_context.require_failCommand_appName + def test_connection_timeout_message(self): + # Mock a connection creation failing due to timeout. + mock_connection_timeout = { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "blockConnection": True, + "blockTimeMS": 1000, + "failCommands": [HelloCompat.LEGACY_CMD, "hello"], + "appName": "connectionTimeoutApp", + }, + } + + client = self.rs_or_single_client( + connectTimeoutMS=500, + socketTimeoutMS=500, + appName="connectionTimeoutApp", + heartbeatFrequencyMS=1000000, + ) + client.admin.command("ping") + pool = get_pool(client) + pool.reset_without_pause() + with self.fail_point(mock_connection_timeout): + with self.assertRaises(Exception) as error: + client.admin.command("ping") + + self.assertIn( + "(configured timeouts: socketTimeoutMS: 500.0ms, connectTimeoutMS: 500.0ms)", + str(error.exception), + ) class TestPoolMaxSize(_TestPoolingBase): def test_max_pool_size(self): max_pool_size = 4 - c = rs_or_single_client(maxPoolSize=max_pool_size) - self.addCleanup(c.close) + c = self.rs_or_single_client(maxPoolSize=max_pool_size) collection = c[DB].test # Need one document. collection.drop() collection.insert_one({}) - # nthreads had better be much larger than max_pool_size to ensure that - # max_pool_size sockets are actually required at some point in this + # ntasks had better be much larger than max_pool_size to ensure that + # max_pool_size connections are actually required at some point in this # test's execution. 
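# [editor's note: illustrative sketch, not part of the patch] The invariant the
# comment above describes, written against only the public API: with far more
# workers than maxPoolSize, every query must still complete (the test itself
# also peeks at the pool's private connection list via the get_pool() helper).
# Assumes a mongod reachable at the default localhost port.
import threading
from pymongo import MongoClient

def stress_pool(uri="mongodb://localhost:27017", max_pool_size=4, nworkers=10):
    client = MongoClient(uri, maxPoolSize=max_pool_size)
    passed = []
    lock = threading.Lock()

    def worker():
        for _ in range(5):
            client.test.test.find_one({})
        with lock:
            passed.append(1)

    workers = [threading.Thread(target=worker) for _ in range(nworkers)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    assert len(passed) == nworkers
    client.close()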
cx_pool = get_pool(c) - nthreads = 10 - threads = [] - lock = threading.Lock() + ntasks = 10 + tasks = [] + lock = _create_lock() self.n_passed = 0 def f(): for _ in range(5): collection.find_one({"$where": delay(0.1)}) - assert len(cx_pool.sockets) <= max_pool_size + assert len(cx_pool.conns) <= max_pool_size with lock: self.n_passed += 1 - for i in range(nthreads): - t = threading.Thread(target=f) - threads.append(t) + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) t.start() - joinall(threads) - self.assertEqual(nthreads, self.n_passed) - self.assertTrue(len(cx_pool.sockets) > 1) + joinall(tasks) + self.assertEqual(ntasks, self.n_passed) + self.assertGreater(len(cx_pool.conns), 1) self.assertEqual(0, cx_pool.requests) def test_max_pool_size_none(self): - c = rs_or_single_client(maxPoolSize=None) - self.addCleanup(c.close) + c = self.rs_or_single_client(maxPoolSize=None) collection = c[DB].test # Need one document. @@ -460,9 +558,9 @@ def test_max_pool_size_none(self): collection.insert_one({}) cx_pool = get_pool(c) - nthreads = 10 - threads = [] - lock = threading.Lock() + ntasks = 10 + tasks = [] + lock = _create_lock() self.n_passed = 0 def f(): @@ -472,19 +570,18 @@ def f(): with lock: self.n_passed += 1 - for i in range(nthreads): - t = threading.Thread(target=f) - threads.append(t) + for _i in range(ntasks): + t = ConcurrentRunner(target=f) + tasks.append(t) t.start() - joinall(threads) - self.assertEqual(nthreads, self.n_passed) - self.assertTrue(len(cx_pool.sockets) > 1) + joinall(tasks) + self.assertEqual(ntasks, self.n_passed) + self.assertGreater(len(cx_pool.conns), 1) self.assertEqual(cx_pool.max_pool_size, float("inf")) def test_max_pool_size_zero(self): - c = rs_or_single_client(maxPoolSize=0) - self.addCleanup(c.close) + c = self.rs_or_single_client(maxPoolSize=0) pool = get_pool(c) self.assertEqual(pool.max_pool_size, float("inf")) @@ -500,9 +597,9 @@ def test_max_pool_size_with_connection_failure(self): # First call to get_socket fails; if pool doesn't release its semaphore # then the second call raises "ConnectionFailure: Timed out waiting for # socket from pool" instead of AutoReconnect. - for i in range(2): + for _i in range(2): with self.assertRaises(AutoReconnect) as context: - with test_pool.get_socket(): + with test_pool.checkout(): pass # Testing for AutoReconnect instead of ConnectionFailure, above, diff --git a/test/test_pymongo.py b/test/test_pymongo.py index 7ec32e16a6..fd8ece6c03 100644 --- a/test/test_pymongo.py +++ b/test/test_pymongo.py @@ -13,6 +13,7 @@ # limitations under the License. 
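# [editor's note: illustrative sketch, not part of the patch] The failCommand
# fail point that the timeout tests above arm via self.fail_point(...). It is a
# server-side test hook, so this only works against a mongod started with
# --setParameter enableTestCommands=1:
from pymongo import MongoClient
from pymongo.errors import OperationFailure

client = MongoClient()  # assumes a local test mongod
client.admin.command(
    {
        "configureFailPoint": "failCommand",
        "mode": {"times": 1},  # fire once, then disarm automatically
        "data": {"failCommands": ["find"], "errorCode": 2},
    }
)
try:
    client.test.test.find_one({})  # fails with the injected error code
except OperationFailure as exc:
    assert exc.code == 2
finally:
    client.admin.command({"configureFailPoint": "failCommand", "mode": "off"})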
"""Test the pymongo module itself.""" +from __future__ import annotations import sys @@ -21,12 +22,21 @@ from test import unittest import pymongo +from pymongo._version import get_version_tuple class TestPyMongo(unittest.TestCase): def test_mongo_client_alias(self): # Testing that pymongo module imports mongo_client.MongoClient - self.assertEqual(pymongo.MongoClient, pymongo.mongo_client.MongoClient) + self.assertEqual(pymongo.MongoClient, pymongo.synchronous.mongo_client.MongoClient) + + def test_get_version_tuple(self): + self.assertEqual(get_version_tuple("4.8.0.dev1"), (4, 8, 0, ".dev1")) + self.assertEqual(get_version_tuple("4.8.1"), (4, 8, 1)) + self.assertEqual(get_version_tuple("5.0.0rc1"), (5, 0, 0, "rc1")) + self.assertEqual(get_version_tuple("5.0"), (5, 0)) + with self.assertRaises(ValueError): + get_version_tuple("5") if __name__ == "__main__": diff --git a/test/test_raw_bson.py b/test/test_raw_bson.py index d82e5104c0..4d9a3ceb05 100644 --- a/test/test_raw_bson.py +++ b/test/test_raw_bson.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import datetime import sys @@ -18,19 +19,19 @@ sys.path[0:0] = [""] -from test import client_context, unittest -from test.test_client import IntegrationTest +from test import IntegrationTest, client_context, unittest -from bson import decode, encode +from bson import Code, DBRef, decode, encode from bson.binary import JAVA_LEGACY, Binary, UuidRepresentation from bson.codec_options import CodecOptions from bson.errors import InvalidBSON from bson.raw_bson import DEFAULT_RAW_BSON_OPTIONS, RawBSONDocument from bson.son import SON +_IS_SYNC = True -class TestRawBSONDocument(IntegrationTest): +class TestRawBSONDocument(IntegrationTest): # {'_id': ObjectId('556df68b6e32ab21a95e0785'), # 'name': 'Sherlock', # 'addresses': [{'street': 'Baker Street'}]} @@ -199,6 +200,20 @@ def test_preserve_key_ordering(self): for rkey, elt in zip(rawdoc, keyvaluepairs): self.assertEqual(rkey, elt[0]) + def test_contains_code_with_scope(self): + doc = RawBSONDocument(encode({"value": Code("x=1", scope={})})) + + self.assertEqual(decode(encode(doc)), {"value": Code("x=1", {})}) + self.assertEqual(doc["value"].scope, RawBSONDocument(encode({}))) + + def test_contains_dbref(self): + doc = RawBSONDocument(encode({"value": DBRef("test", "id")})) + raw = {"$ref": "test", "$id": "id"} + raw_encoded = encode(decode(encode(raw))) + + self.assertEqual(decode(encode(doc)), {"value": DBRef("test", "id")}) + self.assertEqual(doc["value"].raw, raw_encoded) + if __name__ == "__main__": unittest.main() diff --git a/test/test_read_concern.py b/test/test_read_concern.py index d5df682fba..62b2491475 100644 --- a/test/test_read_concern.py +++ b/test/test_read_concern.py @@ -13,36 +13,36 @@ # limitations under the License. 
"""Test the read_concern module.""" +from __future__ import annotations + +import sys +import unittest + +sys.path[0:0] = [""] from test import IntegrationTest, client_context -from test.utils import OvertCommandListener, rs_or_single_client, single_client +from test.utils_shared import OvertCommandListener from bson.son import SON from pymongo.errors import OperationFailure from pymongo.read_concern import ReadConcern +_IS_SYNC = True + class TestReadConcern(IntegrationTest): listener: OvertCommandListener - @classmethod @client_context.require_connection - def setUpClass(cls): - super(TestReadConcern, cls).setUpClass() - cls.listener = OvertCommandListener() - cls.client = single_client(event_listeners=[cls.listener]) - cls.db = cls.client.pymongo_test + def setUp(self): + super().setUp() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) + self.db = self.client.pymongo_test client_context.client.pymongo_test.create_collection("coll") - @classmethod - def tearDownClass(cls): - cls.client.close() - client_context.client.pymongo_test.drop_collection("coll") - super(TestReadConcern, cls).tearDownClass() - def tearDown(self): - self.listener.results.clear() - super(TestReadConcern, self).tearDown() + client_context.client.pymongo_test.drop_collection("coll") def test_read_concern(self): rc = ReadConcern() @@ -60,8 +60,8 @@ def test_read_concern(self): self.assertRaises(TypeError, ReadConcern, 42) def test_read_concern_uri(self): - uri = "mongodb://%s/?readConcernLevel=majority" % (client_context.pair,) - client = rs_or_single_client(uri, connect=False) + uri = f"mongodb://{client_context.pair}/?readConcernLevel=majority" + client = self.rs_or_single_client(uri, connect=False) self.assertEqual(ReadConcern("majority"), client.read_concern) def test_invalid_read_concern(self): @@ -73,14 +73,14 @@ def test_invalid_read_concern(self): def test_find_command(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.find({"field": "value"})) - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + coll.find({"field": "value"}).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) - self.listener.results.clear() + self.listener.reset() # Explicitly set readConcern to 'local'. coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) - tuple(coll.find({"field": "value"})) + coll.find({"field": "value"}).to_list() self.assertEqualCommand( SON( [ @@ -89,30 +89,32 @@ def test_find_command(self): ("readConcern", {"level": "local"}), ] ), - self.listener.results["started"][0].command, + self.listener.started_events[0].command, ) def test_command_cursor(self): # readConcern not sent in command if not specified. coll = self.db.coll - tuple(coll.aggregate([{"$match": {"field": "value"}}])) - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + (coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertNotIn("readConcern", self.listener.started_events[0].command) - self.listener.results.clear() + self.listener.reset() # Explicitly set readConcern to 'local'. 
coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) - tuple(coll.aggregate([{"$match": {"field": "value"}}])) - self.assertEqual( - {"level": "local"}, self.listener.results["started"][0].command["readConcern"] - ) + (coll.aggregate([{"$match": {"field": "value"}}])).to_list() + self.assertEqual({"level": "local"}, self.listener.started_events[0].command["readConcern"]) def test_aggregate_out(self): coll = self.db.get_collection("coll", read_concern=ReadConcern("local")) - tuple(coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}])) + (coll.aggregate([{"$match": {"field": "value"}}, {"$out": "output_collection"}])).to_list() # Aggregate with $out supports readConcern MongoDB 4.2 onwards. if client_context.version >= (4, 1): - self.assertIn("readConcern", self.listener.results["started"][0].command) + self.assertIn("readConcern", self.listener.started_events[0].command) else: - self.assertNotIn("readConcern", self.listener.results["started"][0].command) + self.assertNotIn("readConcern", self.listener.started_events[0].command) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_read_preferences.py b/test/test_read_preferences.py index ae2fa8bcee..084abdf3e1 100644 --- a/test/test_read_preferences.py +++ b/test/test_read_preferences.py @@ -13,22 +13,30 @@ # limitations under the License. """Test the replica_set_connection module.""" +from __future__ import annotations import contextlib import copy import pickle import random import sys +from typing import Any + +from pymongo.operations import _Op sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, unittest -from test.utils import ( - OvertCommandListener, +from test import ( + IntegrationTest, + SkipTest, + client_context, connected, + unittest, +) +from test.utils_shared import ( + OvertCommandListener, + _ignore_deprecations, one, - rs_client, - single_client, wait_until, ) from test.version import Version @@ -36,7 +44,6 @@ from bson.son import SON from pymongo.errors import ConfigurationError, OperationFailure from pymongo.message import _maybe_add_read_preference -from pymongo.mongo_client import MongoClient from pymongo.read_preferences import ( MovingAverage, Nearest, @@ -49,15 +56,21 @@ from pymongo.server_description import ServerDescription from pymongo.server_selectors import Selection, readable_server_selector from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern +_IS_SYNC = True + class TestSelections(IntegrationTest): @client_context.require_connection def test_bool(self): - client = single_client() + client = self.single_client() - wait_until(lambda: client.address, "discover primary") + def predicate(): + return client.address + + wait_until(predicate, "discover primary") selection = Selection.from_topology_description(client._topology.description) self.assertTrue(selection) @@ -87,13 +100,9 @@ def test_deepcopy(self): class TestReadPreferencesBase(IntegrationTest): - @classmethod @client_context.require_secondaries_count(1) - def setUpClass(cls): - super(TestReadPreferencesBase, cls).setUpClass() - def setUp(self): - super(TestReadPreferencesBase, self).setUp() + super().setUp() # Insert some data so we can use cursors in read_from_which_host self.client.pymongo_test.test.drop() self.client.get_database( @@ -119,24 +128,27 @@ def read_from_which_kind(self, client): return "secondary" else: self.fail( - "Cursor used address %s, expected 
either primary " - "%s or secondaries %s" % (address, client.primary, client.secondaries) + f"Cursor used address {address}, expected either primary " + f"{client.primary} or secondaries {client.secondaries}" ) def assertReadsFrom(self, expected, **kwargs): - c = rs_client(**kwargs) - wait_until(lambda: len(c.nodes - c.arbiters) == client_context.w, "discovered all nodes") + c = self.rs_client(**kwargs) + + def predicate(): + return len(c.nodes - c.arbiters) == client_context.w + + wait_until(predicate, "discovered all nodes") used = self.read_from_which_kind(c) - self.assertEqual(expected, used, "Cursor used %s, expected %s" % (used, expected)) + self.assertEqual(expected, used, f"Cursor used {used}, expected {expected}") class TestSingleSecondaryOk(TestReadPreferencesBase): def test_reads_from_secondary(self): - host, port = next(iter(self.client.secondaries)) # Direct connection to a secondary. - client = single_client(host, port) + client = self.single_client(host, port) self.assertFalse(client.is_primary) # Regardless of read preference, we should be able to do @@ -149,7 +161,7 @@ def test_reads_from_secondary(self): # Test find and find_one. self.assertIsNotNone(coll.find_one()) - self.assertEqual(10, len(list(coll.find()))) + self.assertEqual(10, len(coll.find().to_list())) # Test some database helpers. self.assertIsNotNone(db.list_collection_names()) @@ -172,19 +184,23 @@ def test_mode_validation(self): ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST, ): - self.assertEqual(mode, rs_client(read_preference=mode).read_preference) + self.assertEqual(mode, (self.rs_client(read_preference=mode)).read_preference) - self.assertRaises(TypeError, rs_client, read_preference="foo") + with self.assertRaises(TypeError): + self.rs_client(read_preference="foo") def test_tag_sets_validation(self): S = Secondary(tag_sets=[{}]) - self.assertEqual([{}], rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{}], (self.rs_client(read_preference=S)).read_preference.tag_sets) S = Secondary(tag_sets=[{"k": "v"}]) - self.assertEqual([{"k": "v"}], rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual([{"k": "v"}], (self.rs_client(read_preference=S)).read_preference.tag_sets) S = Secondary(tag_sets=[{"k": "v"}, {}]) - self.assertEqual([{"k": "v"}, {}], rs_client(read_preference=S).read_preference.tag_sets) + self.assertEqual( + [{"k": "v"}, {}], + (self.rs_client(read_preference=S)).read_preference.tag_sets, + ) self.assertRaises(ValueError, Secondary, tag_sets=[]) @@ -197,20 +213,27 @@ def test_tag_sets_validation(self): def test_threshold_validation(self): self.assertEqual( - 17, rs_client(localThresholdMS=17, connect=False).options.local_threshold_ms + 17, + (self.rs_client(localThresholdMS=17, connect=False)).options.local_threshold_ms, ) self.assertEqual( - 42, rs_client(localThresholdMS=42, connect=False).options.local_threshold_ms + 42, + (self.rs_client(localThresholdMS=42, connect=False)).options.local_threshold_ms, ) self.assertEqual( - 666, rs_client(localThresholdMS=666, connect=False).options.local_threshold_ms + 666, + (self.rs_client(localThresholdMS=666, connect=False)).options.local_threshold_ms, ) - self.assertEqual(0, rs_client(localThresholdMS=0, connect=False).options.local_threshold_ms) + self.assertEqual( + 0, + (self.rs_client(localThresholdMS=0, connect=False)).options.local_threshold_ms, + ) - self.assertRaises(ValueError, rs_client, localthresholdms=-1) + with self.assertRaises(ValueError): + self.rs_client(localthresholdms=-1) def 
test_zero_latency(self): ping_times: set = set() @@ -220,7 +243,7 @@ def test_zero_latency(self): for ping_time, host in zip(ping_times, self.client.nodes): ServerDescription._host_to_round_trip_time[host] = ping_time try: - client = connected(rs_client(readPreference="nearest", localThresholdMS=0)) + client = connected(self.rs_client(readPreference="nearest", localThresholdMS=0)) wait_until(lambda: client.nodes == self.client.nodes, "discovered all nodes") host = self.read_from_which_host(client) for _ in range(5): @@ -233,7 +256,8 @@ def test_primary(self): def test_primary_with_tags(self): # Tags not allowed with PRIMARY - self.assertRaises(ConfigurationError, rs_client, tag_sets=[{"dc": "ny"}]) + with self.assertRaises(ConfigurationError): + self.rs_client(tag_sets=[{"dc": "ny"}]) def test_primary_preferred(self): self.assertReadsFrom("primary", read_preference=ReadPreference.PRIMARY_PREFERRED) @@ -247,7 +271,9 @@ def test_secondary_preferred(self): def test_nearest(self): # With high localThresholdMS, expect to read from any # member - c = rs_client(read_preference=ReadPreference.NEAREST, localThresholdMS=10000) # 10 seconds + c = self.rs_client( + read_preference=ReadPreference.NEAREST, localThresholdMS=10000 + ) # 10 seconds data_members = {self.client.primary} | self.client.secondaries @@ -264,14 +290,14 @@ def test_nearest(self): not_used = data_members.difference(used) latencies = ", ".join( - "%s: %dms" % (server.description.address, server.description.round_trip_time) - for server in c._get_topology().select_servers(readable_server_selector) + "%s: %sms" % (server.description.address, server.description.round_trip_time) + for server in (c._get_topology()).select_servers(readable_server_selector, _Op.TEST) ) self.assertFalse( not_used, "Expected to use primary and all secondaries for mode NEAREST," - " but didn't use %s\nlatencies: %s" % (not_used, latencies), + f" but didn't use {not_used}\nlatencies: {latencies}", ) @@ -280,24 +306,21 @@ def __init__(self, *args, **kwargs): self.has_read_from = set() client_options = client_context.client_options client_options.update(kwargs) - super(ReadPrefTester, self).__init__(*args, **client_options) + super().__init__(*args, **client_options) - @contextlib.contextmanager - def _socket_for_reads(self, read_preference, session): - context = super(ReadPrefTester, self)._socket_for_reads(read_preference, session) - with context as (sock_info, read_preference): - self.record_a_read(sock_info.address) - yield sock_info, read_preference + def _conn_for_reads(self, read_preference, session, operation): + context = super()._conn_for_reads(read_preference, session, operation) + return context @contextlib.contextmanager - def _socket_from_server(self, read_preference, server, session): - context = super(ReadPrefTester, self)._socket_from_server(read_preference, server, session) - with context as (sock_info, read_preference): - self.record_a_read(sock_info.address) - yield sock_info, read_preference + def _conn_from_server(self, read_preference, server, session): + context = super()._conn_from_server(read_preference, server, session) + with context as (conn, read_preference): + self.record_a_read(conn.address) + yield conn, read_preference def record_a_read(self, address): - server = self._get_topology().select_server_by_address(address, 0) + server = (self._get_topology()).select_server_by_address(address, _Op.TEST, 0) self.has_read_from.add(server) @@ -314,26 +337,23 @@ class TestCommandAndReadPreference(IntegrationTest): c: ReadPrefTester 
client_version: Version - @classmethod @client_context.require_secondaries_count(1) - def setUpClass(cls): - super(TestCommandAndReadPreference, cls).setUpClass() - cls.c = ReadPrefTester( - client_context.pair, + def setUp(self): + super().setUp() + self.c = ReadPrefTester( # Ignore round trip times, to test ReadPreference modes only. localThresholdMS=1000 * 1000, ) - cls.client_version = Version.from_client(cls.c) + self.client_version = Version.from_client(self.c) # mapReduce fails if the collection does not exist. - coll = cls.c.pymongo_test.get_collection( + coll = self.c.pymongo_test.get_collection( "test", write_concern=WriteConcern(w=client_context.w) ) coll.insert_one({}) - @classmethod - def tearDownClass(cls): - cls.c.drop_database("pymongo_test") - cls.c.close() + def tearDown(self): + self.c.drop_database("pymongo_test") + self.c.close() def executed_on_which_server(self, client, fn, *args, **kwargs): """Execute fn(*args, **kwargs) and return the Server instance used.""" @@ -360,7 +380,7 @@ def _test_fn(self, server_type, fn): break assert self.c.primary is not None - unused = self.c.secondaries.union(set([self.c.primary])).difference(used) + unused = (self.c.secondaries).union({self.c.primary}).difference(used) if unused: self.fail("Some members not used for NEAREST: %s" % (unused)) else: @@ -373,7 +393,10 @@ def _test_primary_helper(self, func): def _test_coll_helper(self, secondary_ok, coll, meth, *args, **kwargs): for mode, server_type in _PREF_MAP: new_coll = coll.with_options(read_preference=mode()) - func = lambda: getattr(new_coll, meth)(*args, **kwargs) + + def func(): + return getattr(new_coll, meth)(*args, **kwargs) + if secondary_ok: self._test_fn(server_type, func) else: @@ -383,17 +406,21 @@ def test_command(self): # Test that the generic command helper obeys the read preference # passed to it. for mode, server_type in _PREF_MAP: - func = lambda: self.c.pymongo_test.command("dbStats", read_preference=mode()) + + def func(): + return self.c.pymongo_test.command("dbStats", read_preference=mode()) + self._test_fn(server_type, func) def test_create_collection(self): # create_collection runs listCollections on the primary to check if # the collection already exists. - self._test_primary_helper( - lambda: self.c.pymongo_test.create_collection( + def func(): + return self.c.pymongo_test.create_collection( "some_collection%s" % random.randint(0, sys.maxsize) ) - ) + + self._test_primary_helper(func) def test_count_documents(self): self._test_coll_helper(True, self.c.pymongo_test.test, "count_documents", {}) @@ -434,7 +461,6 @@ def test_moving_average(self): class TestMongosAndReadPreference(IntegrationTest): def test_read_preference_document(self): - pref = Primary() self.assertEqual(pref.document, {"mode": "primary"}) @@ -496,33 +522,44 @@ def test_read_preference_document_hedge(self): for mode, cls in cases.items(): with self.assertRaises(TypeError): cls(hedge=[]) # type: ignore - - pref = cls(hedge={}) - self.assertEqual(pref.document, {"mode": mode}) - out = _maybe_add_read_preference({}, pref) - if cls == SecondaryPreferred: - # SecondaryPreferred without hedge doesn't add $readPreference. - self.assertEqual(out, {}) - else: + with _ignore_deprecations(): + pref = cls(hedge={}) + self.assertEqual(pref.document, {"mode": mode}) + out = _maybe_add_read_preference({}, pref) + if cls == SecondaryPreferred: + # SecondaryPreferred without hedge doesn't add $readPreference. 
+ self.assertEqual(out, {}) + else: + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + + hedge: dict[str, Any] = {"enabled": True} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {"enabled": True} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + hedge = {"enabled": False} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {"enabled": False} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + hedge = {"enabled": False, "extra": "option"} + pref = cls(hedge=hedge) + self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) + out = _maybe_add_read_preference({}, pref) + self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) - hedge = {"enabled": False, "extra": "option"} - pref = cls(hedge=hedge) - self.assertEqual(pref.document, {"mode": mode, "hedge": hedge}) - out = _maybe_add_read_preference({}, pref) - self.assertEqual(out, SON([("$query", {}), ("$readPreference", pref.document)])) + def test_read_preference_hedge_deprecated(self): + cases = { + "primaryPreferred": PrimaryPreferred, + "secondary": Secondary, + "secondaryPreferred": SecondaryPreferred, + "nearest": Nearest, + } + for _, cls in cases.items(): + with self.assertRaises(DeprecationWarning): + cls(hedge={"enabled": True}) def test_send_hedge(self): cases = { @@ -533,15 +570,15 @@ def test_send_hedge(self): if client_context.supports_secondary_read_pref: cases["secondary"] = Secondary listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_client(event_listeners=[listener]) client.admin.command("ping") - for mode, cls in cases.items(): - pref = cls(hedge={"enabled": True}) + for _mode, cls in cases.items(): + with _ignore_deprecations(): + pref = cls(hedge={"enabled": True}) coll = client.test.get_collection("test", read_preference=pref) listener.reset() coll.find_one() - started = listener.results["started"] + started = listener.started_events self.assertEqual(len(started), 1, started) cmd = started[0].command if client_context.is_rs or client_context.is_mongos: @@ -551,7 +588,6 @@ def test_send_hedge(self): self.assertNotIn("$readPreference", cmd) def test_maybe_add_read_preference(self): - # Primary doesn't add $readPreference out = _maybe_add_read_preference({}, Primary()) self.assertEqual(out, {}) @@ -635,10 +671,10 @@ def test_mongos(self): # tell what shard member a query ran on. 
for pref in (Primary(), PrimaryPreferred(), Secondary(), SecondaryPreferred(), Nearest()): qcoll = coll.with_options(read_preference=pref) - results = list(qcoll.find().sort([("_id", 1)])) + results = qcoll.find().sort([("_id", 1)]).to_list() self.assertEqual(first_id, results[0]["_id"]) self.assertEqual(last_id, results[-1]["_id"]) - results = list(qcoll.find().sort([("_id", -1)])) + results = qcoll.find().sort([("_id", -1)]).to_list() self.assertEqual(first_id, results[-1]["_id"]) self.assertEqual(last_id, results[0]["_id"]) @@ -661,14 +697,14 @@ def test_mongos_max_staleness(self): else: self.fail("mongos accepted invalid staleness") - coll = single_client( - readPreference="secondaryPreferred", maxStalenessSeconds=120 + coll = ( + self.single_client(readPreference="secondaryPreferred", maxStalenessSeconds=120) ).pymongo_test.test # No error coll.find_one() - coll = single_client( - readPreference="secondaryPreferred", maxStalenessSeconds=10 + coll = ( + self.single_client(readPreference="secondaryPreferred", maxStalenessSeconds=10) ).pymongo_test.test try: coll.find_one() diff --git a/test/test_read_write_concern_spec.py b/test/test_read_write_concern_spec.py index 4dfc8f068c..4b816b7af9 100644 --- a/test/test_read_write_concern_spec.py +++ b/test/test_read_write_concern_spec.py @@ -13,23 +13,19 @@ # limitations under the License. """Run the read and write concern tests.""" +from __future__ import annotations import json import os import sys import warnings +from pathlib import Path sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ( - EventListener, - TestCreator, - disable_replication, - enable_replication, - rs_or_single_client, -) -from test.utils_spec_runner import SpecRunner +from test.unified_format import generate_test_classes +from test.utils_shared import OvertCommandListener from pymongo import DESCENDING from pymongo.errors import ( @@ -39,20 +35,25 @@ WriteError, WTimeoutError, ) -from pymongo.mongo_client import MongoClient from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern +from pymongo.synchronous.mongo_client import MongoClient from pymongo.write_concern import WriteConcern -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "read_write_concern") +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "read_write_concern") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "read_write_concern") class TestReadWriteConcernSpec(IntegrationTest): def test_omit_default_read_write_concern(self): - listener = EventListener() + listener = OvertCommandListener() # Client with default readConcern and writeConcern - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). 
collection.insert_many([{} for _ in range(10)]) @@ -71,9 +72,12 @@ def insert_command_default_write_concern(): "insert", "collection", documents=[{}], write_concern=WriteConcern() ) + def aggregate_op(): + (collection.aggregate([])).to_list() + ops = [ - ("aggregate", lambda: list(collection.aggregate([]))), - ("find", lambda: list(collection.find())), + ("aggregate", aggregate_op), + ("find", lambda: collection.find().to_list()), ("insert_one", lambda: collection.insert_one({})), ("update_one", lambda: collection.update_one({}, {"$set": {"x": 1}})), ("update_many", lambda: collection.update_many({}, {"$set": {"x": 1}})), @@ -85,26 +89,28 @@ def insert_command_default_write_concern(): ] for name, f in ops: - listener.results.clear() + listener.reset() f() - self.assertGreaterEqual(len(listener.results["started"]), 1) - for i, event in enumerate(listener.results["started"]): + self.assertGreaterEqual(len(listener.started_events), 1) + for _i, event in enumerate(listener.started_events): self.assertNotIn( "readConcern", event.command, - "%s sent default readConcern with %s" % (name, event.command_name), + f"{name} sent default readConcern with {event.command_name}", ) self.assertNotIn( "writeConcern", event.command, - "%s sent default writeConcern with %s" % (name, event.command_name), + f"{name} sent default writeConcern with {event.command_name}", ) def assertWriteOpsRaise(self, write_concern, expected_exception): wc = write_concern.document # Set socket timeout to avoid indefinite stalls - client = rs_or_single_client(w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000) + client = self.rs_or_single_client( + w=wc["w"], wTimeoutMS=wc["wtimeout"], socketTimeoutMS=30000 + ) db = client.get_database("pymongo_test") coll = db.test @@ -167,11 +173,13 @@ def test_raise_write_concern_error(self): @client_context.require_test_commands def test_raise_wtimeout(self): self.addCleanup(client_context.client.drop_database, "pymongo_test") - self.addCleanup(enable_replication, client_context.client) + self.addCleanup(self.enable_replication, client_context.client) # Disable replication to guarantee a wtimeout error. 
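For orientation, the failure mode this test manufactures: once replication is stopped, a write demanding acknowledgment from every member cannot complete within wtimeout, and pymongo surfaces it as WTimeoutError (a WriteConcernError subclass). A hedged sketch of the same shape without test commands, assuming a replica set on which w=3 cannot be satisfied within 1 ms:

    from pymongo import MongoClient
    from pymongo.errors import WTimeoutError
    from pymongo.write_concern import WriteConcern

    client = MongoClient()  # hypothetical replica-set deployment
    coll = client.pymongo_test.get_collection(
        "test", write_concern=WriteConcern(w=3, wtimeout=1)
    )
    try:
        coll.insert_one({})
    except WTimeoutError as exc:
        print(exc.details)  # the server's writeConcernError document
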
- disable_replication(client_context.client) + self.disable_replication(client_context.client) self.assertWriteOpsRaise(WriteConcern(w=client_context.w, wtimeout=1), WTimeoutError) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 1 (included here instead of test_client_bulk_write.py) @client_context.require_failCommand_fail_point def test_error_includes_errInfo(self): expected_wce = { @@ -206,11 +214,12 @@ def test_error_includes_errInfo(self): } self.assertEqual(ctx.exception.details, expected_details) + # https://github.com/mongodb/specifications/tree/master/source/crud/tests + # Test 2 (included here instead of test_client_bulk_write.py) @client_context.require_version_min(4, 9) def test_write_error_details_exposes_errinfo(self): - listener = EventListener() - client = rs_or_single_client(event_listeners=[listener]) - self.addCleanup(client.close) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener]) db = client.errinfotest self.addCleanup(client.drop_database, "errinfotest") validator = {"x": {"$type": "string"}} @@ -221,7 +230,7 @@ def test_write_error_details_exposes_errinfo(self): self.assertIsNotNone(ctx.exception.details) assert ctx.exception.details is not None self.assertIsNotNone(ctx.exception.details.get("errInfo")) - for event in listener.results["succeeded"]: + for event in listener.succeeded_events: if event.command_name == "insert": self.assertEqual(event.reply["writeErrors"][0], ctx.exception.details) break @@ -280,7 +289,7 @@ def run_test(self): self.assertEqual(write_concern.acknowledged, test_case["isAcknowledged"]) self.assertEqual(write_concern.is_server_default, test_case["isServerDefault"]) if "readConcern" in test_case: - # Any string for 'level' is equaly valid + # Any string for 'level' is equally valid read_concern = ReadConcern(**test_case["readConcern"]) self.assertEqual(read_concern.document, test_case["readConcernDocument"]) self.assertEqual(not bool(read_concern.level), test_case["isServerDefault"]) @@ -289,7 +298,7 @@ def run_test(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(TEST_PATH): dirname = os.path.split(dirpath)[-1] if dirname == "operation": @@ -307,7 +316,7 @@ def create_tests(): fname = os.path.splitext(filename)[0] for test_case in test_cases: new_test = create_test(test_case) - test_name = "test_%s_%s_%s" % ( + test_name = "test_{}_{}_{}".format( dirname.replace("-", "_"), fname.replace("-", "_"), str(test_case["description"].lower().replace(" ", "_")), @@ -320,25 +329,15 @@ def create_tests(): create_tests() -class TestOperation(SpecRunner): - # Location of JSON test specifications. - TEST_PATH = os.path.join(_TEST_PATH, "operation") - - def get_outcome_coll_name(self, outcome, collection): - """Spec says outcome has an optional 'collection.name'.""" - return outcome["collection"].get("name", collection.name) - - -def create_operation_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_operation_test, TestOperation, TestOperation.TEST_PATH) -test_creator.create_tests() +# Generate unified tests. +# PyMongo does not support MapReduce. 
+globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "operation"), + module=__name__, + expected_failures=["MapReduce .*"], + ) +) if __name__ == "__main__": diff --git a/test/test_replica_set_reconfig.py b/test/test_replica_set_reconfig.py index 898be99d4d..3371543f27 100644 --- a/test/test_replica_set_reconfig.py +++ b/test/test_replica_set_reconfig.py @@ -13,6 +13,7 @@ # limitations under the License. """Test clients and replica set configuration changes, using mocks.""" +from __future__ import annotations import sys @@ -20,7 +21,7 @@ from test import MockClientTest, client_context, client_knobs, unittest from test.pymongo_mocks import MockClient -from test.utils import wait_until +from test.utils_shared import wait_until from pymongo import ReadPreference from pymongo.errors import ConnectionFailure, ServerSelectionTimeoutError @@ -58,7 +59,8 @@ def test_client(self): with self.assertRaises(ServerSelectionTimeoutError): c.db.command("ping") - self.assertEqual(c.address, None) + with self.assertRaises(ServerSelectionTimeoutError): + _ = c.address # Client can still discover the primary node c.revive_host("a:1") @@ -83,7 +85,7 @@ def test_replica_set_client(self): c.mock_members.remove("c:3") c.mock_standalones.append("c:3") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "update the list of secondaries") + wait_until(lambda: {("b", 2)} == c.secondaries, "update the list of secondaries") self.assertEqual(("a", 1), c.primary) @@ -106,7 +108,7 @@ def test_replica_set_client(self): # C is removed. c.mock_hello_hosts.remove("c:3") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "update list of secondaries") + wait_until(lambda: {("b", 2)} == c.secondaries, "update list of secondaries") self.assertEqual(("a", 1), c.primary) @@ -148,7 +150,7 @@ def test_client(self): # MongoClient connects to primary by default. self.assertEqual(c.address, ("a", 1)) - self.assertEqual(set([("a", 1), ("b", 2)]), c.nodes) + self.assertEqual({("a", 1), ("b", 2)}, c.nodes) # C is added. c.mock_members.append("c:3") @@ -159,7 +161,7 @@ def test_client(self): self.assertEqual(c.address, ("a", 1)) wait_until( - lambda: set([("a", 1), ("b", 2), ("c", 3)]) == c.nodes, "reconnect to both secondaries" + lambda: {("a", 1), ("b", 2), ("c", 3)} == c.nodes, "reconnect to both secondaries" ) def test_replica_set_client(self): @@ -168,14 +170,14 @@ def test_replica_set_client(self): ) self.addCleanup(c.close) - wait_until(lambda: ("a", 1) == c.primary, "discover the primary") - wait_until(lambda: set([("b", 2)]) == c.secondaries, "discover the secondary") + wait_until(lambda: c.primary == ("a", 1), "discover the primary") + wait_until(lambda: {("b", 2)} == c.secondaries, "discover the secondary") # C is added. c.mock_members.append("c:3") c.mock_hello_hosts.append("c:3") - wait_until(lambda: set([("b", 2), ("c", 3)]) == c.secondaries, "discover the new secondary") + wait_until(lambda: {("b", 2), ("c", 3)} == c.secondaries, "discover the new secondary") self.assertEqual(("a", 1), c.primary) diff --git a/test/test_results.py b/test/test_results.py new file mode 100644 index 0000000000..deb09d7ed4 --- /dev/null +++ b/test/test_results.py @@ -0,0 +1,160 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test results module.""" +from __future__ import annotations + +import sys + +sys.path[0:0] = [""] + +from test import unittest + +from pymongo.errors import InvalidOperation +from pymongo.results import ( + BulkWriteResult, + DeleteResult, + InsertManyResult, + InsertOneResult, + UpdateResult, +) + + +class TestResults(unittest.TestCase): + def repr_test(self, cls, result_arg): + for acknowledged in (True, False): + result = cls(result_arg, acknowledged) + expected_repr = "%s(%r, acknowledged=%r)" % (cls.__name__, result_arg, acknowledged) + self.assertEqual(acknowledged, result.acknowledged) + self.assertEqual(expected_repr, repr(result)) + + def test_bulk_write_result(self): + raw_result = { + "writeErrors": [], + "writeConcernErrors": [], + "nInserted": 1, + "nUpserted": 2, + "nMatched": 2, + "nModified": 2, + "nRemoved": 2, + "upserted": [ + {"index": 5, "_id": 1}, + {"index": 9, "_id": 2}, + ], + } + self.repr_test(BulkWriteResult, raw_result) + + result = BulkWriteResult(raw_result, True) + self.assertEqual(raw_result, result.bulk_api_result) + self.assertEqual(raw_result["nInserted"], result.inserted_count) + self.assertEqual(raw_result["nMatched"], result.matched_count) + self.assertEqual(raw_result["nModified"], result.modified_count) + self.assertEqual(raw_result["nRemoved"], result.deleted_count) + self.assertEqual(raw_result["nUpserted"], result.upserted_count) + self.assertEqual({5: 1, 9: 2}, result.upserted_ids) + + result = BulkWriteResult(raw_result, False) + self.assertEqual(raw_result, result.bulk_api_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.inserted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.matched_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.modified_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.deleted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_ids + + def test_delete_result(self): + raw_result = {"n": 5} + self.repr_test(DeleteResult, {"n": 0}) + + result = DeleteResult(raw_result, True) + self.assertEqual(raw_result, result.raw_result) + self.assertEqual(raw_result["n"], result.deleted_count) + + result = DeleteResult(raw_result, False) + self.assertEqual(raw_result, result.raw_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.deleted_count + + def test_insert_many_result(self): + inserted_ids = [1, 2, 3] + self.repr_test(InsertManyResult, inserted_ids) + + for acknowledged in (True, False): + result = InsertManyResult(inserted_ids, acknowledged) + self.assertEqual(inserted_ids, result.inserted_ids) + + def test_insert_one_result(self): + self.repr_test(InsertOneResult, 0) + + for acknowledged in (True, False): + result = InsertOneResult(0, acknowledged) + self.assertEqual(0, result.inserted_id) + + def test_update_result(self): + raw_result = { + "n": 1, + "nModified": 1, + 
"upserted": None, + } + self.repr_test(UpdateResult, raw_result) + + result = UpdateResult(raw_result, True) + self.assertEqual(raw_result, result.raw_result) + self.assertEqual(raw_result["n"], result.matched_count) + self.assertEqual(raw_result["nModified"], result.modified_count) + self.assertEqual(raw_result["upserted"], result.upserted_id) + self.assertEqual(result.did_upsert, True) + + raw_result_2 = { + "n": 1, + "nModified": 1, + "upserted": [ + {"index": 5, "_id": 1}, + ], + } + self.repr_test(UpdateResult, raw_result_2) + + result = UpdateResult(raw_result_2, True) + self.assertEqual(result.did_upsert, True) + + raw_result_3 = { + "n": 1, + "nModified": 1, + } + self.repr_test(UpdateResult, raw_result_3) + + result = UpdateResult(raw_result_3, True) + self.assertEqual(result.did_upsert, False) + + result = UpdateResult(raw_result, False) + self.assertEqual(raw_result, result.raw_result) + error_msg = "A value for .* is not available when" + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.matched_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.modified_count + with self.assertRaisesRegex(InvalidOperation, error_msg): + result.upserted_id + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_retryable_reads.py b/test/test_retryable_reads.py index 2b8bc17c58..c9f72ae547 100644 --- a/test/test_retryable_reads.py +++ b/test/test_retryable_reads.py @@ -13,11 +13,15 @@ # limitations under the License. """Test retryable reads spec.""" +from __future__ import annotations import os import pprint import sys import threading +from test.utils import set_fail_point + +from pymongo.errors import OperationFailure sys.path[0:0] = [""] @@ -28,120 +32,39 @@ client_knobs, unittest, ) -from test.utils import ( +from test.utils_shared import ( CMAPListener, OvertCommandListener, - TestCreator, - rs_or_single_client, ) -from test.utils_spec_runner import SpecRunner -from pymongo.mongo_client import MongoClient from pymongo.monitoring import ( ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, ConnectionCheckOutFailedReason, PoolClearedEvent, ) -from pymongo.write_concern import WriteConcern -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "legacy") +_IS_SYNC = True class TestClientOptions(PyMongoTestCase): def test_default(self): - client = MongoClient(connect=False) + client = self.simple_client(connect=False) self.assertEqual(client.options.retry_reads, True) def test_kwargs(self): - client = MongoClient(retryReads=True, connect=False) + client = self.simple_client(retryReads=True, connect=False) self.assertEqual(client.options.retry_reads, True) - client = MongoClient(retryReads=False, connect=False) + client = self.simple_client(retryReads=False, connect=False) self.assertEqual(client.options.retry_reads, False) def test_uri(self): - client = MongoClient("mongodb://h/?retryReads=true", connect=False) + client = self.simple_client("mongodb://h/?retryReads=true", connect=False) self.assertEqual(client.options.retry_reads, True) - client = MongoClient("mongodb://h/?retryReads=false", connect=False) + client = self.simple_client("mongodb://h/?retryReads=false", connect=False) self.assertEqual(client.options.retry_reads, False) -class TestSpec(SpecRunner): - RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True - - @classmethod - @client_context.require_failCommand_fail_point - # TODO: remove this once PYTHON-1948 is done. 
- @client_context.require_no_mmap - def setUpClass(cls): - super(TestSpec, cls).setUpClass() - - def maybe_skip_scenario(self, test): - super(TestSpec, self).maybe_skip_scenario(test) - skip_names = ["listCollectionObjects", "listIndexNames", "listDatabaseObjects"] - for name in skip_names: - if name.lower() in test["description"].lower(): - self.skipTest("PyMongo does not support %s" % (name,)) - - # Serverless does not support $out and collation. - if client_context.serverless: - for operation in test["operations"]: - if operation["name"] == "aggregate": - for stage in operation["arguments"]["pipeline"]: - if "$out" in stage: - self.skipTest("MongoDB Serverless does not support $out") - if "collation" in operation["arguments"]: - self.skipTest("MongoDB Serverless does not support collations") - - # Skip changeStream related tests on MMAPv1 and serverless. - test_name = self.id().rsplit(".")[-1] - if "changestream" in test_name.lower(): - if client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support change streams.") - if client_context.serverless: - self.skipTest("Serverless does not support change streams.") - - def get_scenario_coll_name(self, scenario_def): - """Override a test's collection name to support GridFS tests.""" - if "bucket_name" in scenario_def: - return scenario_def["bucket_name"] - return super(TestSpec, self).get_scenario_coll_name(scenario_def) - - def setup_scenario(self, scenario_def): - """Override a test's setup to support GridFS tests.""" - if "bucket_name" in scenario_def: - data = scenario_def["data"] - db_name = self.get_scenario_db_name(scenario_def) - db = client_context.client[db_name] - # Create a bucket for the retryable reads GridFS tests with as few - # majority writes as possible. - wc = WriteConcern(w="majority") - if data: - db["fs.chunks"].drop() - db["fs.files"].drop() - db["fs.chunks"].insert_many(data["fs.chunks"]) - db.get_collection("fs.files", write_concern=wc).insert_many(data["fs.files"]) - else: - db.get_collection("fs.chunks").drop() - db.get_collection("fs.files", write_concern=wc).drop() - else: - super(TestSpec, self).setup_scenario(scenario_def) - - -def create_test(scenario_def, test, name): - @client_context.require_test_commands - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_test, TestSpec, _TEST_PATH) -test_creator.create_tests() - - class FindThread(threading.Thread): def __init__(self, collection): super().__init__() @@ -157,15 +80,19 @@ def run(self): class TestPoolPausedError(IntegrationTest): # Pools don't get paused in load balanced mode. 
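For context before the test body: the pause is induced with the failCommand fail point's blockConnection option, which holds the server's reply long enough for a concurrent check-out to observe a paused pool. A sketch of the kind of fail point document involved (requires a server started with enableTestCommands; the exact values here are illustrative):

    fail_command = {
        "configureFailPoint": "failCommand",
        "mode": {"times": 1},
        "data": {
            "failCommands": ["find"],
            "errorCode": 91,          # retryable "ShutdownInProgress" error
            "blockConnection": True,  # delay the server's reply
            "blockTimeMS": 1000,
        },
    }
    client.admin.command(fail_command)  # 'client' is any connected MongoClient
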
RUN_ON_LOAD_BALANCER = False - RUN_ON_SERVERLESS = False + @client_context.require_sync @client_context.require_failCommand_blockConnection @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_pool_paused_error_is_retryable(self): + if "PyPy" in sys.version: + # Tracked in PYTHON-3519 + self.skipTest("Test is flaky on PyPy") cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() - client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) for _ in range(10): cmap_listener.reset() cmd_listener.reset() @@ -208,14 +135,130 @@ def test_pool_paused_error_is_retryable(self): # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. - started = cmd_listener.results["started"] + started = cmd_listener.started_events msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results["succeeded"] + succeeded = cmd_listener.succeeded_events self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results["failed"] + failed = cmd_listener.failed_events self.assertEqual(1, len(failed), msg) +class TestRetryableReads(IntegrationTest): + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_reads_are_retried_on_a_different_mongos_when_one_is_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + mongos_clients = [] + + for mongos in client_context.mongos_seeds().split(","): + client = self.rs_or_single_client(mongos) + set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = OvertCommandListener() + client = self.rs_or_single_client( + client_context.mongos_seeds(), + event_listeners=[listener], + retryReads=True, + ) + + with self.assertRaises(OperationFailure): + client.t.t.find_one({}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + + # Assert that both events occurred on different mongos. + assert listener.failed_events[0].connection_id != listener.failed_events[1].connection_id + + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_reads_are_retried_on_the_same_mongos_when_no_others_are_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": ["find"], "errorCode": 6}, + } + + host = client_context.mongos_seeds().split(",")[0] + mongos_client = self.rs_or_single_client(host) + set_fail_point(mongos_client, fail_command) + + listener = OvertCommandListener() + client = self.rs_or_single_client( + host, + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + client.t.t.find_one({}) + + # Disable failpoint. + fail_command["mode"] = "off" + set_fail_point(mongos_client, fail_command) + + # Assert that exactly one failed command event and one succeeded command event occurred. 
+ self.assertEqual(len(listener.failed_events), 1) + self.assertEqual(len(listener.succeeded_events), 1) + + # Assert that both events occurred on the same mongos. + assert listener.succeeded_events[0].connection_id == listener.failed_events[0].connection_id + + @client_context.require_failCommand_fail_point + def test_retryable_reads_are_retried_on_the_same_implicit_session(self): + listener = OvertCommandListener() + client = self.rs_or_single_client( + directConnection=False, + event_listeners=[listener], + retryReads=True, + ) + + client.t.t.insert_one({"x": 1}) + + commands = [ + ("aggregate", lambda: client.t.t.count_documents({})), + ("aggregate", lambda: client.t.t.aggregate([{"$match": {}}])), + ("count", lambda: client.t.t.estimated_document_count()), + ("distinct", lambda: client.t.t.distinct("x")), + ("find", lambda: client.t.t.find_one({})), + ("listDatabases", lambda: client.list_databases()), + ("listCollections", lambda: client.t.list_collections()), + ("listIndexes", lambda: client.t.t.list_indexes()), + ] + + for command_name, operation in commands: + listener.reset() + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": {"failCommands": [command_name], "errorCode": 6}, + } + + with self.fail_point(fail_command): + operation() + + # Assert that both events occurred on the same session. + command_docs = [ + event.command + for event in listener.started_events + if event.command_name == command_name + ] + self.assertEqual(len(command_docs), 2) + self.assertEqual(command_docs[0]["lsid"], command_docs[1]["lsid"]) + self.assertIsNot(command_docs[0], command_docs[1]) + + if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_reads_unified.py b/test/test_retryable_reads_unified.py index 6bf4157763..b1c6435c9a 100644 --- a/test/test_retryable_reads_unified.py +++ b/test/test_retryable_reads_unified.py @@ -13,20 +13,34 @@ # limitations under the License. """Test the Retryable Reads unified spec tests.""" +from __future__ import annotations import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_reads", "unified") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_reads/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_reads/unified") # Generate unified tests. -globals().update(generate_test_classes(TEST_PATH, module=__name__)) +# PyMongo does not support MapReduce, ListDatabaseObjects or ListCollectionObjects. +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + expected_failures=["ListDatabaseObjects .*", "ListCollectionObjects .*", "MapReduce .*"], + ) +) if __name__ == "__main__": unittest.main() diff --git a/test/test_retryable_writes.py b/test/test_retryable_writes.py index 0eb863f4cf..a74a3e8030 100644 --- a/test/test_retryable_writes.py +++ b/test/test_retryable_writes.py @@ -1,4 +1,4 @@ -# Copyright 2017 MongoDB, Inc. +# Copyright 2017-present MongoDB, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,24 +13,30 @@ # limitations under the License. 
"""Test retryable writes.""" +from __future__ import annotations +import asyncio import copy -import os import pprint import sys import threading +from test.utils import flaky, set_fail_point sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, client_knobs, unittest -from test.utils import ( +from test import ( + IntegrationTest, + SkipTest, + client_context, + unittest, +) +from test.helpers import client_knobs +from test.utils_shared import ( CMAPListener, DeprecationFilter, + EventListener, OvertCommandListener, - TestCreator, - rs_or_single_client, ) -from test.utils_spec_runner import SpecRunner from test.version import Version from bson.codec_options import DEFAULT_CODEC_OPTIONS @@ -38,13 +44,14 @@ from bson.raw_bson import RawBSONDocument from bson.son import SON from pymongo.errors import ( + AutoReconnect, ConnectionFailure, OperationFailure, ServerSelectionTimeoutError, WriteConcernError, ) -from pymongo.mongo_client import MongoClient from pymongo.monitoring import ( + CommandSucceededEvent, ConnectionCheckedOutEvent, ConnectionCheckOutFailedEvent, ConnectionCheckOutFailedReason, @@ -60,69 +67,50 @@ ) from pymongo.write_concern import WriteConcern -# Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "legacy") - - -class TestAllScenarios(SpecRunner): - RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True - - def get_object_name(self, op): - return op.get("object", "collection") - - def get_scenario_db_name(self, scenario_def): - return scenario_def.get("database_name", "pymongo_test") - - def get_scenario_coll_name(self, scenario_def): - return scenario_def.get("collection_name", "test") - - def run_test_ops(self, sessions, collection, test): - # Transform retryable writes spec format into transactions. 
- operation = test["operation"] - outcome = test["outcome"] - if "error" in outcome: - operation["error"] = outcome["error"] - if "result" in outcome: - operation["result"] = outcome["result"] - test["operations"] = [operation] - super(TestAllScenarios, self).run_test_ops(sessions, collection, test) - - -def create_test(scenario_def, test, name): - @client_context.require_test_commands - @client_context.require_no_mmap - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_test, TestAllScenarios, _TEST_PATH) -test_creator.create_tests() +_IS_SYNC = True + + +class InsertEventListener(EventListener): + def succeeded(self, event: CommandSucceededEvent) -> None: + super().succeeded(event) + if ( + event.command_name == "insert" + and event.reply.get("writeConcernError", {}).get("code", None) == 91 + ): + client_context.client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "errorCode": 10107, + "errorLabels": ["RetryableWriteError", "NoWritesPerformed"], + "failCommands": ["insert"], + }, + } + ) def retryable_single_statement_ops(coll): return [ (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {}), (coll.bulk_write, [[InsertOne({}), InsertOne({})]], {"ordered": False}), - (coll.bulk_write, [[ReplaceOne({}, {})]], {}), - (coll.bulk_write, [[ReplaceOne({}, {}), ReplaceOne({}, {})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a1": 1})]], {}), + (coll.bulk_write, [[ReplaceOne({}, {"a2": 1}), ReplaceOne({}, {"a3": 1})]], {}), ( coll.bulk_write, - [[UpdateOne({}, {"$set": {"a": 1}}), UpdateOne({}, {"$set": {"a": 1}})]], + [[UpdateOne({}, {"$set": {"a4": 1}}), UpdateOne({}, {"$set": {"a5": 1}})]], {}, ), (coll.bulk_write, [[DeleteOne({})]], {}), (coll.bulk_write, [[DeleteOne({}), DeleteOne({})]], {}), (coll.insert_one, [{}], {}), (coll.insert_many, [[{}, {}]], {}), - (coll.replace_one, [{}, {}], {}), - (coll.update_one, [{}, {"$set": {"a": 1}}], {}), + (coll.replace_one, [{}, {"a6": 1}], {}), + (coll.update_one, [{}, {"$set": {"a7": 1}}], {}), (coll.delete_one, [{}], {}), - (coll.find_one_and_replace, [{}, {"a": 3}], {}), - (coll.find_one_and_update, [{}, {"$set": {"a": 1}}], {}), - (coll.find_one_and_delete, [{}, {}], {}), + (coll.find_one_and_replace, [{}, {"a8": 1}], {}), + (coll.find_one_and_update, [{}, {"$set": {"a9": 1}}], {}), + (coll.find_one_and_delete, [{}, {"a10": 1}], {}), ] @@ -141,75 +129,30 @@ def non_retryable_single_statement_ops(coll): class IgnoreDeprecationsTest(IntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True deprecation_filter: DeprecationFilter - @classmethod - def setUpClass(cls): - super(IgnoreDeprecationsTest, cls).setUpClass() - cls.deprecation_filter = DeprecationFilter() - - @classmethod - def tearDownClass(cls): - cls.deprecation_filter.stop() - super(IgnoreDeprecationsTest, cls).tearDownClass() + def setUp(self) -> None: + super().setUp() + self.deprecation_filter = DeprecationFilter() - -class TestRetryableWritesMMAPv1(IgnoreDeprecationsTest): - knobs: client_knobs - - @classmethod - def setUpClass(cls): - super(TestRetryableWritesMMAPv1, cls).setUpClass() - # Speed up the tests by decreasing the heartbeat frequency. 
- cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - cls.client = rs_or_single_client(retryWrites=True) - cls.db = cls.client.pymongo_test - - @classmethod - def tearDownClass(cls): - cls.knobs.disable() - cls.client.close() - super(TestRetryableWritesMMAPv1, cls).tearDownClass() - - @client_context.require_no_standalone - def test_actionable_error_message(self): - if client_context.storage_engine != "mmapv1": - raise SkipTest("This cluster is not running MMAPv1") - - expected_msg = ( - "This MongoDB deployment does not support retryable " - "writes. Please add retryWrites=false to your " - "connection string." - ) - for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - with self.assertRaisesRegex(OperationFailure, expected_msg): - method(*args, **kwargs) + def tearDown(self) -> None: + super().tearDown() + self.deprecation_filter.stop() class TestRetryableWrites(IgnoreDeprecationsTest): listener: OvertCommandListener knobs: client_knobs - @classmethod - @client_context.require_no_mmap - def setUpClass(cls): - super(TestRetryableWrites, cls).setUpClass() + def setUp(self) -> None: + super().setUp() # Speed up the tests by decreasing the heartbeat frequency. - cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - cls.listener = OvertCommandListener() - cls.client = rs_or_single_client(retryWrites=True, event_listeners=[cls.listener]) - cls.db = cls.client.pymongo_test - - @classmethod - def tearDownClass(cls): - cls.knobs.disable() - cls.client.close() - super(TestRetryableWrites, cls).tearDownClass() - - def setUp(self): + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() + self.listener = OvertCommandListener() + self.client = self.rs_or_single_client(retryWrites=True, event_listeners=[self.listener]) + self.db = self.client.pymongo_test + if client_context.is_rs and client_context.test_commands_enabled: self.client.admin.command( SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "alwaysOn")]) @@ -220,77 +163,37 @@ def tearDown(self): self.client.admin.command( SON([("configureFailPoint", "onPrimaryTransactionalWrite"), ("mode", "off")]) ) + self.knobs.disable() + super().tearDown() def test_supported_single_statement_no_retry(self): listener = OvertCommandListener() - client = rs_or_single_client(retryWrites=False, event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(retryWrites=False, event_listeners=[listener]) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() method(*args, **kwargs) - for event in listener.results["started"]: + for event in listener.started_events: self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) - @client_context.require_no_standalone - def test_supported_single_statement_supported_cluster(self): - for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() - method(*args, **kwargs) - commands_started = self.listener.results["started"] - self.assertEqual(len(self.listener.results["succeeded"]), 1, 
msg) - first_attempt = commands_started[0] - self.assertIn( - "lsid", - first_attempt.command, - "%s sent no lsid with %s" % (msg, first_attempt.command_name), - ) - initial_session_id = first_attempt.command["lsid"] - self.assertIn( - "txnNumber", - first_attempt.command, - "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), - ) - - # There should be no retry when the failpoint is not active. - if client_context.is_mongos or not client_context.test_commands_enabled: - self.assertEqual(len(commands_started), 1) - continue - - initial_transaction_id = first_attempt.command["txnNumber"] - retry_attempt = commands_started[1] - self.assertIn( - "lsid", - retry_attempt.command, - "%s sent no lsid with %s" % (msg, first_attempt.command_name), - ) - self.assertEqual(retry_attempt.command["lsid"], initial_session_id, msg) - self.assertIn( - "txnNumber", - retry_attempt.command, - "%s sent no txnNumber with %s" % (msg, first_attempt.command_name), - ) - self.assertEqual(retry_attempt.command["txnNumber"], initial_transaction_id, msg) - def test_supported_single_statement_unsupported_cluster(self): if client_context.is_rs or client_context.is_mongos: raise SkipTest("This cluster supports retryable writes") for method, args, kwargs in retryable_single_statement_ops(self.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() method(*args, **kwargs) - for event in self.listener.results["started"]: + for event in self.listener.started_events: self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) def test_unsupported_single_statement(self): @@ -300,34 +203,34 @@ def test_unsupported_single_statement(self): for method, args, kwargs in non_retryable_single_statement_ops( coll ) + retryable_single_statement_ops(coll_w0): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - self.listener.results.clear() + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + self.listener.reset() method(*args, **kwargs) - started_events = self.listener.results["started"] - self.assertEqual(len(self.listener.results["succeeded"]), len(started_events), msg) - self.assertEqual(len(self.listener.results["failed"]), 0, msg) + started_events = self.listener.started_events + self.assertEqual(len(self.listener.succeeded_events), len(started_events), msg) + self.assertEqual(len(self.listener.failed_events), 0, msg) for event in started_events: self.assertNotIn( "txnNumber", event.command, - "%s sent txnNumber with %s" % (msg, event.command_name), + f"{msg} sent txnNumber with {event.command_name}", ) def test_server_selection_timeout_not_retried(self): """A ServerSelectionTimeoutError is not retried.""" listener = OvertCommandListener() - client = MongoClient( + client = self.simple_client( "somedomainthatdoesntexist.org", serverSelectionTimeoutMS=1, retryWrites=True, event_listeners=[listener], ) for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() with self.assertRaises(ServerSelectionTimeoutError, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 0, msg) + self.assertEqual(len(listener.started_events), 0, msg) 
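Editorial aside between these tests: the retryable-write tests in this file drive retries with the server's "failCommand" fail point. A minimal standalone sketch of that pattern, assuming a test-commands-enabled replica set and the default retryWrites=True; the collection names here are illustrative and not part of this patch:

    from pymongo import MongoClient

    client = MongoClient()
    # Fail the next insert once by closing the connection; the driver
    # should transparently retry the write on a second attempt.
    client.admin.command(
        {
            "configureFailPoint": "failCommand",
            "mode": {"times": 1},
            "data": {"failCommands": ["insert"], "closeConnection": True},
        }
    )
    try:
        client.test.coll.insert_one({"x": 1})  # succeeds on the retry
    finally:
        client.admin.command({"configureFailPoint": "failCommand", "mode": "off"})
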
@client_context.require_replica_set @client_context.require_test_commands @@ -336,8 +239,7 @@ def test_retry_timeout_raises_original_error(self): original error. """ listener = OvertCommandListener() - client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) topology = client._topology select_server = topology.select_server @@ -352,12 +254,12 @@ def raise_error(*args, **kwargs): return server for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - listener.results.clear() + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + listener.reset() topology.select_server = mock_select_server with self.assertRaises(ConnectionFailure, msg=msg): method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 1, msg) + self.assertEqual(len(listener.started_events), 1, msg) @client_context.require_replica_set @client_context.require_test_commands @@ -366,7 +268,7 @@ def test_batch_splitting(self): large = "s" * 1024 * 1024 * 15 coll = self.db.retryable_write_test coll.delete_many({}) - self.listener.results.clear() + self.listener.reset() bulk_result = coll.bulk_write( [ InsertOne({"_id": 1, "l": large}), @@ -381,7 +283,7 @@ def test_batch_splitting(self): # Each command should fail and be retried. # With OP_MSG 3 inserts are one batch. 2 updates another. # 2 deletes a third. - self.assertEqual(len(self.listener.results["started"]), 6) + self.assertEqual(len(self.listener.started_events), 6) self.assertEqual(coll.find_one(), {"_id": 1, "count": 1}) # Assert the final result expected_result = { @@ -412,9 +314,9 @@ def test_batch_splitting_retry_fails(self): ] ) ) - self.listener.results.clear() + self.listener.reset() with self.client.start_session() as session: - initial_txn = session._server_session._transaction_id + initial_txn = session._transaction_id try: coll.bulk_write( [ @@ -430,9 +332,9 @@ def test_batch_splitting_retry_fails(self): else: self.fail("bulk_write should have failed") - started = self.listener.results["started"] + started = self.listener.started_events self.assertEqual(len(started), 3) - self.assertEqual(len(self.listener.results["succeeded"]), 1) + self.assertEqual(len(self.listener.succeeded_events), 1) expected_txn = Int64(initial_txn + 1) self.assertEqual(started[0].command["txnNumber"], expected_txn) self.assertEqual(started[0].command["lsid"], session.session_id) @@ -442,23 +344,59 @@ def test_batch_splitting_retry_fails(self): started[1].command.pop("$clusterTime") started[2].command.pop("$clusterTime") self.assertEqual(started[1].command, started[2].command) - final_txn = session._server_session._transaction_id + final_txn = session._transaction_id self.assertEqual(final_txn, expected_txn) self.assertEqual(coll.find_one(projection={"_id": True}), {"_id": 1}) + @client_context.require_multiple_mongoses + @client_context.require_failCommand_fail_point + def test_retryable_writes_in_sharded_cluster_multiple_available(self): + fail_command = { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "failCommands": ["insert"], + "closeConnection": True, + "appName": "retryableWriteTest", + }, + } + + mongos_clients = [] + + for mongos in client_context.mongos_seeds().split(","): + client = self.rs_or_single_client(mongos) + set_fail_point(client, fail_command) + mongos_clients.append(client) + + listener = 
OvertCommandListener() + client = self.rs_or_single_client( + client_context.mongos_seeds(), + appName="retryableWriteTest", + event_listeners=[listener], + retryWrites=True, + ) + + with self.assertRaises(AutoReconnect): + client.t.t.insert_one({"x": 1}) + + # Disable failpoints on each mongos + for client in mongos_clients: + fail_command["mode"] = "off" + set_fail_point(client, fail_command) + + self.assertEqual(len(listener.failed_events), 2) + self.assertEqual(len(listener.succeeded_events), 0) + class TestWriteConcernError(IntegrationTest): RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True fail_insert: dict - @classmethod @client_context.require_replica_set - @client_context.require_no_mmap @client_context.require_failCommand_fail_point - def setUpClass(cls): - super(TestWriteConcernError, cls).setUpClass() - cls.fail_insert = { + def setUp(self) -> None: + super().setUp() + self.fail_insert = { "configureFailPoint": "failCommand", "mode": {"times": 2}, "data": { @@ -468,9 +406,10 @@ def setUpClass(cls): } @client_context.require_version_min(4, 0) + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) def test_RetryableWriteError_error_label(self): listener = OvertCommandListener() - client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) # Ensure collection exists. client.pymongo_test.testcoll.insert_one({}) @@ -482,9 +421,7 @@ def test_RetryableWriteError_error_label(self): if client_context.version >= Version(4, 4): # In MongoDB 4.4+ we rely on the server returning the error label. - self.assertIn( - "RetryableWriteError", listener.results["succeeded"][-1].reply["errorLabels"] - ) + self.assertIn("RetryableWriteError", listener.succeeded_events[-1].reply["errorLabels"]) @client_context.require_version_min(4, 4) def test_RetryableWriteError_error_label_RawBSONDocument(self): @@ -496,7 +433,7 @@ def test_RetryableWriteError_error_label_RawBSONDocument(self): "insert", "testcoll", documents=[{"_id": 1}], - txnNumber=s._server_session.transaction_id, + txnNumber=s._transaction_id, session=s, codec_options=DEFAULT_CODEC_OPTIONS.with_options( document_class=RawBSONDocument @@ -522,16 +459,18 @@ def run(self): class TestPoolPausedError(IntegrationTest): # Pools don't get paused in load balanced mode. RUN_ON_LOAD_BALANCER = False - RUN_ON_SERVERLESS = False + @client_context.require_sync @client_context.require_failCommand_blockConnection @client_context.require_retryable_writes @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + @flaky(reason="PYTHON-5291") def test_pool_paused_error_is_retryable(self): cmap_listener = CMAPListener() cmd_listener = OvertCommandListener() - client = rs_or_single_client(maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client( + maxPoolSize=1, event_listeners=[cmap_listener, cmd_listener] + ) for _ in range(10): cmap_listener.reset() cmd_listener.reset() @@ -574,26 +513,61 @@ def test_pool_paused_error_is_retryable(self): # Connection check out failures are not reflected in command # monitoring because we only publish command events _after_ checking # out a connection. 
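Editorial aside: helpers like OvertCommandListener and CMAPListener used throughout these tests are built on PyMongo's public monitoring API. A self-contained sketch of a command listener exposing the same started/succeeded/failed event lists the assertions below rely on (the class name is mine, not part of this patch):

    from pymongo import MongoClient, monitoring

    class RecordingListener(monitoring.CommandListener):
        """Collect command events, mirroring what the test helpers expose."""

        def __init__(self):
            self.started_events = []
            self.succeeded_events = []
            self.failed_events = []

        def started(self, event):
            self.started_events.append(event)

        def succeeded(self, event):
            self.succeeded_events.append(event)

        def failed(self, event):
            self.failed_events.append(event)

    listener = RecordingListener()
    client = MongoClient(event_listeners=[listener])
    client.test.coll.insert_one({})
    # Auth commands may also appear, so search rather than index.
    assert any(e.command_name == "insert" for e in listener.started_events)
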
- started = cmd_listener.results["started"] + started = cmd_listener.started_events msg = pprint.pformat(cmd_listener.results) self.assertEqual(3, len(started), msg) - succeeded = cmd_listener.results["succeeded"] + succeeded = cmd_listener.succeeded_events self.assertEqual(2, len(succeeded), msg) - failed = cmd_listener.results["failed"] + failed = cmd_listener.failed_events self.assertEqual(1, len(failed), msg) + @client_context.require_sync + @client_context.require_failCommand_fail_point + @client_context.require_replica_set + @client_context.require_version_min( + 6, 0, 0 + ) # the spec requires that this prose test only be run on 6.0+ + @client_knobs(heartbeat_frequency=0.05, min_heartbeat_interval=0.05) + def test_returns_original_error_code( + self, + ): + cmd_listener = InsertEventListener() + client = self.rs_or_single_client(retryWrites=True, event_listeners=[cmd_listener]) + client.test.test.drop() + cmd_listener.reset() + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": {"times": 1}, + "data": { + "writeConcernError": { + "code": 91, + "errorLabels": ["RetryableWriteError"], + }, + "failCommands": ["insert"], + }, + } + ) + with self.assertRaises(WriteConcernError) as exc: + client.test.test.insert_one({"_id": 1}) + self.assertEqual(exc.exception.code, 91) + client.admin.command( + { + "configureFailPoint": "failCommand", + "mode": "off", + } + ) + # TODO: Make this a real integration test where we stepdown the primary. class TestRetryableWritesTxnNumber(IgnoreDeprecationsTest): @client_context.require_replica_set - @client_context.require_no_mmap def test_increment_transaction_id_without_sending_command(self): """Test that the txnNumber field is properly incremented, even when the first attempt fails before sending the command. """ listener = OvertCommandListener() - client = rs_or_single_client(retryWrites=True, event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(retryWrites=True, event_listeners=[listener]) topology = client._topology select_server = topology.select_server @@ -604,21 +578,21 @@ def raise_connection_err_select_server(*args, **kwargs): raise ConnectionFailure("Connection refused") for method, args, kwargs in retryable_single_statement_ops(client.db.retryable_write_test): - listener.results.clear() + listener.reset() topology.select_server = raise_connection_err_select_server with client.start_session() as session: kwargs = copy.deepcopy(kwargs) kwargs["session"] = session - msg = "%s(*%r, **%r)" % (method.__name__, args, kwargs) - initial_txn_id = session._server_session.transaction_id + msg = f"{method.__name__}(*{args!r}, **{kwargs!r})" + initial_txn_id = session._transaction_id # Each operation should fail on the first attempt and succeed # on the second. 
method(*args, **kwargs) - self.assertEqual(len(listener.results["started"]), 1, msg) - retry_cmd = listener.results["started"][0].command + self.assertEqual(len(listener.started_events), 1, msg) + retry_cmd = listener.started_events[0].command sent_txn_id = retry_cmd["txnNumber"] - final_txn_id = session._server_session.transaction_id + final_txn_id = session._transaction_id self.assertEqual(Int64(initial_txn_id + 1), sent_txn_id, msg) self.assertEqual(sent_txn_id, final_txn_id, msg) diff --git a/test/test_retryable_writes_unified.py b/test/test_retryable_writes_unified.py index 4e97c14d4b..036c410e24 100644 --- a/test/test_retryable_writes_unified.py +++ b/test/test_retryable_writes_unified.py @@ -13,17 +13,24 @@ # limitations under the License. """Test the Retryable Writes unified spec tests.""" +from __future__ import annotations import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "retryable_writes", "unified") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "retryable_writes/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "retryable_writes/unified") # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_run_command.py b/test/test_run_command.py new file mode 100644 index 0000000000..d2ef43b97e --- /dev/null +++ b/test/test_run_command.py @@ -0,0 +1,41 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run Command unified tests.""" +from __future__ import annotations + +import os +import unittest +from pathlib import Path +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "run_command") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "run_command") + + +globals().update( + generate_test_classes( + os.path.join(TEST_PATH, "unified"), + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_saslprep.py b/test/test_saslprep.py index c07870dad6..e825cafa35 100644 --- a/test/test_saslprep.py +++ b/test/test_saslprep.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import sys @@ -24,7 +25,7 @@ class TestSASLprep(unittest.TestCase): def test_saslprep(self): try: - import stringprep # noqa + import stringprep except ImportError: self.assertRaises(TypeError, saslprep, "anything...") # Bytes strings are ignored. 
diff --git a/test/test_sdam_monitoring_spec.py b/test/test_sdam_monitoring_spec.py index d7b3744399..2167e561cf 100644 --- a/test/test_sdam_monitoring_spec.py +++ b/test/test_sdam_monitoring_spec.py @@ -13,54 +13,61 @@ # limitations under the License. """Run the sdam monitoring spec tests.""" +from __future__ import annotations +import asyncio import json import os import sys import time +from pathlib import Path sys.path[0:0] = [""] from test import IntegrationTest, client_context, client_knobs, unittest -from test.utils import ( +from test.utils_shared import ( ServerAndTopologyEventListener, - rs_or_single_client, server_name_to_type, wait_until, ) from bson.json_util import object_hook from pymongo import MongoClient, monitoring -from pymongo.collection import Collection from pymongo.common import clean_node from pymongo.errors import ConnectionFailure, NotPrimaryError from pymongo.hello import Hello -from pymongo.monitor import Monitor from pymongo.server_description import ServerDescription +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.monitor import Monitor from pymongo.topology_description import TOPOLOGY_TYPE +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sdam_monitoring") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sdam_monitoring") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sdam_monitoring") def compare_server_descriptions(expected, actual): - if (not expected["address"] == "%s:%s" % actual.address) or ( - not server_name_to_type(expected["type"]) == actual.server_type + if (expected["address"] != "{}:{}".format(*actual.address)) or ( + server_name_to_type(expected["type"]) != actual.server_type ): return False expected_hosts = set(expected["arbiters"] + expected["passives"] + expected["hosts"]) - return expected_hosts == set("%s:%s" % s for s in actual.all_hosts) + return expected_hosts == {"{}:{}".format(*s) for s in actual.all_hosts} def compare_topology_descriptions(expected, actual): - if not (TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) == actual.topology_type): + if TOPOLOGY_TYPE.__getattribute__(expected["topologyType"]) != actual.topology_type: return False expected = expected["servers"] actual = actual.server_descriptions() if len(expected) != len(actual): return False for exp_server in expected: - for address, actual_server in actual.items(): + for _address, actual_server in actual.items(): if compare_server_descriptions(exp_server, actual_server): break else: @@ -79,22 +86,23 @@ def compare_events(expected_dict, actual): if expected_type == "server_opening_event": if not isinstance(actual, monitoring.ServerOpeningEvent): return False, "Expected ServerOpeningEvent, got %s" % (actual.__class__) - if not expected["address"] == "%s:%s" % actual.server_address: + if expected["address"] != "{}:{}".format(*actual.server_address): return ( False, - "ServerOpeningEvent published with wrong address (expected" - " %s, got %s" % (expected["address"], actual.server_address), + "ServerOpeningEvent published with wrong address (expected" " {}, got {}".format( + expected["address"], actual.server_address + ), ) elif expected_type == "server_description_changed_event": - if not isinstance(actual, monitoring.ServerDescriptionChangedEvent): return (False, "Expected ServerDescriptionChangedEvent, got %s" % (actual.__class__)) - if not expected["address"] == "%s:%s" % actual.server_address: + if 
expected["address"] != "{}:{}".format(*actual.server_address): return ( False, - "ServerDescriptionChangedEvent has wrong address" - " (expected %s, got %s" % (expected["address"], actual.server_address), + "ServerDescriptionChangedEvent has wrong address" " (expected {}, got {}".format( + expected["address"], actual.server_address + ), ) if not compare_server_descriptions(expected["newDescription"], actual.new_description): @@ -110,16 +118,17 @@ def compare_events(expected_dict, actual): elif expected_type == "server_closed_event": if not isinstance(actual, monitoring.ServerClosedEvent): return False, "Expected ServerClosedEvent, got %s" % (actual.__class__) - if not expected["address"] == "%s:%s" % actual.server_address: + if expected["address"] != "{}:{}".format(*actual.server_address): return ( False, - "ServerClosedEvent published with wrong address" - " (expected %s, got %s" % (expected["address"], actual.server_address), + "ServerClosedEvent published with wrong address" " (expected {}, got {}".format( + expected["address"], actual.server_address + ), ) elif expected_type == "topology_opening_event": if not isinstance(actual, monitoring.TopologyOpenedEvent): - return False, "Expected TopologyOpeningEvent, got %s" % (actual.__class__) + return False, "Expected TopologyOpenedEvent, got %s" % (actual.__class__) elif expected_type == "topology_description_changed_event": if not isinstance(actual, monitoring.TopologyDescriptionChangedEvent): @@ -145,7 +154,7 @@ def compare_events(expected_dict, actual): return False, "Expected TopologyClosedEvent, got %s" % (actual.__class__) else: - return False, "Incorrect event: expected %s, actual %s" % (expected_type, actual) + return False, f"Incorrect event: expected {expected_type}, actual {actual}" return True, "" @@ -170,13 +179,13 @@ def compare_multiple_events(i, expected_results, actual_results): class TestAllScenarios(IntegrationTest): def setUp(self): - super(TestAllScenarios, self).setUp() + super().setUp() self.all_listener = ServerAndTopologyEventListener() def create_test(scenario_def): def run_scenario(self): - with client_knobs(events_queue_frequency=0.1): + with client_knobs(events_queue_frequency=0.05, min_heartbeat_interval=0.05): _run_scenario(self) def _run_scenario(self): @@ -196,7 +205,7 @@ def _run(self): try: for phase in scenario_def["phases"]: - for (source, response) in phase.get("responses", []): + for source, response in phase.get("responses", []): source_address = clean_node(source) topology.on_change( ServerDescription( @@ -213,7 +222,7 @@ def _run(self): ) # Wait some time to catch possible lagging extra events. - time.sleep(0.5) + wait_until(lambda: topology._events.empty(), "publish lagging events") i = 0 while i < expected_len: @@ -235,7 +244,7 @@ def _run(self): # Assert no extra events. extra_events = self.all_listener.results[expected_len:] if extra_events: - self.fail("Extra events %r" % (extra_events,)) + self.fail(f"Extra events {extra_events!r}") self.all_listener.reset() finally: @@ -245,13 +254,13 @@ def _run(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(TEST_PATH): for filename in filenames: with open(os.path.join(dirpath, filename)) as scenario_stream: scenario_def = json.load(scenario_stream, object_hook=object_hook) # Construct test from scenario. 
new_test = create_test(scenario_def) - test_name = "test_%s" % (os.path.splitext(filename)[0],) + test_name = f"test_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) @@ -266,29 +275,33 @@ class TestSdamMonitoring(IntegrationTest): coll: Collection @classmethod - @client_context.require_failCommand_fail_point def setUpClass(cls): - super(TestSdamMonitoring, cls).setUpClass() # Speed up the tests by decreasing the event publish frequency. - cls.knobs = client_knobs(events_queue_frequency=0.1) + cls.knobs = client_knobs( + events_queue_frequency=0.1, heartbeat_frequency=0.1, min_heartbeat_interval=0.1 + ) cls.knobs.enable() cls.listener = ServerAndTopologyEventListener() - retry_writes = client_context.supports_transactions() - cls.test_client = rs_or_single_client( - event_listeners=[cls.listener], retryWrites=retry_writes - ) - cls.coll = cls.test_client[cls.client.db.name].test - cls.coll.insert_one({}) @classmethod def tearDownClass(cls): - cls.test_client.close() cls.knobs.disable() - super(TestSdamMonitoring, cls).tearDownClass() + @client_context.require_failCommand_fail_point def setUp(self): + super().setUp() + + retry_writes = client_context.supports_transactions() + self.test_client = self.rs_or_single_client( + event_listeners=[self.listener], retryWrites=retry_writes + ) + self.coll = self.test_client[self.client.db.name].test + self.coll.insert_one({}) self.listener.reset() + def tearDown(self): + super().tearDown() + def _test_app_error(self, fail_command_opts, expected_error): address = self.test_client.address @@ -330,7 +343,7 @@ def marked_unknown_and_rediscovered(): and len(self.listener.matching(discovered_node)) >= 1 ) - # Topology events are published asynchronously + # Topology events are not published synchronously wait_until(marked_unknown_and_rediscovered, "rediscover node") # Expect a single ServerDescriptionChangedEvent for the network error. diff --git a/test/test_server.py b/test/test_server.py index 064d77d024..ab5a40a79b 100644 --- a/test/test_server.py +++ b/test/test_server.py @@ -13,6 +13,7 @@ # limitations under the License. """Test the server module.""" +from __future__ import annotations import sys @@ -21,16 +22,16 @@ from test import unittest from pymongo.hello import Hello -from pymongo.server import Server from pymongo.server_description import ServerDescription +from pymongo.synchronous.server import Server class TestServer(unittest.TestCase): def test_repr(self): hello = Hello({"ok": 1}) sd = ServerDescription(("localhost", 27017), hello) - server = Server(sd, pool=object(), monitor=object()) - self.assertTrue("Standalone" in str(server)) + server = Server(sd, pool=object(), monitor=object()) # type: ignore[arg-type] + self.assertIn("Standalone", str(server)) if __name__ == "__main__": diff --git a/test/test_server_description.py b/test/test_server_description.py index bb49141d2f..e8c0098cb6 100644 --- a/test/test_server_description.py +++ b/test/test_server_description.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Test the server_description module.""" +from __future__ import annotations import sys @@ -22,6 +23,7 @@ from bson.int64 import Int64 from bson.objectid import ObjectId +from pymongo import common from pymongo.hello import Hello, HelloCompat from pymongo.server_description import ServerDescription from pymongo.server_type import SERVER_TYPE @@ -117,7 +119,7 @@ def test_fields(self): "maxBsonObjectSize": 2, "maxWriteBatchSize": 3, "minWireVersion": 4, - "maxWireVersion": 5, + "maxWireVersion": 25, "setName": "rs", } ) @@ -129,13 +131,15 @@ def test_fields(self): self.assertEqual(2, s.max_bson_size) self.assertEqual(3, s.max_write_batch_size) self.assertEqual(4, s.min_wire_version) - self.assertEqual(5, s.max_wire_version) + self.assertEqual(25, s.max_wire_version) - def test_default_max_message_size(self): - s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True, "maxBsonObjectSize": 2}) - - # Twice max_bson_size. - self.assertEqual(4, s.max_message_size) + def test_defaults(self): + s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) + self.assertEqual(common.MAX_BSON_SIZE, s.max_bson_size) + self.assertEqual(common.MAX_MESSAGE_SIZE, s.max_message_size) + self.assertEqual(common.MIN_WIRE_VERSION, s.min_wire_version) + self.assertEqual(common.MAX_WIRE_VERSION, s.max_wire_version) + self.assertEqual(common.MAX_WRITE_BATCH_SIZE, s.max_write_batch_size) def test_standalone(self): s = parse_hello_response({"ok": 1, HelloCompat.LEGACY_CMD: True}) diff --git a/test/test_server_selection.py b/test/test_server_selection.py index a80d5f13d9..4384deac2b 100644 --- a/test/test_server_selection.py +++ b/test/test_server_selection.py @@ -13,41 +13,52 @@ # limitations under the License. """Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations import os import sys +from pathlib import Path from pymongo import MongoClient, ReadPreference from pymongo.errors import ServerSelectionTimeoutError from pymongo.hello import HelloCompat +from pymongo.operations import _Op from pymongo.server_selectors import writable_server_selector -from pymongo.settings import TopologySettings -from pymongo.topology import Topology +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology +from pymongo.typings import strip_optional sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ( - EventListener, - FunctionCallRecorder, - rs_or_single_client, - wait_until, -) +from test.utils import wait_until from test.utils_selection_tests import ( create_selection_tests, - get_addresses, get_topology_settings_dict, +) +from test.utils_selection_tests_shared import ( + get_addresses, make_server_description, ) +from test.utils_shared import ( + FunctionCallRecorder, + OvertCommandListener, +) + +_IS_SYNC = True # Location of JSON test specifications. 
-_TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), - os.path.join("server_selection", "server_selection"), -) +if _IS_SYNC: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent, "server_selection", "server_selection" + ) +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "server_selection" + ) -class SelectionStoreSelector(object): +class SelectionStoreSelector: """No-op selector that keeps track of what was passed to it.""" def __init__(self): @@ -58,7 +69,7 @@ def __call__(self, selection): return selection -class TestAllScenarios(create_selection_tests(_TEST_PATH)): # type: ignore +class TestAllScenarios(create_selection_tests(TEST_PATH)): # type: ignore pass @@ -72,20 +83,24 @@ def custom_selector(servers): return [servers[idx]] # Initialize client with appropriate listeners. - listener = EventListener() - client = rs_or_single_client(server_selector=custom_selector, event_listeners=[listener]) - self.addCleanup(client.close) + listener = OvertCommandListener() + client = self.rs_or_single_client( + server_selector=custom_selector, event_listeners=[listener] + ) coll = client.get_database("testdb", read_preference=ReadPreference.NEAREST).coll self.addCleanup(client.drop_database, "testdb") # Wait the node list to be fully populated. def all_hosts_started(): - return len(client.admin.command(HelloCompat.LEGACY_CMD)["hosts"]) == len( + return len((client.admin.command(HelloCompat.LEGACY_CMD))["hosts"]) == len( client._topology._description.readable_servers ) wait_until(all_hosts_started, "receive heartbeat from all hosts") - expected_port = max([n.address[1] for n in client._topology._description.readable_servers]) + + expected_port = max( + [strip_optional(n.address[1]) for n in client._topology._description.readable_servers] + ) # Insert 1 record and access it 10 times. coll.insert_one({"name": "John Doe"}) @@ -93,13 +108,13 @@ def all_hosts_started(): coll.find_one({"name": "John Doe"}) # Confirm all find commands are run against appropriate host. - for command in listener.results["started"]: + for command in listener.started_events: if command.command_name == "find": self.assertEqual(command.connection_id[1], expected_port) def test_invalid_server_selector(self): # Client initialization must fail if server_selector is not callable. - for selector_candidate in [list(), 10, "string", {}]: + for selector_candidate in [[], 10, "string", {}]: with self.assertRaisesRegex(ValueError, "must be a callable"): MongoClient(connect=False, server_selector=selector_candidate) @@ -111,17 +126,16 @@ def test_selector_called(self): selector = FunctionCallRecorder(lambda x: x) # Client setup. - mongo_client = rs_or_single_client(server_selector=selector) + mongo_client = self.rs_or_single_client(server_selector=selector) test_collection = mongo_client.testdb.test_collection - self.addCleanup(mongo_client.close) self.addCleanup(mongo_client.drop_database, "testdb") - # Do N operations and test selector is called at least N times. + # Do N operations and test selector is called at least N-1 times due to fast path. 
test_collection.insert_one({"age": 20, "name": "John"}) test_collection.insert_one({"age": 31, "name": "Jane"}) test_collection.update_one({"name": "Jane"}, {"$set": {"age": 21}}) test_collection.find_one({"name": "Roe"}) - self.assertGreaterEqual(selector.call_count, 4) + self.assertGreaterEqual(selector.call_count, 3) @client_context.require_replica_set def test_latency_threshold_application(self): @@ -153,7 +167,7 @@ def test_latency_threshold_application(self): # Invoke server selection and assert no filtering based on latency # prior to custom server selection logic kicking in. - server = topology.select_server(ReadPreference.NEAREST) + server = topology.select_server(ReadPreference.NEAREST, _Op.TEST) assert selector.selection is not None self.assertEqual(len(selector.selection), len(topology.description.server_descriptions())) @@ -188,7 +202,7 @@ def test_server_selector_bypassed(self): # Invoke server selection and assert no calls to our custom selector. with self.assertRaisesRegex(ServerSelectionTimeoutError, "No primary available for writes"): - topology.select_server(writable_server_selector, server_selection_timeout=0.1) + topology.select_server(writable_server_selector, _Op.TEST, server_selection_timeout=0.1) self.assertEqual(selector.call_count, 0) diff --git a/test/test_server_selection_in_window.py b/test/test_server_selection_in_window.py index cae2d7661b..fcf2cce0e0 100644 --- a/test/test_server_selection_in_window.py +++ b/test/test_server_selection_in_window.py @@ -13,26 +13,36 @@ # limitations under the License. """Test the topology module's Server Selection Spec implementation.""" +from __future__ import annotations +import asyncio import os import threading +from pathlib import Path from test import IntegrationTest, client_context, unittest -from test.utils import ( +from test.helpers import ConcurrentRunner +from test.utils import flaky +from test.utils_selection_tests import create_topology +from test.utils_shared import ( + CMAPListener, OvertCommandListener, - TestCreator, - get_pool, - rs_client, wait_until, ) -from test.utils_selection_tests import create_topology +from test.utils_spec_runner import SpecTestCreator from pymongo.common import clean_node +from pymongo.monitoring import ConnectionReadyEvent +from pymongo.operations import _Op from pymongo.read_preferences import ReadPreference +_IS_SYNC = True # Location of JSON test specifications. 
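Editorial aside: the tests above pass a plain callable as MongoClient's server_selector option. The callable receives the list of eligible server descriptions and returns the subset that selection may choose from; returning the input unchanged is a no-op. A minimal sketch under those assumptions (the selector logic is mine):

    from pymongo import MongoClient

    def prefer_lowest_port(server_descriptions):
        # Each item exposes .address as a (host, port) pair.
        lowest = min(server_descriptions, key=lambda sd: sd.address[1])
        return [lowest]

    # connect=False so the sketch does not require a running deployment.
    client = MongoClient(connect=False, server_selector=prefer_lowest_port)
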
-TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), os.path.join("server_selection", "in_window") -) +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection", "in_window") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "server_selection", "in_window" + ) class TestAllScenarios(unittest.TestCase): @@ -46,12 +56,12 @@ def run_scenario(self, scenario_def): server.pool.operation_count = mock["operation_count"] pref = ReadPreference.NEAREST - counts = dict((address, 0) for address in topology._description.server_descriptions()) + counts = {address: 0 for address in topology._description.server_descriptions()} # Number of times to repeat server selection iterations = scenario_def["iterations"] for _ in range(iterations): - server = topology.select_server(pref, server_selection_timeout=0) + server = topology.select_server(pref, _Op.TEST, server_selection_timeout=0) counts[server.description.address] += 1 # Verify expected_frequencies @@ -76,7 +86,7 @@ def run_scenario(self): return run_scenario -class CustomTestCreator(TestCreator): +class CustomSpecTestCreator(SpecTestCreator): def tests(self, scenario_def): """Extract the tests from a spec file. @@ -86,12 +96,12 @@ def tests(self, scenario_def): return [scenario_def] -CustomTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() +CustomSpecTestCreator(create_test, TestAllScenarios, TEST_PATH).create_tests() -class FinderThread(threading.Thread): +class FinderTask(ConcurrentRunner): def __init__(self, collection, iterations): - super(FinderThread, self).__init__() + super().__init__() self.daemon = True self.collection = collection self.iterations = iterations @@ -106,17 +116,17 @@ def run(self): class TestProse(IntegrationTest): def frequencies(self, client, listener, n_finds=10): coll = client.test.test - N_THREADS = 10 - threads = [FinderThread(coll, n_finds) for _ in range(N_THREADS)] - for thread in threads: - thread.start() - for thread in threads: - thread.join() - for thread in threads: - self.assertTrue(thread.passed) - - events = listener.results["started"] - self.assertEqual(len(events), n_finds * N_THREADS) + N_TASKS = 10 + tasks = [FinderTask(coll, n_finds) for _ in range(N_TASKS)] + for task in tasks: + task.start() + for task in tasks: + task.join() + for task in tasks: + self.assertTrue(task.passed) + + events = listener.started_events + self.assertEqual(len(events), n_finds * N_TASKS) nodes = client.nodes self.assertEqual(len(nodes), 2) freqs = {address: 0.0 for address in nodes} @@ -128,21 +138,23 @@ def frequencies(self, client, listener, n_finds=10): @client_context.require_failCommand_appName @client_context.require_multiple_mongoses + @flaky(reason="PYTHON-3689") def test_load_balancing(self): listener = OvertCommandListener() + cmap_listener = CMAPListener() # PYTHON-2584: Use a large localThresholdMS to avoid the impact of # varying RTTs. - client = rs_client( + client = self.rs_client( client_context.mongos_seeds(), appName="loadBalancingTest", - event_listeners=[listener], + event_listeners=[listener, cmap_listener], localThresholdMS=30000, minPoolSize=10, ) - self.addCleanup(client.close) wait_until(lambda: len(client.nodes) == 2, "discover both nodes") - wait_until(lambda: len(get_pool(client).sockets) >= 10, "create 10 connections") - # Delay find commands on + # Wait for both pools to be populated. + cmap_listener.wait_for_event(ConnectionReadyEvent, 20) + # Delay find commands on only one mongos. 
delay_finds = { "configureFailPoint": "failCommand", "mode": {"times": 10000}, @@ -160,7 +172,7 @@ def test_load_balancing(self): freqs = self.frequencies(client, listener) self.assertLessEqual(freqs[delayed_server], 0.25) listener.reset() - freqs = self.frequencies(client, listener, n_finds=100) + freqs = self.frequencies(client, listener, n_finds=150) self.assertAlmostEqual(freqs[delayed_server], 0.50, delta=0.15) diff --git a/test/test_server_selection_logging.py b/test/test_server_selection_logging.py new file mode 100644 index 0000000000..d53d8dc84f --- /dev/null +++ b/test/test_server_selection_logging.py @@ -0,0 +1,45 @@ +# Copyright 2024-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run the server selection logging unified format spec tests.""" +from __future__ import annotations + +import os +import sys +from pathlib import Path + +sys.path[0:0] = [""] + +from test import unittest +from test.unified_format import generate_test_classes + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection_logging") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection_logging") + + +globals().update( + generate_test_classes( + TEST_PATH, + module=__name__, + ) +) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_server_selection_rtt.py b/test/test_server_selection_rtt.py index d2d8768809..2aef36a585 100644 --- a/test/test_server_selection_rtt.py +++ b/test/test_server_selection_rtt.py @@ -13,22 +13,29 @@ # limitations under the License. """Test the topology module.""" +from __future__ import annotations import json import os import sys +from pathlib import Path sys.path[0:0] = [""] -from test import unittest +from test import PyMongoTestCase, unittest from pymongo.read_preferences import MovingAverage +_IS_SYNC = True + # Location of JSON test specifications. -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "server_selection/rtt") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "server_selection/rtt") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "server_selection/rtt") -class TestAllScenarios(unittest.TestCase): +class TestAllScenarios(PyMongoTestCase): pass @@ -48,7 +55,7 @@ def run_scenario(self): def create_tests(): - for dirpath, _, filenames in os.walk(_TEST_PATH): + for dirpath, _, filenames in os.walk(TEST_PATH): dirname = os.path.split(dirpath)[-1] for filename in filenames: @@ -57,7 +64,7 @@ def create_tests(): # Construct test from scenario. 
new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/test_session.py b/test/test_session.py index f22a2d5eab..9aa56a711e 100644 --- a/test/test_session.py +++ b/test/test_session.py @@ -13,54 +13,67 @@ # limitations under the License. """Test the client_session module.""" +from __future__ import annotations +import asyncio import copy import sys import time +from inspect import iscoroutinefunction from io import BytesIO +from test.helpers import ExceptionCatchingTask from typing import Any, Callable, List, Set, Tuple -from pymongo.mongo_client import MongoClient +from pymongo.synchronous.mongo_client import MongoClient sys.path[0:0] = [""] -from test import IntegrationTest, SkipTest, client_context, unittest -from test.utils import ( +from test import ( + IntegrationTest, + SkipTest, + UnitTest, + client_context, + unittest, +) +from test.helpers import client_knobs +from test.utils_shared import ( EventListener, - ExceptionCatchingThread, - rs_or_single_client, + HeartbeatEventListener, + OvertCommandListener, wait_until, ) from bson import DBRef -from gridfs import GridFS, GridFSBucket -from pymongo import ASCENDING, IndexModel, InsertOne, monitoring -from pymongo.command_cursor import CommandCursor +from gridfs.synchronous.grid_file import GridFS, GridFSBucket +from pymongo import ASCENDING, MongoClient, _csot, monitoring from pymongo.common import _MAX_END_SESSIONS -from pymongo.cursor import Cursor from pymongo.errors import ConfigurationError, InvalidOperation, OperationFailure -from pymongo.operations import UpdateOne +from pymongo.operations import IndexModel, InsertOne, UpdateOne from pymongo.read_concern import ReadConcern +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor + +_IS_SYNC = True # Ignore auth commands like saslStart, so we can assert lsid is in all commands. class SessionTestListener(EventListener): def started(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).started(event) + super().started(event) def succeeded(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).succeeded(event) + super().succeeded(event) def failed(self, event): if not event.command_name.startswith("sasl"): - super(SessionTestListener, self).failed(event) + super().failed(event) def first_command_started(self): - assert len(self.results["started"]) >= 1, "No command-started events" + assert len(self.started_events) >= 1, "No command-started events" - return self.results["started"][0] + return self.started_events[0] def session_ids(client): @@ -71,71 +84,65 @@ class TestSession(IntegrationTest): client2: MongoClient sensitive_commands: Set[str] - @classmethod @client_context.require_sessions - def setUpClass(cls): - super(TestSession, cls).setUpClass() + def setUp(self): + super().setUp() # Create a second client so we can make sure clients cannot share # sessions. - cls.client2 = rs_or_single_client() + self.client2 = self.rs_or_single_client() # Redact no commands, so we can test user-admin commands have "lsid". 
- cls.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() + self.sensitive_commands = monitoring._SENSITIVE_COMMANDS.copy() monitoring._SENSITIVE_COMMANDS.clear() - @classmethod - def tearDownClass(cls): - monitoring._SENSITIVE_COMMANDS.update(cls.sensitive_commands) - cls.client2.close() - super(TestSession, cls).tearDownClass() - - def setUp(self): self.listener = SessionTestListener() self.session_checker_listener = SessionTestListener() - self.client = rs_or_single_client( + self.client = self.rs_or_single_client( event_listeners=[self.listener, self.session_checker_listener] ) - self.addCleanup(self.client.close) self.db = self.client.pymongo_test - self.initial_lsids = set(s["id"] for s in session_ids(self.client)) + self.initial_lsids = {s["id"] for s in session_ids(self.client)} def tearDown(self): - """All sessions used in the test must be returned to the pool.""" + monitoring._SENSITIVE_COMMANDS.update(self.sensitive_commands) self.client.drop_database("pymongo_test") used_lsids = self.initial_lsids.copy() - for event in self.session_checker_listener.results["started"]: + for event in self.session_checker_listener.started_events: if "lsid" in event.command: used_lsids.add(event.command["lsid"]["id"]) - current_lsids = set(s["id"] for s in session_ids(self.client)) + current_lsids = {s["id"] for s in session_ids(self.client)} self.assertLessEqual(used_lsids, current_lsids) + super().tearDown() + def _test_ops(self, client, *ops): listener = client.options.event_listeners[0] for f, args, kw in ops: with client.start_session() as s: + listener.reset() + s._materialize() last_use = s._server_session.last_use start = time.monotonic() self.assertLessEqual(last_use, start) - listener.results.clear() # In case "f" modifies its inputs. args = copy.copy(args) kw = copy.copy(kw) kw["session"] = s f(*args, **kw) - self.assertGreaterEqual(s._server_session.last_use, start) - self.assertGreaterEqual(len(listener.results["started"]), 1) - for event in listener.results["started"]: - self.assertTrue( - "lsid" in event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", ) self.assertEqual( s.session_id, event.command["lsid"], - "%s sent wrong lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent wrong lsid with {event.command_name}", ) self.assertFalse(s.has_ended) @@ -151,20 +158,22 @@ def _test_ops(self, client, *ops): kw = copy.copy(kw) kw["session"] = s with self.assertRaisesRegex( - InvalidOperation, "Can only use session with the MongoClient that started it" + InvalidOperation, + "Can only use session with the MongoClient that started it", ): f(*args, **kw) # No explicit session. 
for f, args, kw in ops: - listener.results.clear() + listener.reset() f(*args, **kw) - self.assertGreaterEqual(len(listener.results["started"]), 1) + self.assertGreaterEqual(len(listener.started_events), 1) lsids = [] - for event in listener.results["started"]: - self.assertTrue( - "lsid" in event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{f.__name__} sent no lsid with {event.command_name}", ) lsids.append(event.command["lsid"]) @@ -176,7 +185,7 @@ def _test_ops(self, client, *ops): self.assertIn( lsid, session_ids(client), - "%s did not return implicit session to pool" % (f.__name__,), + f"{f.__name__} did not return implicit session to pool", ) def test_implicit_sessions_checkout(self): @@ -184,10 +193,11 @@ def test_implicit_sessions_checkout(self): # successful connection checkout" test from Driver Sessions Spec. succeeded = False lsid_set = set() - failures = 0 - for _ in range(5): - listener = EventListener() - client = rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + listener = OvertCommandListener() + client = self.rs_or_single_client(event_listeners=[listener], maxPoolSize=1) + # Retry up to 10 times because there is a known race condition that can cause multiple + # sessions to be used: connection check in happens before session check in + for _ in range(10): cursor = client.db.test.find({}) ops: List[Tuple[Callable, List[Any]]] = [ (client.db.test.find_one, [{"_id": 1}]), @@ -204,34 +214,34 @@ def test_implicit_sessions_checkout(self): (cursor.distinct, ["_id"]), (client.db.list_collections, []), ] - threads = [] - listener.results.clear() - - def thread_target(op, *args): - res = op(*args) + tasks = [] + listener.reset() + + def target(op, *args): + if iscoroutinefunction(op): + res = op(*args) + else: + res = op(*args) if isinstance(res, (Cursor, CommandCursor)): - list(res) + res.to_list() for op, args in ops: - threads.append( - ExceptionCatchingThread( - target=thread_target, args=[op, *args], name=op.__name__ - ) + tasks.append( + ExceptionCatchingTask(target=target, args=[op, *args], name=op.__name__) ) - threads[-1].start() - self.assertEqual(len(threads), len(ops)) - for thread in threads: - thread.join() - self.assertIsNone(thread.exc) - client.close() + tasks[-1].start() + self.assertEqual(len(tasks), len(ops)) + for t in tasks: + t.join() + self.assertIsNone(t.exc) lsid_set.clear() - for i in listener.results["started"]: + for i in listener.started_events: if i.command.get("lsid"): lsid_set.add(i.command.get("lsid")["id"]) if len(lsid_set) == 1: + # Break on first success. succeeded = True - else: - failures += 1 + break self.assertTrue(succeeded, lsid_set) def test_pool_lifo(self): @@ -270,9 +280,11 @@ def test_end_session(self): def test_end_sessions(self): # Use a new client so that the tearDown hook does not error. listener = SessionTestListener() - client = rs_or_single_client(event_listeners=[listener]) + client = self.rs_or_single_client(event_listeners=[listener]) # Start many sessions. 
sessions = [client.start_session() for _ in range(_MAX_END_SESSIONS + 1)] + for s in sessions: + s._materialize() for s in sessions: s.end_session() @@ -280,13 +292,13 @@ def test_end_sessions(self): self.assertEqual(len(client._topology._session_pool), _MAX_END_SESSIONS + 1) client.close() self.assertEqual(len(client._topology._session_pool), 0) - end_sessions = [e for e in listener.results["started"] if e.command_name == "endSessions"] + end_sessions = [e for e in listener.started_events if e.command_name == "endSessions"] self.assertEqual(len(end_sessions), 2) # Closing again should not send any commands. - listener.results.clear() + listener.reset() client.close() - self.assertEqual(len(listener.results["started"]), 0) + self.assertEqual(len(listener.started_events), 0) def test_client(self): client = self.client @@ -365,21 +377,21 @@ def test_cursor_clone(self): with self.client.start_session() as s: cursor = coll.find(session=s) - self.assertTrue(cursor.session is s) + self.assertIs(cursor.session, s) clone = cursor.clone() - self.assertTrue(clone.session is s) + self.assertIs(clone.session, s) # No explicit session. cursor = coll.find(batch_size=2) next(cursor) # Session is "owned" by cursor. self.assertIsNone(cursor.session) - self.assertIsNotNone(cursor._Cursor__session) + self.assertIsNotNone(cursor._session) clone = cursor.clone() next(clone) self.assertIsNone(clone.session) - self.assertIsNotNone(clone._Cursor__session) - self.assertFalse(cursor._Cursor__session is clone._Cursor__session) + self.assertIsNotNone(clone._session) + self.assertIsNot(cursor._session, clone._session) cursor.close() clone.close() @@ -390,28 +402,37 @@ def test_cursor(self): coll.insert_many([{} for _ in range(1000)]) # Test all cursor methods. - ops = [ - ("find", lambda session: list(coll.find(session=session))), - ("getitem", lambda session: coll.find(session=session)[0]), - ("distinct", lambda session: coll.find(session=session).distinct("a")), - ("explain", lambda session: coll.find(session=session).explain()), - ] + if _IS_SYNC: + # getitem is only supported in the synchronous API + ops = [ + ("find", lambda session: coll.find(session=session).to_list()), + ("getitem", lambda session: coll.find(session=session)[0]), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), + ] + else: + ops = [ + ("find", lambda session: coll.find(session=session).to_list()), + ("distinct", lambda session: coll.find(session=session).distinct("a")), + ("explain", lambda session: coll.find(session=session).explain()), + ] for name, f in ops: with client.start_session() as s: - listener.results.clear() + listener.reset() f(session=s) - self.assertGreaterEqual(len(listener.results["started"]), 1) - for event in listener.results["started"]: - self.assertTrue( - "lsid" in event.command, - "%s sent no lsid with %s" % (name, event.command_name), + self.assertGreaterEqual(len(listener.started_events), 1) + for event in listener.started_events: + self.assertIn( + "lsid", + event.command, + f"{name} sent no lsid with {event.command_name}", ) self.assertEqual( s.session_id, event.command["lsid"], - "%s sent wrong lsid with %s" % (name, event.command_name), + f"{name} sent wrong lsid with {event.command_name}", ) with self.assertRaisesRegex(InvalidOperation, "ended session"): @@ -419,24 +440,22 @@ def test_cursor(self): # No explicit session. 
for name, f in ops: - listener.results.clear() + listener.reset() f(session=None) event0 = listener.first_command_started() - self.assertTrue( - "lsid" in event0.command, "%s sent no lsid with %s" % (name, event0.command_name) - ) + self.assertIn("lsid", event0.command, f"{name} sent no lsid with {event0.command_name}") lsid = event0.command["lsid"] - for event in listener.results["started"][1:]: - self.assertTrue( - "lsid" in event.command, "%s sent no lsid with %s" % (name, event.command_name) + for event in listener.started_events[1:]: + self.assertIn( + "lsid", event.command, f"{name} sent no lsid with {event.command_name}" ) self.assertEqual( lsid, event.command["lsid"], - "%s sent wrong lsid with %s" % (name, event.command_name), + f"{name} sent wrong lsid with {event.command_name}", ) def test_gridfs(self): @@ -450,20 +469,32 @@ def new_file(session=None): grid_file.close() def find(session=None): - files = list(fs.find({"_id": 1}, session=session)) + files = fs.find({"_id": 1}, session=session).to_list() for f in files: f.read() + def get(session=None): + (fs.get(1, session=session)).read() + + def get_version(session=None): + (fs.get_version("f", session=session)).read() + + def get_last_version(session=None): + (fs.get_last_version("f", session=session)).read() + + def find_list(session=None): + fs.find(session=session).to_list() + self._test_ops( client, (new_file, [], {}), (fs.put, [b"data"], {}), - (lambda session=None: fs.get(1, session=session).read(), [], {}), - (lambda session=None: fs.get_version("f", session=session).read(), [], {}), - (lambda session=None: fs.get_last_version("f", session=session).read(), [], {}), + (get, [], {}), + (get_version, [], {}), + (get_last_version, [], {}), (fs.list, [], {}), (fs.find_one, [1], {}), - (lambda session=None: list(fs.find(session=session)), [], {}), + (find_list, [], {}), (fs.exists, [1], {}), (find, [], {}), (fs.delete, [1], {}), @@ -492,7 +523,7 @@ def open_download_stream_by_name(session=None): stream.read() def find(session=None): - files = list(bucket.find({"_id": 1}, session=session)) + files = bucket.find({"_id": 1}, session=session).to_list() for f in files: f.read() @@ -510,9 +541,10 @@ def find(session=None): (bucket.download_to_stream_by_name, ["f", sio], {}), (find, [], {}), (bucket.rename, [1, "f2"], {}), + (bucket.rename_by_name, ["f2", "f3"], {}), # Delete both files so _test_ops can run these operations twice. (bucket.delete, [1], {}), - (bucket.delete, [2], {}), + (bucket.delete_by_name, ["f"], {}), ) def test_gridfsbucket_cursor(self): @@ -537,12 +569,12 @@ def test_gridfsbucket_cursor(self): cursor = bucket.find(batch_size=1) files = [cursor.next()] - s = cursor._Cursor__session + s = cursor._session self.assertFalse(s.has_ended) cursor.__del__() self.assertTrue(s.has_ended) - self.assertIsNone(cursor._Cursor__session) + self.assertIsNone(cursor._session) # Files are still valid, they use their own sessions. for f in files: @@ -553,7 +585,7 @@ def test_gridfsbucket_cursor(self): cursor = bucket.find(session=s) assert cursor.session is not None s = cursor.session - files = list(cursor) + files = cursor.to_list() cursor.__del__() self.assertFalse(s.has_ended) @@ -571,7 +603,7 @@ def test_aggregate(self): coll = client.pymongo_test.collection def agg(session=None): - list(coll.aggregate([], batchSize=2, session=session)) + (coll.aggregate([], batchSize=2, session=session)).to_list() # With empty collection. 
self._test_ops(client, (agg, [], {})) @@ -600,7 +632,7 @@ def test_aggregate_error(self): # 3.6.0 mongos only validates the aggregate pipeline when the # database exists. coll.insert_one({}) - listener.results.clear() + listener.reset() with self.assertRaises(OperationFailure): coll.aggregate([{"$badOperation": {"bar": 1}}]) @@ -618,7 +650,7 @@ def _test_cursor_helper(self, create_cursor, close_cursor): cursor = create_cursor(coll, None) next(cursor) # Session is "owned" by cursor. - session = getattr(cursor, "_%s__session" % cursor.__class__.__name__) + session = cursor._session self.assertIsNotNone(session) lsid = session.session_id next(cursor) @@ -640,46 +672,63 @@ def _test_cursor_helper(self, create_cursor, close_cursor): self.assertIn(lsid, session_ids(self.client)) def test_cursor_close(self): - self._test_cursor_helper( - lambda coll, session: coll.find(session=session), lambda cursor: cursor.close() - ) + def find(coll, session): + return coll.find(session=session) + + self._test_cursor_helper(find, lambda cursor: cursor.close()) def test_command_cursor_close(self): - self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), lambda cursor: cursor.close() - ) + def aggregate(coll, session): + return coll.aggregate([], session=session) + + self._test_cursor_helper(aggregate, lambda cursor: cursor.close()) def test_cursor_del(self): - self._test_cursor_helper( - lambda coll, session: coll.find(session=session), lambda cursor: cursor.__del__() - ) + def find(coll, session): + return coll.find(session=session) + + def delete(cursor): + return cursor.__del__() + + self._test_cursor_helper(find, delete) def test_command_cursor_del(self): - self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), - lambda cursor: cursor.__del__(), - ) + def aggregate(coll, session): + return coll.aggregate([], session=session) + + def delete(cursor): + return cursor.__del__() + + self._test_cursor_helper(aggregate, delete) def test_cursor_exhaust(self): - self._test_cursor_helper( - lambda coll, session: coll.find(session=session), lambda cursor: list(cursor) - ) + def find(coll, session): + return coll.find(session=session) + + self._test_cursor_helper(find, lambda cursor: cursor.to_list()) def test_command_cursor_exhaust(self): - self._test_cursor_helper( - lambda coll, session: coll.aggregate([], session=session), lambda cursor: list(cursor) - ) + def aggregate(coll, session): + return coll.aggregate([], session=session) + + self._test_cursor_helper(aggregate, lambda cursor: cursor.to_list()) def test_cursor_limit_reached(self): + def find(coll, session): + return coll.find(limit=4, batch_size=2, session=session) + self._test_cursor_helper( - lambda coll, session: coll.find(limit=4, batch_size=2, session=session), - lambda cursor: list(cursor), + find, + lambda cursor: cursor.to_list(), ) def test_command_cursor_limit_reached(self): + def aggregate(coll, session): + return coll.aggregate([], batchSize=900, session=session) + self._test_cursor_helper( - lambda coll, session: coll.aggregate([], batchSize=900, session=session), - lambda cursor: list(cursor), + aggregate, + lambda cursor: cursor.to_list(), ) def _test_unacknowledged_ops(self, client, *ops): @@ -687,56 +736,55 @@ def _test_unacknowledged_ops(self, client, *ops): for f, args, kw in ops: with client.start_session() as s: - listener.results.clear() + listener.reset() # In case "f" modifies its inputs. 
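The cursor helpers above pivot on the implicit-session lifecycle: a cursor created without a session owns one privately (the renamed `cursor._session` attribute) and hands it back to the client's session pool on close or exhaustion. A minimal illustration; note `cursor.session` only ever reports an *explicit* session:

from pymongo import MongoClient

client = MongoClient()
coll = client.pymongo_test.test
coll.insert_many([{} for _ in range(10)])

cursor = coll.find(batch_size=2)
next(cursor)
assert cursor.session is None  # no explicit session was supplied
cursor.close()                 # the implicit session returns to the pool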
args = copy.copy(args) kw = copy.copy(kw) kw["session"] = s with self.assertRaises( - ConfigurationError, msg="%s did not raise ConfigurationError" % (f.__name__,) + ConfigurationError, msg=f"{f.__name__} did not raise ConfigurationError" ): f(*args, **kw) if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results["started"].pop(0) + event = listener.started_events.pop(0) self.assertEqual("listCollections", event.command_name) self.assertIn( "lsid", event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) # Should not run any command before raising an error. - self.assertFalse(listener.results["started"], "%s sent command" % (f.__name__,)) + self.assertFalse(listener.started_events, f"{f.__name__} sent command") self.assertTrue(s.has_ended) # Unacknowledged write without a session does not send an lsid. for f, args, kw in ops: - listener.results.clear() + listener.reset() f(*args, **kw) - self.assertGreaterEqual(len(listener.results["started"]), 1) + self.assertGreaterEqual(len(listener.started_events), 1) if f.__name__ == "create_collection": # create_collection runs listCollections first. - event = listener.results["started"].pop(0) + event = listener.started_events.pop(0) self.assertEqual("listCollections", event.command_name) self.assertIn( "lsid", event.command, - "%s sent no lsid with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent no lsid with {event.command_name}", ) - for event in listener.results["started"]: + for event in listener.started_events: self.assertNotIn( - "lsid", event.command, "%s sent lsid with %s" % (f.__name__, event.command_name) + "lsid", event.command, f"{f.__name__} sent lsid with {event.command_name}" ) def test_unacknowledged_writes(self): # Ensure the collection exists. 
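The unacknowledged-write assertions above encode a driver rule worth stating plainly: explicit sessions require acknowledged writes, so a w=0 client must raise ConfigurationError before sending any command. A minimal reproduction of both branches the test exercises:

from pymongo import MongoClient
from pymongo.errors import ConfigurationError

client = MongoClient(w=0)
with client.start_session() as s:
    try:
        client.pymongo_test.test.insert_one({}, session=s)
    except ConfigurationError:
        pass  # expected: unacknowledged writes cannot use an explicit session
# Without a session the write is sent, but with no lsid attached.
client.pymongo_test.test.insert_one({})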
self.client.pymongo_test.test_unacked_writes.insert_one({}) - client = rs_or_single_client(w=0, event_listeners=[self.listener]) - self.addCleanup(client.close) + client = self.rs_or_single_client(w=0, event_listeners=[self.listener]) db = client.pymongo_test coll = db.test_unacked_writes ops: list = [ @@ -777,48 +825,41 @@ def test_session_not_copyable(self): self.assertRaises(TypeError, lambda: copy.copy(s)) -class TestCausalConsistency(unittest.TestCase): +class TestCausalConsistency(UnitTest): listener: SessionTestListener client: MongoClient - @classmethod - def setUpClass(cls): - cls.listener = SessionTestListener() - cls.client = rs_or_single_client(event_listeners=[cls.listener]) - - @classmethod - def tearDownClass(cls): - cls.client.close() - @client_context.require_sessions def setUp(self): - super(TestCausalConsistency, self).setUp() + super().setUp() + self.listener = SessionTestListener() + self.client = self.rs_or_single_client(event_listeners=[self.listener]) @client_context.require_no_standalone def test_core(self): with self.client.start_session() as sess: self.assertIsNone(sess.cluster_time) self.assertIsNone(sess.operation_time) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one(session=sess) - started = self.listener.results["started"][0] + started = self.listener.started_events[0] cmd = started.command self.assertIsNone(cmd.get("readConcern")) op_time = sess.operation_time self.assertIsNotNone(op_time) - succeeded = self.listener.results["succeeded"][0] + succeeded = self.listener.succeeded_events[0] reply = succeeded.reply self.assertEqual(op_time, reply.get("operationTime")) # No explicit session self.client.pymongo_test.test.insert_one({}) self.assertEqual(sess.operation_time, op_time) - self.listener.results.clear() + self.listener.reset() try: self.client.pymongo_test.command("doesntexist", session=sess) except: pass - failed = self.listener.results["failed"][0] + failed = self.listener.failed_events[0] failed_op_time = failed.failure.get("operationTime") # Some older builds of MongoDB 3.5 / 3.6 return None for # operationTime when a command fails. Make sure we don't @@ -848,14 +889,14 @@ def _test_reads(self, op, exception=None): coll.find_one({}, session=sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() if exception: with self.assertRaises(exception): op(coll, sess) else: op(coll, sess) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -865,21 +906,26 @@ def _test_reads(self, op, exception=None): def test_reads(self): # Make sure the collection exists. 
self.client.pymongo_test.test.insert_one({}) - self._test_reads(lambda coll, session: list(coll.aggregate([], session=session))) - self._test_reads(lambda coll, session: list(coll.find({}, session=session))) + + def aggregate(coll, session): + return (coll.aggregate([], session=session)).to_list() + + def aggregate_raw(coll, session): + return (coll.aggregate_raw_batches([], session=session)).to_list() + + def find_raw(coll, session): + return coll.find_raw_batches({}, session=session).to_list() + + self._test_reads(aggregate) + self._test_reads(lambda coll, session: coll.find({}, session=session).to_list()) self._test_reads(lambda coll, session: coll.find_one({}, session=session)) self._test_reads(lambda coll, session: coll.count_documents({}, session=session)) self._test_reads(lambda coll, session: coll.distinct("foo", session=session)) - self._test_reads( - lambda coll, session: list(coll.aggregate_raw_batches([], session=session)) - ) - self._test_reads(lambda coll, session: list(coll.find_raw_batches({}, session=session))) + self._test_reads(aggregate_raw) + self._test_reads(find_raw) - self.assertRaises( - ConfigurationError, - self._test_reads, - lambda coll, session: coll.estimated_document_count(session=session), - ) + with self.assertRaises(ConfigurationError): + self._test_reads(lambda coll, session: coll.estimated_document_count(session=session)) def _test_writes(self, op): coll = self.client.pymongo_test.test @@ -887,10 +933,10 @@ def _test_writes(self, op): op(coll, sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=sess) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -898,7 +944,9 @@ def _test_writes(self, op): @client_context.require_no_standalone def test_writes(self): - self._test_writes(lambda coll, session: coll.bulk_write([InsertOne({})], session=session)) + self._test_writes( + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) + ) self._test_writes(lambda coll, session: coll.insert_one({}, session=session)) self._test_writes(lambda coll, session: coll.insert_many([{}], session=session)) self._test_writes( @@ -936,15 +984,15 @@ def _test_no_read_concern(self, op): coll.find_one({}, session=sess) operation_time = sess.operation_time self.assertIsNotNone(operation_time) - self.listener.results.clear() + self.listener.reset() op(coll, sess) - rc = self.listener.results["started"][0].command.get("readConcern") + rc = self.listener.started_events[0].command.get("readConcern") self.assertIsNone(rc) @client_context.require_no_standalone def test_writes_do_not_include_read_concern(self): self._test_no_read_concern( - lambda coll, session: coll.bulk_write([InsertOne({})], session=session) + lambda coll, session: coll.bulk_write([InsertOne[dict]({})], session=session) ) self._test_no_read_concern(lambda coll, session: coll.insert_one({}, session=session)) self._test_no_read_concern(lambda coll, session: coll.insert_many([{}], session=session)) @@ -982,13 +1030,6 @@ def test_writes_do_not_include_read_concern(self): # Not a write, but explain also doesn't support readConcern. 
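A compact sketch of the contract the _test_reads/_test_writes helpers verify: in a causally consistent session, any operation records an operationTime, and the next read forwards it as readConcern.afterClusterTime. This assumes a server that reports operationTime; the decorators above skip standalones:

from pymongo import MongoClient

client = MongoClient()
coll = client.pymongo_test.test
with client.start_session(causal_consistency=True) as s:
    coll.insert_one({}, session=s)
    assert s.operation_time is not None
    # The driver attaches {"readConcern": {"afterClusterTime": s.operation_time}}
    # to this find, guaranteeing it observes the insert above.
    coll.find_one({}, session=s)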
self._test_no_read_concern(lambda coll, session: coll.find({}, session=session).explain()) - @client_context.require_no_standalone - @client_context.require_version_max(4, 1, 0) - def test_aggregate_out_does_not_include_read_concern(self): - self._test_no_read_concern( - lambda coll, session: list(coll.aggregate([{"$out": "aggout"}], session=session)) - ) - @client_context.require_no_standalone def test_get_more_does_not_include_read_concern(self): coll = self.client.pymongo_test.test @@ -999,19 +1040,19 @@ def test_get_more_does_not_include_read_concern(self): coll.insert_many([{}, {}]) cursor = coll.find({}).batch_size(1) next(cursor) - self.listener.results.clear() - list(cursor) - started = self.listener.results["started"][0] + self.listener.reset() + cursor.to_list() + started = self.listener.started_events[0] self.assertEqual(started.command_name, "getMore") self.assertIsNone(started.command.get("readConcern")) def test_session_not_causal(self): with self.client.start_session(causal_consistency=False) as s: self.client.pymongo_test.test.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}, session=s) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) @@ -1021,32 +1062,31 @@ def test_session_not_causal(self): def test_server_not_causal(self): with self.client.start_session(causal_consistency=True) as s: self.client.pymongo_test.test.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}, session=s) act = ( - self.listener.results["started"][0] + self.listener.started_events[0] .command.get("readConcern", {}) .get("afterClusterTime") ) self.assertIsNone(act) @client_context.require_no_standalone - @client_context.require_no_mmap def test_read_concern(self): with self.client.start_session(causal_consistency=True) as s: coll = self.client.pymongo_test.test coll.insert_one({}, session=s) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=s) - read_concern = self.listener.results["started"][0].command.get("readConcern") + read_concern = self.listener.started_events[0].command.get("readConcern") self.assertIsNotNone(read_concern) self.assertIsNone(read_concern.get("level")) self.assertIsNotNone(read_concern.get("afterClusterTime")) coll = coll.with_options(read_concern=ReadConcern("majority")) - self.listener.results.clear() + self.listener.reset() coll.find_one({}, session=s) - read_concern = self.listener.results["started"][0].command.get("readConcern") + read_concern = self.listener.started_events[0].command.get("readConcern") self.assertIsNotNone(read_concern) self.assertEqual(read_concern.get("level"), "majority") self.assertIsNotNone(read_concern.get("afterClusterTime")) @@ -1054,31 +1094,30 @@ def test_read_concern(self): @client_context.require_no_standalone def test_cluster_time_with_server_support(self): self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + self.listener.reset() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") self.assertIsNotNone(after_cluster_time) @client_context.require_standalone def test_cluster_time_no_server_support(self): self.client.pymongo_test.test.insert_one({}) - self.listener.results.clear() + 
self.listener.reset() self.client.pymongo_test.test.find_one({}) - after_cluster_time = self.listener.results["started"][0].command.get("$clusterTime") + after_cluster_time = self.listener.started_events[0].command.get("$clusterTime") self.assertIsNone(after_cluster_time) class TestClusterTime(IntegrationTest): def setUp(self): - super(TestClusterTime, self).setUp() - if "$clusterTime" not in client_context.hello: + super().setUp() + if "$clusterTime" not in (client_context.hello): raise SkipTest("$clusterTime not supported") + # Sessions prose test: 3) $clusterTime in commands def test_cluster_time(self): listener = SessionTestListener() - # Prevent heartbeats from updating $clusterTime between operations. - client = rs_or_single_client(event_listeners=[listener], heartbeatFrequencyMS=999999) - self.addCleanup(client.close) + client = self.rs_or_single_client(event_listeners=[listener]) collection = client.pymongo_test.collection # Prepare for tests of find() and aggregate(). collection.insert_many([{} for _ in range(10)]) @@ -1101,7 +1140,7 @@ def insert_and_find(): cursor.close() def insert_and_aggregate(): - cursor = collection.aggregate([], batchSize=1).batch_size(1) + cursor = (collection.aggregate([], batchSize=1)).batch_size(1) for _ in range(5): # Advance the cluster time. collection.insert_one({}) @@ -1109,11 +1148,14 @@ def insert_and_aggregate(): cursor.close() + def aggregate(): + (collection.aggregate([])).to_list() + ops = [ # Tests from Driver Sessions Spec. ("ping", lambda: client.admin.command("ping")), - ("aggregate", lambda: list(collection.aggregate([]))), - ("find", lambda: list(collection.find())), + ("aggregate", lambda: aggregate()), + ("find", lambda: collection.find().to_list()), ("insert_one", lambda: collection.insert_one({})), # Additional PyMongo tests. ("insert_and_find", insert_and_find), @@ -1126,35 +1168,68 @@ def insert_and_aggregate(): ("rename_and_drop", rename_and_drop), ] - for name, f in ops: - listener.results.clear() + for _name, f in ops: + listener.reset() # Call f() twice, insert to advance clusterTime, call f() again. 
f() f() collection.insert_one({}) f() - self.assertGreaterEqual(len(listener.results["started"]), 1) - for i, event in enumerate(listener.results["started"]): - self.assertTrue( - "$clusterTime" in event.command, - "%s sent no $clusterTime with %s" % (f.__name__, event.command_name), + self.assertGreaterEqual(len(listener.started_events), 1) + for i, event in enumerate(listener.started_events): + self.assertIn( + "$clusterTime", + event.command, + f"{f.__name__} sent no $clusterTime with {event.command_name}", ) if i > 0: - succeeded = listener.results["succeeded"][i - 1] - self.assertTrue( - "$clusterTime" in succeeded.reply, - "%s received no $clusterTime with %s" - % (f.__name__, succeeded.command_name), + succeeded = listener.succeeded_events[i - 1] + self.assertIn( + "$clusterTime", + succeeded.reply, + f"{f.__name__} received no $clusterTime with {succeeded.command_name}", ) self.assertTrue( event.command["$clusterTime"]["clusterTime"] >= succeeded.reply["$clusterTime"]["clusterTime"], - "%s sent wrong $clusterTime with %s" % (f.__name__, event.command_name), + f"{f.__name__} sent wrong $clusterTime with {event.command_name}", ) + # Sessions prose test: 20) Drivers do not gossip `$clusterTime` on SDAM commands + def test_cluster_time_not_used_by_sdam(self): + heartbeat_listener = HeartbeatEventListener() + cmd_listener = OvertCommandListener() + with client_knobs(min_heartbeat_interval=0.01): + c1 = self.single_client( + event_listeners=[heartbeat_listener, cmd_listener], heartbeatFrequencyMS=10 + ) + cluster_time = (c1.admin.command({"ping": 1}))["$clusterTime"] + self.assertEqual(c1._topology.max_cluster_time(), cluster_time) + + # Advance the server's $clusterTime by performing an insert via another client. + self.db.test.insert_one({"advance": "$clusterTime"}) + # Wait until the client C1 processes the next pair of SDAM heartbeat started + succeeded events. + heartbeat_listener.reset() + + def next_heartbeat(): + events = heartbeat_listener.events + for i in range(len(events) - 1): + if isinstance(events[i], monitoring.ServerHeartbeatStartedEvent): + if isinstance(events[i + 1], monitoring.ServerHeartbeatSucceededEvent): + return True + return False + + wait_until(next_heartbeat, "never found pair of heartbeat started + succeeded events") + # Assert that C1's max $clusterTime is still the same and has not been updated by SDAM. + cmd_listener.reset() + c1.admin.command({"ping": 1}) + started = cmd_listener.started_events[0] + self.assertEqual(started.command_name, "ping") + self.assertEqual(started.command["$clusterTime"], cluster_time) + if __name__ == "__main__": unittest.main() diff --git a/test/test_sessions_unified.py b/test/test_sessions_unified.py index 8a6b8bc9bf..3c80c70d38 100644 --- a/test/test_sessions_unified.py +++ b/test/test_sessions_unified.py @@ -13,17 +13,25 @@ # limitations under the License. """Test the Sessions unified spec tests.""" +from __future__ import annotations import os import sys +from pathlib import Path sys.path[0:0] = [""] from test import unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + # Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sessions") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "sessions") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "sessions") + # Generate unified tests. 
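Prose test 3 above boils down to gossip monotonicity: every command sends the newest $clusterTime the client has seen, and each reply can only advance it. A rough sketch of observing that with a listener, assuming a deployment that reports $clusterTime (ClusterTimeListener is a stand-in name):

from pymongo import MongoClient, monitoring

class ClusterTimeListener(monitoring.CommandListener):
    def __init__(self):
        self.sent = []

    def started(self, event):
        if "$clusterTime" in event.command:
            self.sent.append(event.command["$clusterTime"]["clusterTime"])

    def succeeded(self, event):
        pass

    def failed(self, event):
        pass

listener = ClusterTimeListener()
client = MongoClient(event_listeners=[listener])
for _ in range(3):
    client.admin.command("ping")
# Cluster times sent by the client never move backwards.
assert listener.sent == sorted(listener.sent)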
globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_son.py b/test/test_son.py index 5c1f43594d..36a6834889 100644 --- a/test/test_son.py +++ b/test/test_son.py @@ -13,6 +13,7 @@ # limitations under the License. """Tests for the son module.""" +from __future__ import annotations import copy import pickle @@ -47,7 +48,7 @@ def test_equality(self): self.assertEqual(a1, SON({"hello": "world"})) self.assertEqual(b2, SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) - self.assertEqual(b2, dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) + self.assertEqual(b2, {"hello_": "mike", "mike": "awesome", "hello": "world"}) self.assertNotEqual(a1, b2) self.assertNotEqual(b2, SON((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) @@ -55,7 +56,7 @@ def test_equality(self): # Explicitly test inequality self.assertFalse(a1 != SON({"hello": "world"})) self.assertFalse(b2 != SON((("hello", "world"), ("mike", "awesome"), ("hello_", "mike")))) - self.assertFalse(b2 != dict((("hello_", "mike"), ("mike", "awesome"), ("hello", "world")))) + self.assertFalse(b2 != {"hello_": "mike", "mike": "awesome", "hello": "world"}) # Embedded SON. d4 = SON([("blah", {"foo": SON()})]) @@ -84,7 +85,6 @@ def test_to_dict(self): self.assertEqual(SON, d4["blah"]["foo"].__class__) def test_pickle(self): - simple_son = SON([]) complex_son = SON([("son", simple_son), ("list", [simple_son, simple_son])]) @@ -97,10 +97,10 @@ def test_pickle_backwards_compatability(self): # This string was generated by pickling a SON object in pymongo # version 2.1.1 pickled_with_2_1_1 = ( - "ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" - "c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" - "S'_SON__keys'\np7\n(lp8\nsb." - ).encode("utf8") + b"ccopy_reg\n_reconstructor\np0\n(cbson.son\nSON\np1\n" + b"c__builtin__\ndict\np2\n(dp3\ntp4\nRp5\n(dp6\n" + b"S'_SON__keys'\np7\n(lp8\nsb." 
+ ) son_2_1_1 = pickle.loads(pickled_with_2_1_1) self.assertEqual(son_2_1_1, SON([])) @@ -138,29 +138,23 @@ def test_copying(self): self.assertEqual(id(reflexive_son1), id(reflexive_son1["reflexive"])) def test_iteration(self): - """ - Test __iter__ - """ + """Test __iter__""" # test success case test_son = SON([(1, 100), (2, 200), (3, 300)]) for ele in test_son: self.assertEqual(ele * 100, test_son[ele]) def test_contains_has(self): - """ - has_key and __contains__ - """ + """has_key and __contains__""" test_son = SON([(1, 100), (2, 200), (3, 300)]) self.assertIn(1, test_son) - self.assertTrue(2 in test_son, "in failed") - self.assertFalse(22 in test_son, "in succeeded when it shouldn't") - self.assertTrue(test_son.has_key(2), "has_key failed") # noqa - self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") # noqa + self.assertIn(2, test_son, "in failed") + self.assertNotIn(22, test_son, "in succeeded when it shouldn't") + self.assertTrue(test_son.has_key(2), "has_key failed") + self.assertFalse(test_son.has_key(22), "has_key succeeded when it shouldn't") def test_clears(self): - """ - Test clear() - """ + """Test clear()""" test_son = SON([(1, 100), (2, 200), (3, 300)]) test_son.clear() self.assertNotIn(1, test_son) @@ -169,9 +163,7 @@ def test_clears(self): self.assertEqual({}, test_son.to_dict()) def test_len(self): - """ - Test len - """ + """Test len""" test_son = SON() self.assertEqual(0, len(test_son)) test_son = SON([(1, 100), (2, 200), (3, 300)]) diff --git a/test/test_srv_polling.py b/test/test_srv_polling.py index 0b54171dc9..f5096bea01 100644 --- a/test/test_srv_polling.py +++ b/test/test_srv_polling.py @@ -13,26 +13,31 @@ # limitations under the License. """Run the SRV support tests.""" +from __future__ import annotations +import asyncio import sys -from time import sleep +import time +from test.utils import flaky +from test.utils_shared import FunctionCallRecorder from typing import Any sys.path[0:0] = [""] -from test import client_knobs, unittest -from test.utils import FunctionCallRecorder, wait_until +from test import PyMongoTestCase, client_knobs, unittest +from test.utils import wait_until import pymongo from pymongo import common from pymongo.errors import ConfigurationError -from pymongo.mongo_client import MongoClient -from pymongo.srv_resolver import _HAVE_DNSPYTHON +from pymongo.synchronous.srv_resolver import _have_dnspython + +_IS_SYNC = True WAIT_TIME = 0.1 -class SrvPollingKnobs(object): +class SrvPollingKnobs: def __init__( self, ttl_time=None, @@ -50,7 +55,9 @@ def __init__( def enable(self): self.old_min_srv_rescan_interval = common.MIN_SRV_RESCAN_INTERVAL - self.old_dns_resolver_response = pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl + self.old_dns_resolver_response = ( + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl + ) if self.min_srv_rescan_interval is not None: common.MIN_SRV_RESCAN_INTERVAL = self.min_srv_rescan_interval @@ -70,23 +77,22 @@ def mock_get_hosts_and_min_ttl(resolver, *args): else: patch_func = mock_get_hosts_and_min_ttl - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = patch_func # type: ignore def __enter__(self): self.enable() def disable(self): common.MIN_SRV_RESCAN_INTERVAL = self.old_min_srv_rescan_interval # type: ignore - pymongo.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore - self.old_dns_resolver_response # type: ignore + 
pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl = ( # type: ignore + self.old_dns_resolver_response ) def __exit__(self, exc_type, exc_val, exc_tb): self.disable() -class TestSrvPolling(unittest.TestCase): - +class TestSrvPolling(PyMongoTestCase): BASE_SRV_RESPONSE = [ ("localhost.test.build.10gen.cc", 27017), ("localhost.test.build.10gen.cc", 27018), @@ -95,8 +101,6 @@ class TestSrvPolling(unittest.TestCase): CONNECTION_STRING = "mongodb+srv://test1.test.build.10gen.cc" def setUp(self): - if not _HAVE_DNSPYTHON: - raise unittest.SkipTest("SRV polling tests require the dnspython module") # Patch timeouts to ensure short rescan SRV interval. self.client_knobs = client_knobs( heartbeat_frequency=WAIT_TIME, @@ -111,7 +115,7 @@ def tearDown(self): def get_nodelist(self, client): return client._topology.description.server_descriptions().keys() - def assert_nodelist_change(self, expected_nodelist, client): + def assert_nodelist_change(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): """Check if the client._topology eventually sees all nodes in the expected_nodelist. """ @@ -122,27 +126,38 @@ def predicate(): return True return False - wait_until(predicate, "see expected nodelist", timeout=100 * WAIT_TIME) + wait_until(predicate, "see expected nodelist", timeout=timeout) - def assert_nodelist_nochange(self, expected_nodelist, client): + def assert_nodelist_nochange(self, expected_nodelist, client, timeout=(100 * WAIT_TIME)): """Check if the client._topology ever deviates from seeing all nodes in the expected_nodelist. Consistency is checked after waiting up to timeout seconds for the expected nodelist to appear. Also check that the resolver is called at least once. """ - sleep(WAIT_TIME * 10) + + def predicate(): + if set(expected_nodelist) == set(self.get_nodelist(client)): + return ( + pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count + >= 1 + ) + return False + + wait_until(predicate, "Node list equals expected nodelist", timeout=timeout) + nodelist = self.get_nodelist(client) if set(expected_nodelist) != set(nodelist): msg = "Client nodelist %s changed unexpectedly (expected %s)" self.fail(msg % (nodelist, expected_nodelist)) self.assertGreaterEqual( pymongo.synchronous.srv_resolver._SrvResolver.get_hosts_and_min_ttl.call_count, # type: ignore 1, "resolver was never called", ) return True def run_scenario(self, dns_response, expect_change): + self.assertTrue(_have_dnspython()) if callable(dns_response): dns_resolver_response = dns_response else: @@ -161,7 +176,8 @@ def dns_resolver_response(): # Patch timeouts to ensure short test running times. with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING) + client = self.simple_client(self.CONNECTION_STRING) + client._connect() self.assert_nodelist_change(self.BASE_SRV_RESPONSE, client) # Patch list of hosts returned by DNS query. with SrvPollingKnobs( @@ -169,6 +185,9 @@ def dns_resolver_response(): ): assertion_method(expected_response, client) + # Close the client early to avoid affecting the next scenario run.
+ client.close() + def test_addition(self): response = self.BASE_SRV_RESPONSE[:] response.append(("localhost.test.build.10gen.cc", 27019)) @@ -206,6 +225,20 @@ def response_callback(*args): self.run_scenario(response_callback, False) + @flaky(reason="PYTHON-5500", max_runs=3) + def test_dns_failures_logging(self): + from dns import exception + + with self.assertLogs("pymongo.topology", level="DEBUG") as cm: + + def response_callback(*args): + raise exception.Timeout("DNS Failure!") + + self.run_scenario(response_callback, False) + + srv_failure_logs = [r for r in cm.records if "SRV monitor check failed" in r.getMessage()] + self.assertEqual(len(srv_failure_logs), 1) + def test_dns_record_lookup_empty(self): response: list = [] self.run_scenario(response, False) @@ -225,7 +258,8 @@ def final_callback(): count_resolver_calls=True, ): # Client uses unpatched method to get initial nodelist - client = MongoClient(self.CONNECTION_STRING) + client = self.simple_client(self.CONNECTION_STRING) + client._connect() # Invalid DNS resolver response should not change nodelist. self.assert_nodelist_nochange(self.BASE_SRV_RESPONSE, client) @@ -235,12 +269,14 @@ def final_callback(): # Nodelist should reflect new valid DNS resolver response. self.assert_nodelist_change(response_final, client) + @flaky(reason="PYTHON-5315") def test_recover_from_initially_empty_seedlist(self): def empty_seedlist(): return [] self._test_recover_from_initial(empty_seedlist) + @flaky(reason="PYTHON-5315") def test_recover_from_initially_erroring_seedlist(self): def erroring_seedlist(): raise ConfigurationError @@ -258,8 +294,8 @@ def nodelist_callback(): return response with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=0) - self.addCleanup(client.close) + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=0) + client._connect() with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) @@ -273,8 +309,8 @@ def nodelist_callback(): return response with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) - self.addCleanup(client.close) + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + client._connect() with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) @@ -289,20 +325,20 @@ def nodelist_callback(): return response with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=2) - self.addCleanup(client.close) + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=2) + client._connect() with SrvPollingKnobs(nodelist_callback=nodelist_callback): - sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) + time.sleep(2 * common.MIN_SRV_RESCAN_INTERVAL) final_topology = set(client.topology_description.server_descriptions()) self.assertIn(("localhost.test.build.10gen.cc", 27017), final_topology) self.assertEqual(len(final_topology), 2) def test_does_not_flipflop(self): with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient(self.CONNECTION_STRING, srvMaxHosts=1) - self.addCleanup(client.close) + client = self.simple_client(self.CONNECTION_STRING, srvMaxHosts=1) + client._connect() old = set(client.topology_description.server_descriptions()) - sleep(4 * WAIT_TIME) + time.sleep(4 * WAIT_TIME) new = 
set(client.topology_description.server_descriptions()) self.assertSetEqual(old, new) @@ -317,12 +353,35 @@ def nodelist_callback(): return response with SrvPollingKnobs(ttl_time=WAIT_TIME, min_srv_rescan_interval=WAIT_TIME): - client = MongoClient( + client = self.simple_client( "mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname" ) + client._connect() with SrvPollingKnobs(nodelist_callback=nodelist_callback): self.assert_nodelist_change(response, client) + def test_srv_waits_to_poll(self): + modified = [("localhost.test.build.10gen.cc", 27019)] + + def resolver_response(): + return modified + + with SrvPollingKnobs( + ttl_time=WAIT_TIME, + min_srv_rescan_interval=WAIT_TIME, + nodelist_callback=resolver_response, + ): + client = self.simple_client(self.CONNECTION_STRING) + client._connect() + with self.assertRaises(AssertionError): + self.assert_nodelist_change(modified, client, timeout=WAIT_TIME / 2) + + def test_import_dns_resolver(self): + # Regression test for PYTHON-4407 + import dns.resolver + + self.assertTrue(hasattr(dns.resolver, "resolve") or hasattr(dns.resolver, "query")) + if __name__ == "__main__": unittest.main() diff --git a/test/test_ssl.py b/test/test_ssl.py index 9b58c2251b..b1e9a65eb5 100644 --- a/test/test_ssl.py +++ b/test/test_ssl.py @@ -13,39 +13,48 @@ # limitations under the License. """Tests for SSL support.""" +from __future__ import annotations import os +import pathlib import socket import sys -from typing import Any sys.path[0:0] = [""] -from test import HAVE_IPADDRESS, IntegrationTest, SkipTest, client_context, unittest -from test.utils import ( +from test import ( + HAVE_IPADDRESS, + IntegrationTest, + PyMongoTestCase, + SkipTest, + client_context, + connected, + remove_all_users, + unittest, +) +from test.utils_shared import ( EventListener, + OvertCommandListener, cat_files, - connected, ignore_deprecations, - remove_all_users, ) from urllib.parse import quote_plus from pymongo import MongoClient, ssl_support from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure from pymongo.hello import HelloCompat -from pymongo.ssl_support import HAVE_SSL, _ssl, get_ssl_context +from pymongo.ssl_support import HAVE_PYSSL, HAVE_SSL, _ssl, get_ssl_context from pymongo.write_concern import WriteConcern _HAVE_PYOPENSSL = False try: # All of these must be available to use PyOpenSSL - import OpenSSL # noqa - import requests # noqa - import service_identity # noqa + import OpenSSL + import requests + import service_identity # Ensure service_identity>=18.1 is installed - from service_identity.pyopenssl import verify_ip_address # noqa + from service_identity.pyopenssl import verify_ip_address from pymongo.ocsp_support import _load_trusted_ca_certs @@ -57,7 +66,13 @@ if HAVE_SSL: import ssl -CERT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "certificates") +_IS_SYNC = True + +if _IS_SYNC: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent, "certificates") +else: + CERT_PATH = os.path.join(pathlib.Path(__file__).resolve().parent.parent, "certificates") + CLIENT_PEM = os.path.join(CERT_PATH, "client.pem") CLIENT_ENCRYPTED_PEM = os.path.join(CERT_PATH, "password_protected.pem") CA_PEM = os.path.join(CERT_PATH, "ca.pem") @@ -76,50 +91,50 @@ # use 'localhost' for the hostname of all hosts. 
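From here on, the SSL tests construct clients through self.simple_client rather than bare MongoClient calls followed by addCleanup. A hedged sketch of what such a helper typically does; the real PyMongoTestCase may differ, this is only the shape the migration implies:

import unittest
from pymongo import MongoClient

class TrackedClientTestCase(unittest.TestCase):
    def simple_client(self, *args, **kwargs):
        # Build the client and guarantee it is closed when the test ends,
        # replacing repeated MongoClient(...) + self.addCleanup(client.close).
        client = MongoClient(*args, **kwargs)
        self.addCleanup(client.close)
        return client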
-class TestClientSSL(unittest.TestCase): +class TestClientSSL(PyMongoTestCase): @unittest.skipIf(HAVE_SSL, "The ssl module is available, can't test what happens without it.") def test_no_ssl_module(self): # Explicit - self.assertRaises(ConfigurationError, MongoClient, ssl=True) + self.assertRaises(ConfigurationError, self.simple_client, ssl=True) # Implied - self.assertRaises(ConfigurationError, MongoClient, tlsCertificateKeyFile=CLIENT_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tlsCertificateKeyFile=CLIENT_PEM) @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") @ignore_deprecations def test_config_ssl(self): # Tests various ssl configurations - self.assertRaises(ValueError, MongoClient, ssl="foo") + self.assertRaises(ValueError, self.simple_client, ssl="foo") self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM ) - self.assertRaises(TypeError, MongoClient, ssl=0) - self.assertRaises(TypeError, MongoClient, ssl=5.5) - self.assertRaises(TypeError, MongoClient, ssl=[]) + self.assertRaises(TypeError, self.simple_client, ssl=0) + self.assertRaises(TypeError, self.simple_client, ssl=5.5) + self.assertRaises(TypeError, self.simple_client, ssl=[]) - self.assertRaises(IOError, MongoClient, tlsCertificateKeyFile="NoSuchFile") - self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=True) - self.assertRaises(TypeError, MongoClient, tlsCertificateKeyFile=[]) + self.assertRaises(IOError, self.simple_client, tlsCertificateKeyFile="NoSuchFile") + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=True) + self.assertRaises(TypeError, self.simple_client, tlsCertificateKeyFile=[]) # Test invalid combinations self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsCertificateKeyFile=CLIENT_PEM + ConfigurationError, self.simple_client, tls=False, tlsCertificateKeyFile=CLIENT_PEM ) - self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCAFile=CA_PEM) - self.assertRaises(ConfigurationError, MongoClient, tls=False, tlsCRLFile=CRL_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCAFile=CA_PEM) + self.assertRaises(ConfigurationError, self.simple_client, tls=False, tlsCRLFile=CRL_PEM) self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsAllowInvalidCertificates=False + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidCertificates=False ) self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsAllowInvalidHostnames=False + ConfigurationError, self.simple_client, tls=False, tlsAllowInvalidHostnames=False ) self.assertRaises( - ConfigurationError, MongoClient, tls=False, tlsDisableOCSPEndpointCheck=False + ConfigurationError, self.simple_client, tls=False, tlsDisableOCSPEndpointCheck=False ) @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") def test_use_pyopenssl_when_available(self): - self.assertTrue(_ssl.IS_PYOPENSSL) + self.assertTrue(HAVE_PYSSL) @unittest.skipUnless(_HAVE_PYOPENSSL, "Cannot test without PyOpenSSL") def test_load_trusted_ca_certs(self): @@ -136,56 +151,56 @@ def assertClientWorks(self, client): ) coll.drop() coll.insert_one({"ssl": True}) - self.assertTrue(coll.find_one()["ssl"]) + self.assertTrue((coll.find_one())["ssl"]) coll.drop() - @classmethod @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") - def setUpClass(cls): - super(TestSSL, cls).setUpClass() + def 
setUp(self): + super().setUp() # MongoClient should connect to the primary by default. - cls.saved_port = MongoClient.PORT + self.saved_port = MongoClient.PORT MongoClient.PORT = client_context.port - @classmethod - def tearDownClass(cls): - MongoClient.PORT = cls.saved_port - super(TestSSL, cls).tearDownClass() + def tearDown(self): + MongoClient.PORT = self.saved_port @client_context.require_tls def test_simple_ssl(self): + if "PyPy" in sys.version: + self.skipTest("Test is flaky on PyPy") # Expects the server to be running with ssl and with # no --sslPEMKeyFile or with --sslWeakCertificateValidation self.assertClientWorks(self.client) @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version @ignore_deprecations def test_tlsCertificateKeyFilePassword(self): # Expects the server to be running with server.pem and ca.pem # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - if not hasattr(ssl, "SSLContext") and not _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "SSLContext") and not HAVE_PYSSL: self.assertRaises( ConfigurationError, - MongoClient, + self.simple_client, "localhost", ssl=True, tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, tlsCertificateKeyFilePassword="qwerty", tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) else: connected( - MongoClient( + self.simple_client( "localhost", ssl=True, tlsCertificateKeyFile=CLIENT_ENCRYPTED_PEM, tlsCertificateKeyFilePassword="qwerty", tlsCAFile=CA_PEM, serverSelectionTimeoutMS=5000, - **self.credentials # type: ignore[arg-type] + **self.credentials, # type: ignore[arg-type] ) ) @@ -195,7 +210,7 @@ def test_tlsCertificateKeyFilePassword(self): "&tlsCAFile=%s&serverSelectionTimeoutMS=5000" ) connected( - MongoClient(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + self.simple_client(uri_fmt % (CLIENT_ENCRYPTED_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] ) @client_context.require_tlsCertificateKeyFile @@ -209,7 +224,7 @@ def test_cert_ssl_implicitly_set(self): # # test that setting tlsCertificateKeyFile causes ssl to be set to True - client = MongoClient( + client = self.simple_client( client_context.host, client_context.port, tlsAllowInvalidCertificates=True, @@ -217,7 +232,7 @@ def test_cert_ssl_implicitly_set(self): ) response = client.admin.command(HelloCompat.LEGACY_CMD) if "setName" in response: - client = MongoClient( + client = self.simple_client( client_context.pair, replicaSet=response["setName"], w=len(response["hosts"]), @@ -236,7 +251,7 @@ def test_cert_ssl_validation(self): # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem # - client = MongoClient( + client = self.simple_client( "localhost", ssl=True, tlsCertificateKeyFile=CLIENT_PEM, @@ -251,7 +266,7 @@ def test_cert_ssl_validation(self): "Cannot validate hostname in the certificate" ) - client = MongoClient( + client = self.simple_client( "localhost", replicaSet=response["setName"], w=len(response["hosts"]), @@ -264,7 +279,7 @@ def test_cert_ssl_validation(self): self.assertClientWorks(client) if HAVE_IPADDRESS: - client = MongoClient( + client = self.simple_client( "127.0.0.1", ssl=True, tlsCertificateKeyFile=CLIENT_PEM, @@ -286,43 +301,50 @@ def test_cert_ssl_uri_support(self): "mongodb://localhost/?ssl=true&tlsCertificateKeyFile=%s&tlsAllowInvalidCertificates" "=%s&tlsCAFile=%s&tlsAllowInvalidHostnames=false" ) - client = 
MongoClient(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) + client = self.simple_client(uri_fmt % (CLIENT_PEM, "true", CA_PEM)) self.assertClientWorks(client) + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable + @client_context.require_no_api_version @ignore_deprecations def test_cert_ssl_validation_hostname_matching(self): # Expects the server to be running with server.pem and ca.pem # # --sslPEMKeyFile=/path/to/pymongo/test/certificates/server.pem # --sslCAFile=/path/to/pymongo/test/certificates/ca.pem - ctx = get_ssl_context(None, None, None, None, True, True, False) + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, True, False, False) + ctx = get_ssl_context(None, None, None, None, True, False, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, False, True, False) + ctx = get_ssl_context(None, None, None, None, False, True, False, _IS_SYNC) self.assertFalse(ctx.check_hostname) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) self.assertTrue(ctx.check_hostname) response = self.client.admin.command(HelloCompat.LEGACY_CMD) - with self.assertRaises(ConnectionFailure): + with self.assertRaises(ConnectionFailure) as cm: connected( - MongoClient( + self.simple_client( "server", ssl=True, tlsCertificateKeyFile=CLIENT_PEM, tlsAllowInvalidCertificates=False, tlsCAFile=CA_PEM, serverSelectionTimeoutMS=500, - **self.credentials # type: ignore[arg-type] + **self.credentials, # type: ignore[arg-type] ) ) + # PYTHON-5414 Check for "module service_identity has no attribute SICertificateError" + self.assertNotIn("has no attribute", str(cm.exception)) connected( - MongoClient( + self.simple_client( "server", ssl=True, tlsCertificateKeyFile=CLIENT_PEM, @@ -330,14 +352,14 @@ def test_cert_ssl_validation_hostname_matching(self): tlsCAFile=CA_PEM, tlsAllowInvalidHostnames=True, serverSelectionTimeoutMS=500, - **self.credentials # type: ignore[arg-type] + **self.credentials, # type: ignore[arg-type] ) ) if "setName" in response: with self.assertRaises(ConnectionFailure): connected( - MongoClient( + self.simple_client( "server", replicaSet=response["setName"], ssl=True, @@ -345,12 +367,12 @@ def test_cert_ssl_validation_hostname_matching(self): tlsAllowInvalidCertificates=False, tlsCAFile=CA_PEM, serverSelectionTimeoutMS=500, - **self.credentials # type: ignore[arg-type] + **self.credentials, # type: ignore[arg-type] ) ) connected( - MongoClient( + self.simple_client( "server", replicaSet=response["setName"], ssl=True, @@ -359,60 +381,67 @@ def test_cert_ssl_validation_hostname_matching(self): tlsCAFile=CA_PEM, tlsAllowInvalidHostnames=True, serverSelectionTimeoutMS=500, - **self.credentials # type: ignore[arg-type] + **self.credentials, # type: ignore[arg-type] ) ) @client_context.require_tlsCertificateKeyFile + @client_context.require_sync + @client_context.require_no_api_version @ignore_deprecations def test_tlsCRLFile_support(self): - if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or _ssl.IS_PYOPENSSL: + if not hasattr(ssl, "VERIFY_CRL_CHECK_LEAF") or HAVE_PYSSL: self.assertRaises( ConfigurationError, - MongoClient, + self.simple_client, "localhost", ssl=True, 
tlsCAFile=CA_PEM, tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) else: connected( - MongoClient( + self.simple_client( "localhost", ssl=True, tlsCAFile=CA_PEM, - serverSelectionTimeoutMS=100, - **self.credentials # type: ignore[arg-type] + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] ) ) with self.assertRaises(ConnectionFailure): connected( - MongoClient( + self.simple_client( "localhost", ssl=True, tlsCAFile=CA_PEM, tlsCRLFile=CRL_PEM, - serverSelectionTimeoutMS=100, - **self.credentials # type: ignore[arg-type] + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] ) ) - uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=100" - connected(MongoClient(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore + uri_fmt = "mongodb://localhost/?ssl=true&tlsCAFile=%s&serverSelectionTimeoutMS=1000" + connected(self.simple_client(uri_fmt % (CA_PEM,), **self.credentials)) # type: ignore uri_fmt = ( "mongodb://localhost/?ssl=true&tlsCRLFile=%s" - "&tlsCAFile=%s&serverSelectionTimeoutMS=100" + "&tlsCAFile=%s&serverSelectionTimeoutMS=1000" ) with self.assertRaises(ConnectionFailure): connected( - MongoClient(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] + self.simple_client(uri_fmt % (CRL_PEM, CA_PEM), **self.credentials) # type: ignore[arg-type] ) + @unittest.skipIf( + "PyPy" in sys.version and not _IS_SYNC, + "https://github.com/pypy/pypy/issues/5131 flaky on async PyPy due to SSL EOF", + ) @client_context.require_tlsCertificateKeyFile @client_context.require_server_resolvable + @client_context.require_no_api_version @ignore_deprecations def test_validation_with_system_ca_certs(self): # Expects the server to be running with server.pem and ca.pem. @@ -425,40 +454,44 @@ def test_validation_with_system_ca_certs(self): with self.assertRaises(ConnectionFailure): # Server cert is verified but hostname matching fails connected( - MongoClient("server", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + self.simple_client( + "server", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] ) # Server cert is verified. Disable hostname matching. connected( - MongoClient( + self.simple_client( "server", ssl=True, tlsAllowInvalidHostnames=True, - serverSelectionTimeoutMS=100, - **self.credentials # type: ignore[arg-type] + serverSelectionTimeoutMS=1000, + **self.credentials, # type: ignore[arg-type] ) ) # Server cert and hostname are verified. connected( - MongoClient("localhost", ssl=True, serverSelectionTimeoutMS=100, **self.credentials) # type: ignore[arg-type] + self.simple_client( + "localhost", ssl=True, serverSelectionTimeoutMS=1000, **self.credentials + ) # type: ignore[arg-type] ) # Server cert and hostname are verified. 
connected( - MongoClient( - "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=100", **self.credentials # type: ignore[arg-type] + self.simple_client( + "mongodb://localhost/?ssl=true&serverSelectionTimeoutMS=1000", + **self.credentials, # type: ignore[arg-type] ) ) def test_system_certs_config_error(self): - ctx = get_ssl_context(None, None, None, None, True, True, False) + ctx = get_ssl_context(None, None, None, None, True, True, False, _IS_SYNC) if (sys.platform != "win32" and hasattr(ctx, "set_default_verify_paths")) or hasattr( ctx, "load_default_certs" ): raise SkipTest("Can't test when system CA certificates are loadable.") - ssl_support: Any have_certifi = ssl_support.HAVE_CERTIFI have_wincertstore = ssl_support.HAVE_WINCERTSTORE # Force the test regardless of environment. @@ -466,7 +499,7 @@ def test_system_certs_config_error(self): ssl_support.HAVE_WINCERTSTORE = False try: with self.assertRaises(ConfigurationError): - MongoClient("mongodb://localhost/?ssl=true") + self.simple_client("mongodb://localhost/?ssl=true") finally: ssl_support.HAVE_CERTIFI = have_certifi ssl_support.HAVE_WINCERTSTORE = have_wincertstore @@ -477,7 +510,6 @@ def test_certifi_support(self): # with SSLContext and SSLContext provides no information # about ca_certs. raise SkipTest("Can't test when SSLContext available.") - ssl_support: Any if not ssl_support.HAVE_CERTIFI: raise SkipTest("Need certifi to test certifi support.") @@ -485,11 +517,11 @@ def test_certifi_support(self): # Force the test on Windows, regardless of environment. ssl_support.HAVE_WINCERTSTORE = False try: - ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support.certifi.where()) finally: @@ -506,16 +538,17 @@ def test_wincertstore(self): if not ssl_support.HAVE_WINCERTSTORE: raise SkipTest("Need wincertstore to test wincertstore.") - ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False) + ctx = get_ssl_context(None, None, CA_PEM, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, CA_PEM) - ctx = get_ssl_context(None, None, None, None, False, False, False) + ctx = get_ssl_context(None, None, None, None, False, False, False, _IS_SYNC) ssl_sock = ctx.wrap_socket(socket.socket()) self.assertEqual(ssl_sock.ca_certs, ssl_support._WINCERTS.name) @client_context.require_auth @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version @ignore_deprecations def test_mongodb_x509_auth(self): host, port = client_context.host, client_context.port @@ -531,19 +564,18 @@ def test_mongodb_x509_auth(self): ], ) - noauth = MongoClient( + noauth = self.simple_client( client_context.pair, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM, ) - self.addCleanup(noauth.close) with self.assertRaises(OperationFailure): noauth.pymongo_test.test.find_one() listener = EventListener() - auth = MongoClient( + auth = self.simple_client( client_context.pair, authMechanism="MONGODB-X509", ssl=True, @@ -551,7 +583,6 @@ def test_mongodb_x509_auth(self): tlsCertificateKeyFile=CLIENT_PEM, event_listeners=[listener], ) - 
self.addCleanup(auth.close) # No error auth.pymongo_test.test.find_one() @@ -567,18 +598,16 @@ def test_mongodb_x509_auth(self): host, port, ) - client = MongoClient( + client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) - self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() uri = "mongodb://%s:%d/?authMechanism=MONGODB-X509" % (host, port) - client = MongoClient( + client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) - self.addCleanup(client.close) # No error client.pymongo_test.test.find_one() # Auth should fail if username and certificate do not match @@ -588,15 +617,14 @@ def test_mongodb_x509_auth(self): port, ) - bad_client = MongoClient( + bad_client = self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM ) - self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() - bad_client = MongoClient( + bad_client = self.simple_client( client_context.pair, username="not the username", authMechanism="MONGODB-X509", @@ -604,7 +632,6 @@ def test_mongodb_x509_auth(self): tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CLIENT_PEM, ) - self.addCleanup(bad_client.close) with self.assertRaises(OperationFailure): bad_client.pymongo_test.test.find_one() @@ -617,12 +644,12 @@ def test_mongodb_x509_auth(self): ) try: connected( - MongoClient( + self.simple_client( uri, ssl=True, tlsAllowInvalidCertificates=True, tlsCertificateKeyFile=CA_PEM, - serverSelectionTimeoutMS=100, + serverSelectionTimeoutMS=1000, ) ) except (ConnectionFailure, ConfigurationError): @@ -631,6 +658,7 @@ def test_mongodb_x509_auth(self): self.fail("Invalid certificate accepted.") @client_context.require_tlsCertificateKeyFile + @client_context.require_no_api_version @ignore_deprecations def test_connect_with_ca_bundle(self): def remove(path): @@ -643,11 +671,19 @@ def remove(path): self.addCleanup(remove, temp_ca_bundle) # Add the CA cert file to the bundle. cat_files(temp_ca_bundle, CA_BUNDLE_PEM, CA_PEM) - with MongoClient( + with self.simple_client( "localhost", tls=True, tlsCertificateKeyFile=CLIENT_PEM, tlsCAFile=temp_ca_bundle ) as client: self.assertTrue(client.admin.command("ping")) + @client_context.require_async + @unittest.skipUnless(_HAVE_PYOPENSSL, "PyOpenSSL is not available.") + @unittest.skipUnless(HAVE_SSL, "The ssl module is not available.") + def test_pyopenssl_ignored_in_async(self): + client = MongoClient("mongodb://localhost:27017?tls=true&tlsAllowInvalidCertificates=true") + client.admin.command("ping") # command doesn't matter, just needs it to connect + client.close() + if __name__ == "__main__": unittest.main() diff --git a/test/test_streaming_protocol.py b/test/test_streaming_protocol.py index 72df717901..927230091f 100644 --- a/test/test_streaming_protocol.py +++ b/test/test_streaming_protocol.py @@ -13,6 +13,7 @@ # limitations under the License. 
"""Test the database module.""" +from __future__ import annotations import sys import time @@ -20,29 +21,28 @@ sys.path[0:0] = [""] from test import IntegrationTest, client_context, unittest -from test.utils import ( +from test.utils_shared import ( HeartbeatEventListener, ServerEventListener, - rs_or_single_client, - single_client, wait_until, ) from pymongo import monitoring from pymongo.hello import HelloCompat +_IS_SYNC = True + class TestStreamingProtocol(IntegrationTest): @client_context.require_failCommand_appName def test_failCommand_streaming(self): listener = ServerEventListener() hb_listener = HeartbeatEventListener() - client = rs_or_single_client( + client = self.rs_or_single_client( event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName="failingHeartbeatTest", ) - self.addCleanup(client.close) # Force a connection. client.admin.command("ping") address = client.address @@ -79,7 +79,7 @@ def marked_unknown(): def rediscovered(): return len(listener.matching(_discovered_node)) >= 1 - # Topology events are published asynchronously + # Topology events are not published synchronously wait_until(marked_unknown, "mark node unknown") wait_until(rediscovered, "rediscover node") @@ -106,10 +106,9 @@ def test_streaming_rtt(self): }, } with self.fail_point(delay_hello): - client = rs_or_single_client( + client = self.rs_or_single_client( event_listeners=[listener, hb_listener], heartbeatFrequencyMS=500, appName=name ) - self.addCleanup(client.close) # Force a connection. client.admin.command("ping") address = client.address @@ -122,6 +121,7 @@ def rtt_exceeds_250_ms(): # XXX: Add a public TopologyDescription getter to MongoClient? topology = client._topology sd = topology.description.server_descriptions()[address] + assert sd.round_trip_time is not None return sd.round_trip_time > 0.250 wait_until(rtt_exceeds_250_ms, "exceed 250ms RTT") @@ -139,11 +139,10 @@ def changed_event(event): self.assertEqual(1, len(events)) self.assertGreater(events[0].new_description.round_trip_time, 0) - @client_context.require_version_min(4, 9, -1) @client_context.require_failCommand_appName def test_monitor_waits_after_server_check_error(self): # This test implements: - # https://github.com/mongodb/specifications/blob/6c5b2ac/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.rst#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks + # https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-tests.md#monitors-sleep-at-least-minheartbeatfreqencyms-between-checks fail_hello = { "mode": {"times": 5}, "data": { @@ -154,10 +153,9 @@ def test_monitor_waits_after_server_check_error(self): } with self.fail_point(fail_hello): start = time.time() - client = single_client( + client = self.single_client( appName="SDAMMinHeartbeatFrequencyTest", serverSelectionTimeoutMS=5000 ) - self.addCleanup(client.close) # Force a connection. 
client.admin.command("ping") duration = time.time() - start @@ -174,17 +172,16 @@ def test_monitor_waits_after_server_check_error(self): # 2504ms: application handshake succeeds # 2505ms: ping command succeeds self.assertGreaterEqual(duration, 2) - self.assertLessEqual(duration, 3.5) + self.assertLessEqual(duration, 4.0) @client_context.require_failCommand_appName def test_heartbeat_awaited_flag(self): hb_listener = HeartbeatEventListener() - client = single_client( + client = self.single_client( event_listeners=[hb_listener], heartbeatFrequencyMS=500, appName="heartbeatEventAwaitedFlag", ) - self.addCleanup(client.close) # Force a connection. client.admin.command("ping") diff --git a/test/test_threads.py b/test/test_threads.py index 2c73de52e7..3e469e28fe 100644 --- a/test/test_threads.py +++ b/test/test_threads.py @@ -13,6 +13,7 @@ # limitations under the License. """Test that pymongo is thread safe.""" +from __future__ import annotations import threading from test import IntegrationTest, client_context, unittest @@ -30,7 +31,7 @@ def __init__(self, collection, num): self.coll = collection self.num = num self.success = False - self.setDaemon(True) + self.daemon = True def run(self): for i in range(self.num): @@ -44,7 +45,7 @@ class SaveAndFind(threading.Thread): def __init__(self, collection): threading.Thread.__init__(self) self.collection = collection - self.setDaemon(True) + self.daemon = True self.passed = False def run(self): @@ -62,7 +63,7 @@ def __init__(self, collection, n, expect_exception): self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -85,7 +86,7 @@ def __init__(self, collection, n, expect_exception): self.collection = collection self.n = n self.expect_exception = expect_exception - self.setDaemon(True) + self.daemon = True def run(self): for _ in range(self.n): @@ -104,6 +105,7 @@ def run(self): class TestThreads(IntegrationTest): def setUp(self): + super().setUp() self.db = self.client.pymongo_test def test_threading(self): @@ -111,7 +113,7 @@ def test_threading(self): self.db.test.insert_many([{"x": i} for i in range(1000)]) threads = [] - for i in range(10): + for _i in range(10): t = SaveAndFind(self.db.test) t.start() threads.append(t) diff --git a/test/test_timestamp.py b/test/test_timestamp.py index 3602fe2808..ef7d8bde15 100644 --- a/test/test_timestamp.py +++ b/test/test_timestamp.py @@ -13,6 +13,7 @@ # limitations under the License. """Tests for the Timestamp class.""" +from __future__ import annotations import copy import datetime @@ -32,7 +33,7 @@ def test_timestamp(self): t = Timestamp(123, 456) self.assertEqual(t.time, 123) self.assertEqual(t.inc, 456) - self.assertTrue(isinstance(t, Timestamp)) + self.assertIsInstance(t, Timestamp) def test_datetime(self): d = datetime.datetime(2010, 5, 5, tzinfo=utc) diff --git a/test/test_topology.py b/test/test_topology.py index d7bae9229f..d3bbcd9060 100644 --- a/test/test_topology.py +++ b/test/test_topology.py @@ -13,27 +13,32 @@ # limitations under the License. 
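The test_threads.py hunks above swap setDaemon(True) for the daemon attribute: Thread.setDaemon() has been deprecated since Python 3.10 in favor of assigning the attribute directly, and the two spellings are otherwise equivalent. A minimal illustration:

import threading

def worker() -> None:
    print("running in a daemon thread")

t = threading.Thread(target=worker)
t.daemon = True  # preferred; t.setDaemon(True) emits DeprecationWarning on 3.10+
t.start()
t.join()
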
"""Test the topology module.""" +from __future__ import annotations import sys +from pymongo.operations import _Op + sys.path[0:0] = [""] from test import client_knobs, unittest from test.pymongo_mocks import DummyMonitor -from test.utils import MockPool, wait_until +from test.utils import MockPool, flaky +from test.utils_shared import wait_until from bson.objectid import ObjectId from pymongo import common from pymongo.errors import AutoReconnect, ConfigurationError, ConnectionFailure from pymongo.hello import Hello, HelloCompat -from pymongo.monitor import Monitor -from pymongo.pool import PoolOptions -from pymongo.read_preferences import ReadPreference, Secondary +from pymongo.read_preferences import Primary, ReadPreference, Secondary from pymongo.server_description import ServerDescription from pymongo.server_selectors import any_server_selector, writable_server_selector from pymongo.server_type import SERVER_TYPE -from pymongo.settings import TopologySettings -from pymongo.topology import Topology, _ErrorContext +from pymongo.synchronous.monitor import Monitor +from pymongo.synchronous.pool import PoolOptions +from pymongo.synchronous.server import Server +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology, _ErrorContext, _filter_servers from pymongo.topology_description import TOPOLOGY_TYPE @@ -46,13 +51,16 @@ def get_topology_type(self): def create_mock_topology( - seeds=None, replica_set_name=None, monitor_class=DummyMonitor, direct_connection=False + seeds=None, + replica_set_name=None, + monitor_class=DummyMonitor, + direct_connection=False, ): partitioned_seeds = list(map(common.partition_node, seeds or ["a"])) topology_settings = TopologySettings( partitioned_seeds, replica_set_name=replica_set_name, - pool_class=MockPool, + pool_class=MockPool, # type: ignore[arg-type] monitor_class=monitor_class, direct_connection=direct_connection, ) @@ -89,7 +97,7 @@ class TopologyTest(unittest.TestCase): """Disables periodic monitoring, to make tests deterministic.""" def setUp(self): - super(TopologyTest, self).setUp() + super().setUp() self.client_knobs = client_knobs(heartbeat_frequency=999999) self.client_knobs.enable() self.addCleanup(self.client_knobs.disable) @@ -116,7 +124,26 @@ def test_timeout_configuration(self): self.assertEqual(1, monitor._pool.opts.socket_timeout) # The monitor, not its pool, is responsible for calling hello. - self.assertFalse(monitor._pool.handshake) + self.assertTrue(monitor._pool.is_sdam) + + def test_selector_fast_path(self): + topology = create_mock_topology(seeds=["a", "b:27018"], replica_set_name="foo") + description = topology.description + description._topology_type = TOPOLOGY_TYPE.ReplicaSetWithPrimary + + # There is no primary yet, so it should give an empty list. + self.assertEqual(description.apply_selector(Primary()), []) + + # If we set a primary server, we should get it back. + sd = list(description._server_descriptions.values())[0] + sd._server_type = SERVER_TYPE.RSPrimary + self.assertEqual(description.apply_selector(Primary()), [sd]) + + # If there is a custom selector, it should be applied. 
+ def custom_selector(servers): + return [] + + self.assertEqual(description.apply_selector(Primary(), custom_selector=custom_selector), []) class TestSingleServerTopology(TopologyTest): @@ -129,7 +156,7 @@ def test_direct_connection(self): HelloCompat.LEGACY_CMD: True, "hosts": ["a"], "setName": "rs", - "maxWireVersion": 6, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, }, ), ( @@ -140,12 +167,17 @@ def test_direct_connection(self): "secondary": True, "hosts": ["a"], "setName": "rs", - "maxWireVersion": 6, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, }, ), ( SERVER_TYPE.Mongos, - {"ok": 1, HelloCompat.LEGACY_CMD: True, "msg": "isdbgrid", "maxWireVersion": 6}, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "msg": "isdbgrid", + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, ), ( SERVER_TYPE.RSArbiter, @@ -155,20 +187,34 @@ def test_direct_connection(self): "arbiterOnly": True, "hosts": ["a"], "setName": "rs", - "maxWireVersion": 6, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ), + ( + SERVER_TYPE.Standalone, + { + "ok": 1, + HelloCompat.LEGACY_CMD: True, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, }, ), - (SERVER_TYPE.Standalone, {"ok": 1, HelloCompat.LEGACY_CMD: True, "maxWireVersion": 6}), # A "slave" in a master-slave deployment. # This replication type was removed in MongoDB # 4.0. - (SERVER_TYPE.Standalone, {"ok": 1, HelloCompat.LEGACY_CMD: False, "maxWireVersion": 6}), + ( + SERVER_TYPE.Standalone, + { + "ok": 1, + HelloCompat.LEGACY_CMD: False, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, + }, + ), ]: t = create_mock_topology(direct_connection=True) # Can't select a server while the only server is of type Unknown. with self.assertRaisesRegex(ConnectionFailure, "No servers found yet"): - t.select_servers(any_server_selector, server_selection_timeout=0) + t.select_servers(any_server_selector, _Op.TEST, server_selection_timeout=0) got_hello(t, address, hello_response) @@ -177,7 +223,7 @@ def test_direct_connection(self): # No matter whether the server is writable, # select_servers() returns it. - s = t.select_server(writable_server_selector) + s = t.select_server(writable_server_selector, _Op.TEST) self.assertEqual(server_type, s.description.server_type) # Topology type single is always readable and writable regardless @@ -209,13 +255,16 @@ def test_round_trip_time(self): class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): if available: - return (Hello({"ok": 1, "maxWireVersion": 6}), round_trip_time) + return ( + Hello({"ok": 1, "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION}), + round_trip_time, + ) else: raise AutoReconnect("mock monitor error") t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) - s = t.select_server(writable_server_selector) + s = t.select_server(writable_server_selector, _Op.TEST) self.assertEqual(125, s.description.round_trip_time) round_trip_time = 25 @@ -230,7 +279,7 @@ def _check_with_socket(self, *args, **kwargs): def raises_err(): try: - t.select_server(writable_server_selector, server_selection_timeout=0.1) + t.select_server(writable_server_selector, _Op.TEST, server_selection_timeout=0.1) except ConnectionFailure: return True else: @@ -451,7 +500,9 @@ def test_discover_set_name_from_primary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. 
topology_settings = SetNameDiscoverySettings( - seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + seeds=[address], + pool_class=MockPool, # type: ignore[arg-type] + monitor_class=DummyMonitor, # type: ignore[arg-type] ) t = Topology(topology_settings) @@ -479,7 +530,9 @@ def test_discover_set_name_from_secondary(self): # Discovering a replica set without the setName supplied by the user # is not yet supported by MongoClient, but Topology can do it. topology_settings = SetNameDiscoverySettings( - seeds=[address], pool_class=MockPool, monitor_class=DummyMonitor + seeds=[address], + pool_class=MockPool, # type: ignore[arg-type] + monitor_class=DummyMonitor, # type: ignore[arg-type] ) t = Topology(topology_settings) @@ -523,13 +576,13 @@ def test_wire_version(self): "setName": "rs", "hosts": ["a"], "minWireVersion": 1, - "maxWireVersion": 6, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, }, ) self.assertEqual(server.description.min_wire_version, 1) - self.assertEqual(server.description.max_wire_version, 6) - t.select_servers(any_server_selector) + self.assertEqual(server.description.max_wire_version, 8) + t.select_servers(any_server_selector, _Op.TEST) # Incompatible. got_hello( @@ -540,18 +593,18 @@ def test_wire_version(self): HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a"], - "minWireVersion": 21, - "maxWireVersion": 22, + "minWireVersion": 26, + "maxWireVersion": 27, }, ) try: - t.select_servers(any_server_selector) + t.select_servers(any_server_selector, _Op.TEST) except ConfigurationError as e: # Error message should say which server failed and why. self.assertEqual( str(e), - "Server at a:27017 requires wire version 21, but this version " + "Server at a:27017 requires wire version 26, but this version " "of PyMongo only supports up to %d." % (common.MAX_SUPPORTED_WIRE_VERSION,), ) else: @@ -572,7 +625,7 @@ def test_wire_version(self): ) try: - t.select_servers(any_server_selector) + t.select_servers(any_server_selector, _Op.TEST) except ConfigurationError as e: # Error message should say which server failed and why. 
self.assertEqual( @@ -588,7 +641,7 @@ def test_max_write_batch_size(self): t = create_mock_topology(seeds=["a", "b"], replica_set_name="rs") def write_batch_size(): - s = t.select_server(writable_server_selector) + s = t.select_server(writable_server_selector, _Op.TEST) return s.description.max_write_batch_size got_hello( @@ -599,7 +652,7 @@ def write_batch_size(): HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"], - "maxWireVersion": 6, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, "maxWriteBatchSize": 1, }, ) @@ -613,7 +666,7 @@ def write_batch_size(): "secondary": True, "setName": "rs", "hosts": ["a", "b"], - "maxWireVersion": 6, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, "maxWriteBatchSize": 2, }, ) @@ -630,7 +683,7 @@ def write_batch_size(): HelloCompat.LEGACY_CMD: True, "setName": "rs", "hosts": ["a", "b"], - "maxWireVersion": 6, + "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION, "maxWriteBatchSize": 2, }, ) @@ -647,13 +700,13 @@ def test_topology_repr(self): ) self.assertEqual( repr(t.description), - ", " ", " "]>" % (t._topology_id,), + " rtt: None>]>", ) def test_unexpected_load_balancer(self): @@ -680,6 +733,23 @@ def test_unexpected_load_balancer(self): self.assertNotIn(("a", 27017), t.description.server_descriptions()) self.assertEqual(t.description.topology_type_name, "Unknown") + def test_filtered_server_selection(self): + s1 = Server(ServerDescription(("localhost", 27017)), pool=object(), monitor=object()) # type: ignore[arg-type] + s2 = Server(ServerDescription(("localhost2", 27017)), pool=object(), monitor=object()) # type: ignore[arg-type] + servers = [s1, s2] + + result = _filter_servers(servers, deprioritized_servers=[s2]) + self.assertEqual(result, [s1]) + + result = _filter_servers(servers, deprioritized_servers=[s1, s2]) + self.assertEqual(result, servers) + + result = _filter_servers(servers, deprioritized_servers=[]) + self.assertEqual(result, servers) + + result = _filter_servers(servers) + self.assertEqual(result, servers) + def wait_for_primary(topology): """Wait for a Topology to discover a writable server. @@ -692,7 +762,7 @@ def wait_for_primary(topology): def get_primary(): try: - return topology.select_server(writable_server_selector, 0) + return topology.select_server(writable_server_selector, _Op.TEST, 0) except ConnectionFailure: return None @@ -702,6 +772,7 @@ def get_primary(): class TestTopologyErrors(TopologyTest): # Errors when calling hello. + @flaky(reason="PYTHON-5366") def test_pool_reset(self): # hello succeeds at first, then always raises socket error. 
hello_count = [0] @@ -710,7 +781,7 @@ class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): hello_count[0] += 1 if hello_count[0] == 1: - return Hello({"ok": 1, "maxWireVersion": 6}), 0 + return Hello({"ok": 1, "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION}), 0 else: raise AutoReconnect("mock monitor error") @@ -732,9 +803,9 @@ class TestMonitor(Monitor): def _check_with_socket(self, *args, **kwargs): hello_count[0] += 1 if hello_count[0] in (1, 3): - return Hello({"ok": 1, "maxWireVersion": 6}), 0 + return Hello({"ok": 1, "maxWireVersion": common.MIN_SUPPORTED_WIRE_VERSION}), 0 else: - raise AutoReconnect("mock monitor error #%s" % (hello_count[0],)) + raise AutoReconnect(f"mock monitor error #{hello_count[0]}") t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) @@ -748,7 +819,7 @@ def _check_with_socket(self, *args, **kwargs): # The third hello call (the immediate retry) happens sometime soon # after the failed check triggered by request_check_all. Wait until # the server becomes known again. - server = t.select_server(writable_server_selector, 0.250) + server = t.select_server(writable_server_selector, _Op.TEST, 0.250) self.assertEqual(SERVER_TYPE.Standalone, server.description.server_type) self.assertEqual(3, hello_count[0]) @@ -762,13 +833,13 @@ def _check_with_socket(self, *args, **kwargs): t = create_mock_topology(monitor_class=TestMonitor) self.addCleanup(t.close) with self.assertRaisesRegex(ConnectionFailure, "internal error"): - t.select_server(any_server_selector, server_selection_timeout=0.5) + t.select_server(any_server_selector, _Op.TEST, server_selection_timeout=0.5) class TestServerSelectionErrors(TopologyTest): def assertMessage(self, message, topology, selector=any_server_selector): with self.assertRaises(ConnectionFailure) as context: - topology.select_server(selector, server_selection_timeout=0) + topology.select_server(selector, _Op.TEST, server_selection_timeout=0) self.assertIn(message, str(context.exception)) diff --git a/test/test_transactions.py b/test/test_transactions.py index 4cee3fa236..813d6a688d 100644 --- a/test/test_transactions.py +++ b/test/test_transactions.py @@ -13,31 +13,30 @@ # limitations under the License. 
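The new test_filtered_server_selection case above pins down the contract of _filter_servers: deprioritized servers are removed from the candidate list, unless removing them would leave no candidates, in which case the original list is returned unchanged. A standalone sketch of that contract (not the driver's internal implementation):

from typing import List, Optional, TypeVar

T = TypeVar("T")

def filter_servers(servers: List[T], deprioritized: Optional[List[T]] = None) -> List[T]:
    """Drop deprioritized servers unless that would empty the list."""
    if not deprioritized:
        return servers
    filtered = [s for s in servers if s not in deprioritized]
    return filtered if filtered else servers

assert filter_servers(["s1", "s2"], ["s2"]) == ["s1"]
assert filter_servers(["s1", "s2"], ["s1", "s2"]) == ["s1", "s2"]
assert filter_servers(["s1", "s2"]) == ["s1", "s2"]
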
"""Execute Transactions Spec tests.""" +from __future__ import annotations -import os import sys from io import BytesIO +from test.utils_spec_runner import SpecRunner + +from gridfs.synchronous.grid_file import GridFS, GridFSBucket +from pymongo.server_selectors import writable_server_selector +from pymongo.synchronous.pool import PoolState sys.path[0:0] = [""] -from test import client_context, unittest -from test.utils import ( +from test import IntegrationTest, client_context, unittest +from test.utils_shared import ( OvertCommandListener, - TestCreator, - rs_client, - single_client, wait_until, ) -from test.utils_spec_runner import SpecRunner +from typing import List from bson import encode from bson.raw_bson import RawBSONDocument -from gridfs import GridFS, GridFSBucket -from pymongo import WriteConcern, client_session -from pymongo.client_session import TransactionOptions -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import Cursor +from pymongo import WriteConcern, _csot from pymongo.errors import ( + AutoReconnect, CollectionInvalid, ConfigurationError, ConnectionFailure, @@ -47,11 +46,12 @@ from pymongo.operations import IndexModel, InsertOne from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference +from pymongo.synchronous import client_session +from pymongo.synchronous.client_session import TransactionOptions +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor -# Location of JSON test specifications. -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "legacy") - -_TXN_TESTS_DEBUG = os.environ.get("TRANSACTION_TESTS_DEBUG") +_IS_SYNC = True # Max number of operations to perform after a transaction to prove unpinning # occurs. Chosen so that there's a low false positive rate. 
With 2 mongoses, @@ -61,21 +61,8 @@ class TransactionsBase(SpecRunner): - @classmethod - def setUpClass(cls): - super(TransactionsBase, cls).setUpClass() - if client_context.supports_transactions(): - for address in client_context.mongoses: - cls.mongos_clients.append(single_client("%s:%s" % address)) - - @classmethod - def tearDownClass(cls): - for client in cls.mongos_clients: - client.close() - super(TransactionsBase, cls).tearDownClass() - def maybe_skip_scenario(self, test): - super(TransactionsBase, self).maybe_skip_scenario(test) + super().maybe_skip_scenario(test) if ( "secondary" in self.id() and not client_context.is_mongos @@ -85,8 +72,6 @@ def maybe_skip_scenario(self, test): class TestTransactions(TransactionsBase): - RUN_ON_SERVERLESS = True - @client_context.require_transactions def test_transaction_options_validation(self): default_options = TransactionOptions() @@ -117,21 +102,20 @@ def test_transaction_options_validation(self): @client_context.require_transactions def test_transaction_write_concern_override(self): """Test txn overrides Client/Database/Collection write_concern.""" - client = rs_client(w=0) - self.addCleanup(client.close) + client = self.rs_client(w=0) db = client.test coll = db.test coll.insert_one({}) with client.start_session() as s: with s.start_transaction(write_concern=WriteConcern(w=1)): - self.assertTrue(coll.insert_one({}, session=s).acknowledged) - self.assertTrue(coll.insert_many([{}, {}], session=s).acknowledged) - self.assertTrue(coll.bulk_write([InsertOne({})], session=s).acknowledged) - self.assertTrue(coll.replace_one({}, {}, session=s).acknowledged) - self.assertTrue(coll.update_one({}, {"$set": {"a": 1}}, session=s).acknowledged) - self.assertTrue(coll.update_many({}, {"$set": {"a": 1}}, session=s).acknowledged) - self.assertTrue(coll.delete_one({}, session=s).acknowledged) - self.assertTrue(coll.delete_many({}, session=s).acknowledged) + self.assertTrue((coll.insert_one({}, session=s)).acknowledged) + self.assertTrue((coll.insert_many([{}, {}], session=s)).acknowledged) + self.assertTrue((coll.bulk_write([InsertOne({})], session=s)).acknowledged) + self.assertTrue((coll.replace_one({}, {}, session=s)).acknowledged) + self.assertTrue((coll.update_one({}, {"$set": {"a": 1}}, session=s)).acknowledged) + self.assertTrue((coll.update_many({}, {"$set": {"a": 1}}, session=s)).acknowledged) + self.assertTrue((coll.delete_one({}, session=s)).acknowledged) + self.assertTrue((coll.delete_many({}, session=s)).acknowledged) coll.find_one_and_delete({}, session=s) coll.find_one_and_replace({}, {}, session=s) coll.find_one_and_update({}, {"$set": {"a": 1}}, session=s) @@ -171,12 +155,11 @@ def test_transaction_write_concern_override(self): def test_unpin_for_next_transaction(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. - client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) + client = self.rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. coll.insert_one({}) - self.addCleanup(client.close) with client.start_session() as s: # Session is pinned to Mongos. with s.start_transaction(): @@ -199,12 +182,11 @@ def test_unpin_for_next_transaction(self): def test_unpin_for_non_transaction_operation(self): # Increase localThresholdMS and wait until both nodes are discovered # to avoid false positives. 
- client = rs_client(client_context.mongos_seeds(), localThresholdMS=1000) + client = self.rs_client(client_context.mongos_seeds(), localThresholdMS=1000) wait_until(lambda: len(client.nodes) > 1, "discover both mongoses") coll = client.test.test # Create the collection. coll.insert_one({}) - self.addCleanup(client.close) with client.start_session() as s: # Session is pinned to Mongos. with s.start_transaction(): @@ -260,7 +242,7 @@ def gridfs_find(*args, **kwargs): return gfs.find(*args, **kwargs).next() def gridfs_open_upload_stream(*args, **kwargs): - bucket.open_upload_stream(*args, **kwargs).write(b"1") + (bucket.open_upload_stream(*args, **kwargs)).write(b"1") gridfs_ops = [ (gfs.put, (b"123",)), @@ -305,6 +287,14 @@ def gridfs_open_upload_stream(*args, **kwargs): "new-name", ), ), + ( + bucket.rename_by_name, + ( + "new-name", + "new-name2", + ), + ), + (bucket.delete_by_name, ("new-name2",)), ] with client.start_session() as s, s.start_transaction(): @@ -328,26 +318,27 @@ def test_transaction_starts_with_batched_write(self): # Start a transaction with a batch of operations that needs to be # split. listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) + client = self.rs_client(event_listeners=[listener]) coll = client[self.db.name].test coll.delete_many({}) listener.reset() - self.addCleanup(client.close) self.addCleanup(coll.drop) large_str = "\0" * (1 * 1024 * 1024) - ops = [InsertOne(RawBSONDocument(encode({"a": large_str}))) for _ in range(48)] + ops: List[InsertOne[RawBSONDocument]] = [ + InsertOne(RawBSONDocument(encode({"a": large_str}))) for _ in range(48) + ] with client.start_session() as session: with session.start_transaction(): - coll.bulk_write(ops, session=session) + coll.bulk_write(ops, session=session) # type: ignore[arg-type] # Assert commands were constructed properly. self.assertEqual( ["insert", "insert", "commitTransaction"], listener.started_command_names() ) - first_cmd = listener.results["started"][0].command + first_cmd = listener.started_events[0].command self.assertTrue(first_cmd["startTransaction"]) lsid = first_cmd["lsid"] txn_number = first_cmd["txnNumber"] - for event in listener.results["started"][1:]: + for event in listener.started_events[1:]: self.assertNotIn("startTransaction", event.command) self.assertEqual(lsid, event.command["lsid"]) self.assertEqual(txn_number, event.command["txnNumber"]) @@ -355,15 +346,21 @@ def test_transaction_starts_with_batched_write(self): @client_context.require_transactions def test_transaction_direct_connection(self): - client = single_client() - self.addCleanup(client.close) + client = self.single_client() coll = client.pymongo_test.test # Make sure the collection exists. 
coll.insert_one({}) self.assertEqual(client.topology_description.topology_type_name, "Single") + + def find(*args, **kwargs): + return coll.find(*args, **kwargs) + + def find_raw_batches(*args, **kwargs): + return coll.find_raw_batches(*args, **kwargs) + ops = [ - (coll.bulk_write, [[InsertOne({})]]), + (coll.bulk_write, [[InsertOne[dict]({})]]), (coll.insert_one, [{}]), (coll.insert_many, [[{}, {}]]), (coll.replace_one, [{}, {}]), @@ -378,19 +375,35 @@ def test_transaction_direct_connection(self): (coll.count_documents, [{}]), (coll.distinct, ["foo"]), (coll.aggregate, [[]]), - (coll.find, [{}]), + (find, [{}]), (coll.aggregate_raw_batches, [[]]), - (coll.find_raw_batches, [{}]), + (find_raw_batches, [{}]), (coll.database.command, ["find", coll.name]), ] for f, args in ops: with client.start_session() as s, s.start_transaction(): - res = f(*args, session=s) + res = f(*args, session=s) # type:ignore[operator] if isinstance(res, (CommandCursor, Cursor)): - list(res) + res.to_list() + + @client_context.require_transactions + def test_transaction_pool_cleared_error_labelled_transient(self): + c = self.single_client() + with self.assertRaises(AutoReconnect) as context: + with c.start_session() as session: + with session.start_transaction(): + server = c._select_server(writable_server_selector, session, "test") + # Pause the server's pool, causing it to fail connection checkout. + server.pool.state = PoolState.PAUSED + with c._checkout(server, session): + pass -class PatchSessionTimeout(object): + # Verify that the TransientTransactionError label is present in the error. + self.assertTrue(context.exception.has_error_label("TransientTransactionError")) + + +class PatchSessionTimeout: """Patches the client_session's with_transaction timeout for testing.""" def __init__(self, mock_timeout): @@ -406,9 +419,17 @@ def __exit__(self, exc_type, exc_val, exc_tb): class TestTransactionsConvenientAPI(TransactionsBase): - TEST_PATH = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "transactions-convenient-api" - ) + def setUp(self) -> None: + super().setUp() + self.mongos_clients = [] + if client_context.supports_transactions(): + for address in client_context.mongoses: + self.mongos_clients.append(self.single_client("{}:{}".format(*address))) + + def set_fail_point(self, command_args): + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + self.configure_fail_point(client, command_args) @client_context.require_transactions def test_callback_raises_custom_error(self): @@ -416,7 +437,7 @@ class _MyException(Exception): pass def raise_error(_): - raise _MyException() + raise _MyException with self.client.start_session() as s: with self.assertRaises(_MyException): @@ -442,8 +463,7 @@ def callback2(session): @client_context.require_transactions def test_callback_not_retried_after_timeout(self): listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_client(event_listeners=[listener]) coll = client[self.db.name].test def callback(session): @@ -459,7 +479,7 @@ def callback(session): # Create the collection. 
coll.insert_one({}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): with self.assertRaises(OperationFailure): @@ -471,8 +491,7 @@ def callback(session): @client_context.require_transactions def test_callback_not_retried_after_commit_timeout(self): listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_client(event_listeners=[listener]) coll = client[self.db.name].test def callback(session): @@ -491,7 +510,7 @@ def callback(session): } ) self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): @@ -504,8 +523,7 @@ def callback(session): @client_context.require_transactions def test_commit_not_retried_after_timeout(self): listener = OvertCommandListener() - client = rs_client(event_listeners=[listener]) - self.addCleanup(client.close) + client = self.rs_client(event_listeners=[listener]) coll = client[self.db.name].test def callback(session): @@ -521,7 +539,7 @@ def callback(session): } ) self.addCleanup(self.set_fail_point, {"configureFailPoint": "failCommand", "mode": "off"}) - listener.results.clear() + listener.reset() with client.start_session() as s: with PatchSessionTimeout(0): @@ -572,22 +590,28 @@ def callback(session): self.assertFalse(s.in_transaction) -def create_test(scenario_def, test, name): - @client_context.require_test_commands +class TestOptionsInsideTransactionProse(TransactionsBase): @client_context.require_transactions - def run_scenario(self): - self.run_scenario(scenario_def, test) - - return run_scenario - - -test_creator = TestCreator(create_test, TestTransactions, TEST_PATH) -test_creator.create_tests() - - -TestCreator( - create_test, TestTransactionsConvenientAPI, TestTransactionsConvenientAPI.TEST_PATH -).create_tests() + @client_context.require_no_standalone + def test_case_1(self): + # Write concern not inherited from collection object inside transaction + # Create a MongoClient running against a configured sharded/replica set/load balanced cluster. + client = client_context.client + coll = client[self.db.name].test + coll.delete_many({}) + # Start a new session on the client. + with client.start_session() as s: + # Start a transaction on the session. + s.start_transaction() + # Instantiate a collection object in the driver with a default write concern of { w: 0 }. + inner_coll = coll.with_options(write_concern=WriteConcern(w=0)) + # Insert the document { n: 1 } on the instantiated collection. + result = inner_coll.insert_one({"n": 1}, session=s) + # Commit the transaction. + s.commit_transaction() + # End the session. + # Ensure the document was inserted and no error was thrown from the transaction. + assert result.inserted_id is not None if __name__ == "__main__": diff --git a/test/test_transactions_unified.py b/test/test_transactions_unified.py index 4f3aa233fa..4ab4885e2a 100644 --- a/test/test_transactions_unified.py +++ b/test/test_transactions_unified.py @@ -13,17 +13,40 @@ # limitations under the License. """Test the Transactions unified spec tests.""" +from __future__ import annotations import os import sys +from pathlib import Path sys.path[0:0] = [""] -from test import unittest +from test import client_context, unittest from test.unified_format import generate_test_classes +_IS_SYNC = True + + +def setUpModule(): + pass + + # Location of JSON test specifications. 
-TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "transactions", "unified") +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions/unified") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "transactions/unified") + +# Generate unified tests. +globals().update(generate_test_classes(TEST_PATH, module=__name__)) + +# Location of JSON test specifications for transactions-convenient-api. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "transactions-convenient-api/unified") +else: + TEST_PATH = os.path.join( + Path(__file__).resolve().parent.parent, "transactions-convenient-api/unified" + ) # Generate unified tests. globals().update(generate_test_classes(TEST_PATH, module=__name__)) diff --git a/test/test_typing.py b/test/test_typing.py new file mode 100644 index 0000000000..17dc21b4e0 --- /dev/null +++ b/test/test_typing.py @@ -0,0 +1,640 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test that each file in mypy_fails/ actually fails mypy, and test some +sample client code that uses PyMongo typings. +""" + +from __future__ import annotations + +import os +import sys +import tempfile +import unittest +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + Iterator, + List, + TypeVar, + Union, + cast, +) + +if TYPE_CHECKING: + from typing_extensions import NotRequired, TypedDict + + from bson import Binary, ObjectId + from bson.binary import BinaryVector, BinaryVectorDtype + + class Movie(TypedDict): + name: str + year: int + + class MovieWithId(TypedDict): + _id: ObjectId + name: str + year: int + + class ImplicitMovie(TypedDict): + _id: NotRequired[ObjectId] + name: str + year: int +else: + Movie = dict + ImplicitMovie = dict + NotRequired = None + + +try: + from mypy import api +except ImportError: + api = None # type: ignore[assignment] + +sys.path[0:0] = [""] + +from test import IntegrationTest, PyMongoTestCase, client_context + +from bson import CodecOptions, ObjectId, decode, decode_all, decode_file_iter, decode_iter, encode +from bson.raw_bson import RawBSONDocument +from bson.son import SON +from pymongo import ASCENDING, MongoClient +from pymongo.operations import DeleteOne, InsertOne, ReplaceOne +from pymongo.read_preferences import ReadPreference +from pymongo.synchronous.collection import Collection + +TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "mypy_fails") + + +def get_tests() -> Iterable[str]: + for dirpath, _, filenames in os.walk(TEST_PATH): + for filename in filenames: + yield os.path.join(dirpath, filename) + + +FuncT = TypeVar("FuncT", bound=Callable[..., None]) + + +def only_type_check(func: FuncT) -> FuncT: + def inner(*args, **kwargs): + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + func(*args, **kwargs) + + return cast(FuncT, inner) + + +class TestMypyFails(unittest.TestCase): + def ensure_mypy_fails(self, filename: str) -> None: + if 
api is None: + raise unittest.SkipTest("Mypy is not installed") + stdout, stderr, exit_status = api.run([filename]) + self.assertTrue(exit_status, msg=stdout) + + def test_mypy_failures(self) -> None: + for filename in get_tests(): + if filename == "typeddict_client.py" and TypedDict is None: + continue + with self.subTest(filename=filename): + self.ensure_mypy_fails(filename) + + +class TestPymongo(IntegrationTest): + coll: Collection + + def setUp(self): + super().setUp() + self.coll = self.client.test.test + + def test_insert_find(self) -> None: + doc = {"my": "doc"} + coll2 = self.client.test.test2 + result = self.coll.insert_one(doc) + self.assertEqual(result.inserted_id, doc["_id"]) + retrieved = self.coll.find_one({"_id": doc["_id"]}) + if retrieved: + # Documents returned from find are mutable. + retrieved["new_field"] = 1 + result2 = coll2.insert_one(retrieved) + self.assertEqual(result2.inserted_id, result.inserted_id) + + def test_cursor_iterable(self) -> None: + def to_list(iterable: Iterable[Dict[str, Any]]) -> List[Dict[str, Any]]: + return list(iterable) + + self.coll.insert_one({}) + cursor = self.coll.find() + docs = to_list(cursor) + self.assertTrue(docs) + + def test_distinct(self) -> None: + self.coll.delete_many({}) + self.coll.insert_many( + [ + {"_id": None}, + {"_id": 0}, + {"_id": ""}, + {"_id": ObjectId()}, + {"_id": True}, + ] + ) + + def collection_distinct( + collection: Collection, + ) -> list[None | int | str | ObjectId | bool]: + return collection.distinct("_id") + + def cursor_distinct( + collection: Collection, + ) -> list[None | int | str | ObjectId | bool]: + cursor = collection.find() + return cursor.distinct("_id") + + collection_distinct(self.coll) + cursor_distinct(self.coll) + + @only_type_check + def test_bulk_write(self) -> None: + self.coll.insert_one({}) + coll: Collection[Movie] = self.coll + requests: List[InsertOne[Movie]] = [InsertOne(Movie(name="American Graffiti", year=1973))] + self.assertTrue(coll.bulk_write(requests).acknowledged) + new_requests: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [] + input_list: List[Union[InsertOne[Movie], ReplaceOne[Movie]]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, Movie(name="American Graffiti", year=1973)), + ] + for i in input_list: + new_requests.append(i) + self.assertTrue(coll.bulk_write(new_requests).acknowledged) + + # Because ReplaceOne is not generic, type checking is not enforced for ReplaceOne in the first example. 
+ @only_type_check + def test_bulk_write_heterogeneous(self): + coll: Collection[Movie] = self.coll + requests: List[Union[InsertOne[Movie], ReplaceOne, DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne({}, {"name": "American Graffiti", "year": "WRONG_TYPE"}), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests).acknowledged) + requests_two: List[Union[InsertOne[Movie], ReplaceOne[Movie], DeleteOne]] = [ + InsertOne(Movie(name="American Graffiti", year=1973)), + ReplaceOne( + {}, + {"name": "American Graffiti", "year": "WRONG_TYPE"}, # type:ignore[arg-type] + ), + DeleteOne({}), + ] + self.assertTrue(coll.bulk_write(requests_two).acknowledged) + + def test_command(self) -> None: + result: Dict = self.client.admin.command("ping") + result.items() + + def test_list_collections(self) -> None: + cursor = self.client.test.list_collections() + value = cursor.next() + value.items() + + def test_list_databases(self) -> None: + cursor = self.client.list_databases() + value = cursor.next() + value.items() + + def test_default_document_type(self) -> None: + client = self.rs_or_single_client() + self.addCleanup(client.close) + coll = client.test.test + doc = {"my": "doc"} + coll.insert_one(doc) + retrieved = coll.find_one({"_id": doc["_id"]}) + assert retrieved is not None + retrieved["a"] = 1 + + def test_aggregate_pipeline(self) -> None: + coll3 = self.client.test.test3 + coll3.insert_many( + [ + {"x": 1, "tags": ["dog", "cat"]}, + {"x": 2, "tags": ["cat"]}, + {"x": 2, "tags": ["mouse", "cat", "dog"]}, + {"x": 3, "tags": []}, + ] + ) + + class mydict(Dict[str, Any]): + pass + + result = coll3.aggregate( + [ + mydict({"$unwind": "$tags"}), + {"$group": {"_id": "$tags", "count": {"$sum": 1}}}, + {"$sort": SON([("count", -1), ("_id", -1)])}, + ] + ) + self.assertTrue(len(list(result))) + + def test_with_transaction(self) -> None: + def execute_transaction(session): + pass + + with self.client.start_session() as session: + return session.with_transaction( + execute_transaction, read_preference=ReadPreference.PRIMARY + ) + + def test_with_options(self) -> None: + coll: Collection[Dict[str, Any]] = self.coll + coll.drop() + doc = {"name": "foo", "year": 1982, "other": 1} + coll.insert_one(doc) + + coll2 = coll.with_options(codec_options=CodecOptions(document_class=Movie)) + retrieved = coll2.find_one() + assert retrieved is not None + assert retrieved["name"] == "foo" + # We expect a type error here. 
+ assert retrieved["other"] == 1 # type:ignore[misc] + + +class TestDecode(unittest.TestCase): + def test_bson_decode(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + rt_document: Dict[str, Any] = decode(bsonbytes) + assert rt_document["_id"] == 1 + rt_document["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options = CodecOptions(document_class=MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options) + rt_document2 = decode(bsonbytes2, codec_options=codec_options) + assert rt_document2.foo() == "bar" + + codec_options2 = CodecOptions(document_class=RawBSONDocument) + encode(doc, codec_options=codec_options2) + rt_document3 = decode(bsonbytes2, codec_options=codec_options2) + assert rt_document3.raw + + def test_bson_decode_no_codec_option(self) -> None: + doc = decode_all(encode({"a": 1})) + assert doc + doc[0]["a"] = 2 + + def test_bson_decode_all(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: List[Dict[str, Any]] = decode_all(bsonbytes) + assert rt_documents[0]["_id"] == 1 + rt_documents[0]["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_all(bsonbytes2, codec_options2) + assert rt_documents2[0].foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_all(bsonbytes3, codec_options3) + assert rt_documents3[0].raw + + def test_bson_decode_all_no_codec_option(self) -> None: + docs = decode_all(b"") + docs.append({"new": 1}) + + docs = decode_all(encode({"a": 1})) + assert docs + docs[0]["a"] = 2 + docs.append({"new": 1}) + + def test_bson_decode_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + rt_documents: Iterator[Dict[str, Any]] = decode_iter(bsonbytes) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + rt_documents2 = decode_iter(bsonbytes2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + rt_documents3 = decode_iter(bsonbytes3, codec_options3) + assert next(rt_documents3).raw + + def test_bson_decode_iter_no_codec_option(self) -> None: + doc = next(decode_iter(encode({"a": 1}))) + assert doc + doc["a"] = 2 + + def make_tempfile(self, content: bytes) -> Any: + fileobj = tempfile.TemporaryFile() + fileobj.write(content) + fileobj.seek(0) + self.addCleanup(fileobj.close) + return fileobj + + def test_bson_decode_file_iter(self) -> None: + doc = {"_id": 1} + bsonbytes = encode(doc) + bsonbytes += encode(doc) + fileobj = self.make_tempfile(bsonbytes) + rt_documents: Iterator[Dict[str, Any]] = decode_file_iter(fileobj) + assert next(rt_documents)["_id"] == 1 + next(rt_documents)["foo"] = "bar" + + class MyDict(Dict[str, Any]): + def foo(self): + return "bar" + + codec_options2 = CodecOptions(MyDict) + bsonbytes2 = encode(doc, 
codec_options=codec_options2) + bsonbytes2 += encode(doc, codec_options=codec_options2) + fileobj2 = self.make_tempfile(bsonbytes2) + rt_documents2 = decode_file_iter(fileobj2, codec_options2) + assert next(rt_documents2).foo() == "bar" + + codec_options3 = CodecOptions(RawBSONDocument) + bsonbytes3 = encode(doc, codec_options=codec_options3) + bsonbytes3 += encode(doc, codec_options=codec_options3) + fileobj3 = self.make_tempfile(bsonbytes3) + rt_documents3 = decode_file_iter(fileobj3, codec_options3) + assert next(rt_documents3).raw + + def test_bson_decode_file_iter_none_codec_option(self) -> None: + fileobj = self.make_tempfile(encode({"new": 1})) + doc = next(decode_file_iter(fileobj)) + assert doc + doc["a"] = 2 + + +class TestDocumentType(PyMongoTestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + @only_type_check + def test_explicit_document_type(self) -> None: + client: MongoClient[Dict[str, Any]] = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + @only_type_check + def test_typeddict_document_type(self) -> None: + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert retrieved["year"] == 1 + assert retrieved["name"] == "a" + + @only_type_check + def test_typeddict_document_type_insertion(self) -> None: + client: MongoClient[Movie] = MongoClient() + coll = client.test.test + mov = {"name": "THX-1138", "year": 1971} + movie = Movie(name="THX-1138", year=1971) + coll.insert_one(mov) # type: ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": 1971}) # This will work because it is in-line. + coll.insert_one(movie) + coll.insert_many([mov]) # type: ignore[list-item] + coll.insert_many([movie]) + bad_mov = {"name": "THX-1138", "year": "WRONG TYPE"} + bad_movie = Movie(name="THX-1138", year="WRONG TYPE") # type: ignore[typeddict-item] + coll.insert_one(bad_mov) # type:ignore[arg-type] + coll.insert_one({"name": "THX-1138", "year": "WRONG TYPE"}) # type: ignore[arg-type] + coll.insert_one(bad_movie) + coll.insert_many([bad_mov]) # type: ignore[list-item] + coll.insert_many( + [{"name": "THX-1138", "year": "WRONG TYPE"}] # type: ignore[list-item] + ) + coll.insert_many([bad_movie]) + + @only_type_check + def test_bulk_write_document_type_insertion(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [InsertOne(Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [InsertOne(mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + InsertOne({"_id": ObjectId(), "name": "THX-1138", "year": 1971}) # pyright: ignore + ] # No error because it is in-line. 
+ ) + + @only_type_check + def test_bulk_write_document_type_replacement(self): + client: MongoClient[MovieWithId] = MongoClient() + coll: Collection[MovieWithId] = client.test.test + coll.bulk_write( + [ReplaceOne({}, Movie({"name": "THX-1138", "year": 1971}))] # type:ignore[arg-type] + ) + mov_dict = {"_id": ObjectId(), "name": "THX-1138", "year": 1971} + coll.bulk_write( + [ReplaceOne({}, mov_dict)] # type:ignore[arg-type] + ) + coll.bulk_write( + [ + ReplaceOne({}, {"_id": ObjectId(), "name": "THX-1138", "year": 1971}) # pyright: ignore + ] # No error because it is in-line. + ) + + @only_type_check + def test_typeddict_explicit_document_type(self) -> None: + out = MovieWithId(_id=ObjectId(), name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + assert out["_id"] + + # This should work the same as the test above, but this time using NotRequired to allow + # automatic insertion of the _id field by insert_one. + @only_type_check + def test_typeddict_not_required_document_type(self) -> None: + out = ImplicitMovie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore[unused-ignore] + + @only_type_check + def test_typeddict_empty_document_type(self) -> None: + out = Movie(name="THX-1138", year=1971) + assert out is not None + # This should fail because the output is a Movie. + assert out["foo"] # type:ignore[typeddict-item] + # This should fail because _id is not included in our TypedDict definition. + assert out["_id"] # type:ignore[typeddict-item] + + @client_context.require_connection + def test_typeddict_find_notrequired(self): + if NotRequired is None or ImplicitMovie is None: + raise unittest.SkipTest("Python 3.11+ is required to use NotRequired.") + client: MongoClient[ImplicitMovie] = self.rs_or_single_client() + coll = client.test.test + coll.insert_one(ImplicitMovie(name="THX-1138", year=1971)) + out = coll.find_one({}) + assert out is not None + # pyright gives reportTypedDictNotRequiredAccess for the following: + assert out["_id"] # type:ignore[unused-ignore] + + @only_type_check + def test_raw_bson_document_type(self) -> None: + client = MongoClient(document_class=RawBSONDocument) + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + assert len(retrieved.raw) > 0 + + @only_type_check + def test_son_document_type(self) -> None: + client = MongoClient(document_class=SON[str, Any]) + coll = client.test.test + retrieved = coll.find_one({"_id": "foo"}) + assert retrieved is not None + retrieved["a"] = 1 + + def test_son_document_type_runtime(self) -> None: + MongoClient(document_class=SON[str, Any], connect=False) + + @only_type_check + def test_create_index(self) -> None: + client: MongoClient[Dict[str, str]] = MongoClient("test") + db = client.test + with client.start_session() as session: + index = db.test.create_index([("user_id", ASCENDING)], unique=True, session=session) + assert isinstance(index, str) + + +class TestCommandDocumentType(unittest.TestCase): + @only_type_check + def test_default(self) -> None: + client: MongoClient = MongoClient() + result: Dict = client.admin.command("ping") + result["a"] = 1 + + @only_type_check + def test_explicit_document_type(self) -> None: + client: MongoClient = MongoClient() + 
codec_options: CodecOptions[Dict[str, Any]] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + @only_type_check + def test_typeddict_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options: CodecOptions[Movie] = CodecOptions() + result = client.admin.command("ping", codec_options=codec_options) + assert result["year"] == 1 + assert result["name"] == "a" + + @only_type_check + def test_raw_bson_document_type(self) -> None: + client: MongoClient = MongoClient() + codec_options = CodecOptions(RawBSONDocument) + result = client.admin.command("ping", codec_options=codec_options) + assert len(result.raw) > 0 + + @only_type_check + def test_son_document_type(self) -> None: + client = MongoClient(document_class=SON[str, Any]) + codec_options = CodecOptions(SON[str, Any]) + result = client.admin.command("ping", codec_options=codec_options) + result["a"] = 1 + + +class TestCodecOptionsDocumentType(unittest.TestCase): + def test_default(self) -> None: + options: CodecOptions = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_explicit_document_type(self) -> None: + options: CodecOptions[Dict[str, Any]] = CodecOptions() + obj = options.document_class() + obj["a"] = 1 + + def test_typeddict_document_type(self) -> None: + options: CodecOptions[Movie] = CodecOptions() + # Suppress: Cannot instantiate type "Type[Movie]". + obj = options.document_class(name="a", year=1) + assert obj["year"] == 1 + assert obj["name"] == "a" + + def test_raw_bson_document_type(self) -> None: + options = CodecOptions(RawBSONDocument) + doc_bson = b"\x10\x00\x00\x00\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff\x00" + obj = options.document_class(doc_bson) + assert len(obj.raw) > 0 + + def test_son_document_type(self) -> None: + options = CodecOptions(SON[str, Any]) + obj = options.document_class() + obj["a"] = 1 + + +class TestBSONFromVectorType(unittest.TestCase): + @only_type_check + def test_from_vector_binaryvector(self): + list_vector = BinaryVector([127, 7], BinaryVectorDtype.INT8) + Binary.from_vector(list_vector) + + @only_type_check + def test_from_vector_list_int(self): + list_vector = [127, 7] + Binary.from_vector(list_vector, BinaryVectorDtype.INT8) + + @only_type_check + def test_from_vector_list_float(self): + list_vector = [127.0, 7.0] + Binary.from_vector(list_vector, BinaryVectorDtype.INT8) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_typing_strict.py b/test/test_typing_strict.py new file mode 100644 index 0000000000..32e9fcfcca --- /dev/null +++ b/test/test_typing_strict.py @@ -0,0 +1,40 @@ +# Copyright 2023-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
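The document_class plumbing that the CodecOptions typing tests above exercise is easy to observe at runtime with bson alone; no server is needed for this sketch:

from bson import decode, encode
from bson.codec_options import CodecOptions
from bson.raw_bson import RawBSONDocument
from bson.son import SON

raw_opts = CodecOptions(document_class=RawBSONDocument)
doc = decode(encode({"a": 1}), codec_options=raw_opts)
assert isinstance(doc, RawBSONDocument) and len(doc.raw) > 0

son_opts = CodecOptions(document_class=SON)
son_doc = decode(encode({"a": 1}), codec_options=son_opts)
assert isinstance(son_doc, SON) and son_doc["a"] == 1
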
+ +"""Test typings in strict mode.""" +from __future__ import annotations + +import unittest +from typing import TYPE_CHECKING, Any, Dict + +import pymongo +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.database import Database + + +def test_generic_arguments() -> None: + """Ensure known usages of generic arguments pass strict typing""" + if not TYPE_CHECKING: + raise unittest.SkipTest("Used for Type Checking Only") + mongo_client: pymongo.MongoClient[Dict[str, Any]] = pymongo.MongoClient() + mongo_client.drop_database("foo") + mongo_client.get_default_database() + db = mongo_client.get_database("test_db") + db = Database(mongo_client, "test_db") + db.with_options() + db.validate_collection("py_test") + col = db.get_collection("py_test") + col.insert_one({"abc": 123}) + col = Collection(db, "py_test") + col.with_options() diff --git a/test/test_unified_format.py b/test/test_unified_format.py index 8a6e3da549..f1cfd0139b 100644 --- a/test/test_unified_format.py +++ b/test/test_unified_format.py @@ -11,53 +11,60 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import os import sys +from pathlib import Path +from typing import Any sys.path[0:0] = [""] -from test import unittest +from test import UnitTest, unittest from test.unified_format import MatchEvaluatorUtil, generate_test_classes from bson import ObjectId -_TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "unified-test-format") +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "unified-test-format") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "unified-test-format") globals().update( generate_test_classes( - os.path.join(_TEST_PATH, "valid-pass"), + os.path.join(TEST_PATH, "valid-pass"), module=__name__, class_name_prefix="UnifiedTestFormat", expected_failures=[ "Client side error in command starting transaction", # PYTHON-1894 ], - RUN_ON_SERVERLESS=False, ) ) globals().update( generate_test_classes( - os.path.join(_TEST_PATH, "valid-fail"), + os.path.join(TEST_PATH, "valid-fail"), module=__name__, class_name_prefix="UnifiedTestFormat", bypass_test_generation_errors=True, expected_failures=[ ".*", # All tests expected to fail ], - RUN_ON_SERVERLESS=False, ) ) -class TestMatchEvaluatorUtil(unittest.TestCase): +class TestMatchEvaluatorUtil(UnitTest): def setUp(self): self.match_evaluator = MatchEvaluatorUtil(self) def test_unsetOrMatches(self): - spec = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} + spec: dict[str, Any] = {"$$unsetOrMatches": {"y": {"$$unsetOrMatches": 2}}} for actual in [{}, {"y": 2}, None]: self.match_evaluator.match_result(spec, actual) diff --git a/test/test_uri_parser.py b/test/test_uri_parser.py index 2f81e3b512..502faf82b0 100644 --- a/test/test_uri_parser.py +++ b/test/test_uri_parser.py @@ -13,20 +13,28 @@ # limitations under the License. 
"""Test the pymongo uri_parser module.""" +from __future__ import annotations import copy import sys import warnings +from typing import Any from urllib.parse import quote_plus sys.path[0:0] = [""] from test import unittest +from unittest.mock import patch from bson.binary import JAVA_LEGACY from pymongo import ReadPreference from pymongo.errors import ConfigurationError, InvalidURI -from pymongo.uri_parser import parse_uri, parse_userinfo, split_hosts, split_options +from pymongo.synchronous.uri_parser import parse_uri +from pymongo.uri_parser_shared import ( + parse_userinfo, + split_hosts, + split_options, +) class TestURI(unittest.TestCase): @@ -127,30 +135,31 @@ def test_split_options(self): self.assertEqual({"connecttimeoutms": 0.3}, split_options("connectTimeoutMS=300")) self.assertEqual({"connecttimeoutms": 0.0001}, split_options("connectTimeoutMS=0.1")) self.assertTrue(split_options("connectTimeoutMS=300")) - self.assertTrue(isinstance(split_options("w=5")["w"], int)) - self.assertTrue(isinstance(split_options("w=5.5")["w"], str)) + self.assertIsInstance(split_options("w=5")["w"], int) + self.assertIsInstance(split_options("w=5.5")["w"], str) self.assertTrue(split_options("w=foo")) self.assertTrue(split_options("w=majority")) self.assertTrue(split_options("wtimeoutms=500")) self.assertEqual({"fsync": True}, split_options("fsync=true")) self.assertEqual({"fsync": False}, split_options("fsync=false")) - self.assertEqual({"authmechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) - self.assertEqual({"authmechanism": "MONGODB-CR"}, split_options("authMechanism=MONGODB-CR")) + self.assertEqual({"authMechanism": "GSSAPI"}, split_options("authMechanism=GSSAPI")) self.assertEqual( - {"authmechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") + {"authMechanism": "SCRAM-SHA-1"}, split_options("authMechanism=SCRAM-SHA-1") ) self.assertEqual({"authsource": "foobar"}, split_options("authSource=foobar")) self.assertEqual({"maxpoolsize": 50}, split_options("maxpoolsize=50")) + # Test suggestions given when invalid kwarg passed + + expected = r"Unknown option: auth. Did you mean one of \(authsource, authmechanism, timeoutms\) or maybe a camelCase version of one\? Refer to docstring." + with self.assertRaisesRegex(ConfigurationError, expected): + split_options("auth=GSSAPI") + def test_parse_uri(self): self.assertRaises(InvalidURI, parse_uri, "http://foobar.com") self.assertRaises(InvalidURI, parse_uri, "http://foo@foobar.com") self.assertRaises(ValueError, parse_uri, "mongodb://::1", 27017) - # Extra whitespace should be visible in error message. 
- with self.assertRaisesRegex(ValueError, "'27017 '"): - parse_uri("mongodb://localhost:27017 ") - orig: dict = { "nodelist": [("localhost", 27017)], "username": None, @@ -281,35 +290,35 @@ def test_parse_uri(self): self.assertEqual(res, parse_uri('mongodb://localhost/test.name/with "delimiters')) res = copy.deepcopy(orig) - res["options"] = {"readpreference": ReadPreference.SECONDARY.mongos_mode} + res["options"] = {"readPreference": ReadPreference.SECONDARY.mongos_mode} self.assertEqual(res, parse_uri("mongodb://localhost/?readPreference=secondary")) # Various authentication tests res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "MONGODB-CR"} + res["options"] = {"authMechanism": "SCRAM-SHA-256"} res["username"] = "user" res["password"] = "password" self.assertEqual( - res, parse_uri("mongodb://user:password@localhost/?authMechanism=MONGODB-CR") + res, parse_uri("mongodb://user:password@localhost/?authMechanism=SCRAM-SHA-256") ) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "MONGODB-CR", "authsource": "bar"} + res["options"] = {"authMechanism": "SCRAM-SHA-256", "authSource": "bar"} res["username"] = "user" res["password"] = "password" res["database"] = "foo" self.assertEqual( res, parse_uri( - "mongodb://user:password@localhost/foo?authSource=bar;authMechanism=MONGODB-CR" + "mongodb://user:password@localhost/foo?authSource=bar;authMechanism=SCRAM-SHA-256" ), ) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "MONGODB-CR"} + res["options"] = {"authMechanism": "SCRAM-SHA-256"} res["username"] = "user" res["password"] = "" - self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=MONGODB-CR")) + self.assertEqual(res, parse_uri("mongodb://user:@localhost/?authMechanism=SCRAM-SHA-256")) res = copy.deepcopy(orig) res["username"] = "user@domain.com" @@ -318,7 +327,7 @@ def test_parse_uri(self): self.assertEqual(res, parse_uri("mongodb://user%40domain.com:password@localhost/foo")) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "GSSAPI"} + res["options"] = {"authMechanism": "GSSAPI"} res["username"] = "user@domain.com" res["password"] = "password" res["database"] = "foo" @@ -328,7 +337,7 @@ def test_parse_uri(self): ) res = copy.deepcopy(orig) - res["options"] = {"authmechanism": "GSSAPI"} + res["options"] = {"authMechanism": "GSSAPI"} res["username"] = "user@domain.com" res["password"] = "" res["database"] = "foo" @@ -338,8 +347,8 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res["options"] = { - "readpreference": ReadPreference.SECONDARY.mongos_mode, - "readpreferencetags": [ + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ {"dc": "west", "use": "website"}, {"dc": "east", "use": "website"}, ], @@ -359,8 +368,8 @@ def test_parse_uri(self): res = copy.deepcopy(orig) res["options"] = { - "readpreference": ReadPreference.SECONDARY.mongos_mode, - "readpreferencetags": [ + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ {"dc": "west", "use": "website"}, {"dc": "east", "use": "website"}, {}, @@ -453,6 +462,7 @@ def test_tlsinsecure_simple(self): "tlsInsecure": True, "tlsDisableOCSPEndpointCheck": True, } + print(parse_uri(uri)["options"]) self.assertEqual(res, parse_uri(uri)["options"]) def test_normalize_options(self): @@ -461,17 +471,17 @@ def test_normalize_options(self): res = {"tls": True, "appname": "myapp"} self.assertEqual(res, parse_uri(uri)["options"]) - def test_unquote_after_parsing(self): - quoted_val = 
"val%21%40%23%24%25%5E%26%2A%28%29_%2B%2C%3A+etc" - unquoted_val = "val!@#$%^&*()_+,: etc" + def test_unquote_during_parsing(self): + quoted_val = "val%21%40%23%24%25%5E%26%2A%28%29_%2B%3A+etc" + unquoted_val = "val!@#$%^&*()_+: etc" uri = ( "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" "&authMechanismProperties=AWS_SESSION_TOKEN:" + quoted_val ) res = parse_uri(uri) - options = { - "authmechanism": "MONGODB-AWS", - "authmechanismproperties": {"AWS_SESSION_TOKEN": unquoted_val}, + options: dict[str, Any] = { + "authMechanism": "MONGODB-AWS", + "authMechanismProperties": {"AWS_SESSION_TOKEN": unquoted_val}, } self.assertEqual(options, res["options"]) @@ -482,8 +492,8 @@ def test_unquote_after_parsing(self): ) res = parse_uri(uri) options = { - "readpreference": ReadPreference.SECONDARY.mongos_mode, - "readpreferencetags": [ + "readPreference": ReadPreference.SECONDARY.mongos_mode, + "readPreferenceTags": [ {"dc": "west", unquoted_val: unquoted_val}, {"dc": "east", "use": unquoted_val}, ], @@ -491,28 +501,64 @@ def test_unquote_after_parsing(self): self.assertEqual(options, res["options"]) def test_redact_AWS_SESSION_TOKEN(self): - unquoted_colon = "token:" + token = "token" uri = ( "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" - "&authMechanismProperties=AWS_SESSION_TOKEN:" + unquoted_colon + "&authMechanismProperties=AWS_SESSION_TOKEN-" + token ) with self.assertRaisesRegex( ValueError, - "auth mechanism properties must be key:value pairs like " - "SERVICE_NAME:mongodb, not AWS_SESSION_TOKEN:" - ", did you forget to percent-escape the token with " - "quote_plus?", + "Malformed auth mechanism properties", ): parse_uri(uri) + def test_handle_colon(self): + token = "token:foo" + uri = ( + "mongodb://user:password@localhost/?authMechanism=MONGODB-AWS" + "&authMechanismProperties=AWS_SESSION_TOKEN:" + token + ) + res = parse_uri(uri) + options = { + "authMechanism": "MONGODB-AWS", + "authMechanismProperties": {"AWS_SESSION_TOKEN": token}, + } + self.assertEqual(options, res["options"]) + def test_special_chars(self): user = "user@ /9+:?~!$&'()*+,;=" pwd = "pwd@ /9+:?~!$&'()*+,;=" - uri = "mongodb://%s:%s@localhost" % (quote_plus(user), quote_plus(pwd)) + uri = f"mongodb://{quote_plus(user)}:{quote_plus(pwd)}@localhost" res = parse_uri(uri) self.assertEqual(user, res["username"]) self.assertEqual(pwd, res["password"]) + def test_do_not_include_password_in_port_message(self): + with self.assertRaisesRegex(ValueError, "Port must be an integer between 0 and 65535"): + parse_uri("mongodb://localhost:65536") + with self.assertRaisesRegex( + ValueError, "Port contains non-digit characters. 
Hint: username " + ) as ctx: + parse_uri("mongodb://user:PASS /@localhost:27017") + self.assertNotIn("PASS", str(ctx.exception)) + + # This "invalid" case is technically a valid URI: + res = parse_uri("mongodb://user:1234/@localhost:27017") + self.assertEqual([("user", 1234)], res["nodelist"]) + self.assertEqual("@localhost:27017", res["database"]) + + def test_port_with_whitespace(self): + with self.assertRaisesRegex(ValueError, "Port contains whitespace character: ' '"): + parse_uri("mongodb://localhost:27017 ") + with self.assertRaisesRegex(ValueError, "Port contains whitespace character: ' '"): + parse_uri("mongodb://localhost: 27017") + with self.assertRaisesRegex(ValueError, r"Port contains whitespace character: '\\n'"): + parse_uri("mongodb://localhost:27\n017") + + def test_parse_uri_options_type(self): + opts = parse_uri("mongodb://localhost:27017")["options"] + self.assertIsInstance(opts, dict) + if __name__ == "__main__": unittest.main() diff --git a/test/test_uri_spec.py b/test/test_uri_spec.py index d12abf3b91..3d8f7b2b75 100644 --- a/test/test_uri_spec.py +++ b/test/test_uri_spec.py @@ -13,7 +13,9 @@ # limitations under the License. """Test that the pymongo.uri_parser module is compliant with the connection -string and uri options specifications.""" +string and uri options specifications. +""" +from __future__ import annotations import json import os @@ -22,12 +24,12 @@ sys.path[0:0] = [""] -from test import clear_warning_registry, unittest +from test import unittest +from test.helpers_shared import clear_warning_registry -from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, validate -from pymongo.compression_support import _HAVE_SNAPPY -from pymongo.srv_resolver import _HAVE_DNSPYTHON -from pymongo.uri_parser import SRV_SCHEME, parse_uri +from pymongo.common import INTERNAL_URI_OPTION_NAME_MAP, _CaseInsensitiveDictionary, validate +from pymongo.compression_support import _have_snappy +from pymongo.synchronous.uri_parser import parse_uri CONN_STRING_TEST_PATH = os.path.join( os.path.dirname(os.path.realpath(__file__)), os.path.join("connection_string", "test") @@ -72,8 +74,8 @@ def setUp(self): clear_warning_registry() -def get_error_message_template(expected, artefact): - return "%s %s for test '%s'" % ("Expected" if expected else "Unexpected", artefact, "%s") +def get_error_message_template(expected, artifact): + return "{} {} for test '{}'".format("Expected" if expected else "Unexpected", artifact, "%s") def run_scenario_in_dir(target_workdir): @@ -81,7 +83,9 @@ def workdir_context_decorator(func): def modified_test_scenario(*args, **kwargs): original_workdir = os.getcwd() os.chdir(target_workdir) - func(*args, **kwargs) + with warnings.catch_warnings(): + warnings.simplefilter("default") + func(*args, **kwargs) os.chdir(original_workdir) return modified_test_scenario @@ -92,23 +96,24 @@ def modified_test_scenario(*args, **kwargs): def create_test(test, test_workdir): def run_scenario(self): compressors = (test.get("options") or {}).get("compressors", []) - if "snappy" in compressors and not _HAVE_SNAPPY: + if "snappy" in compressors and not _have_snappy(): self.skipTest("This test needs the snappy module.") - if test["uri"].startswith(SRV_SCHEME) and not _HAVE_DNSPYTHON: - self.skipTest("This test needs dnspython package.") valid = True warning = False + expected_warning = test.get("warning", False) + expected_valid = test.get("valid", True) with warnings.catch_warnings(record=True) as ctx: - warnings.simplefilter("always") + warnings.simplefilter("ignore", 
category=ResourceWarning) try: options = parse_uri(test["uri"], warn=True) except Exception: valid = False else: warning = len(ctx) > 0 + if expected_valid and warning and not expected_warning: + raise ValueError("Got unexpected warning(s): ", [str(i) for i in ctx]) - expected_valid = test.get("valid", True) self.assertEqual( valid, expected_valid, @@ -116,7 +121,6 @@ def run_scenario(self): ) if expected_valid: - expected_warning = test.get("warning", False) self.assertEqual( warning, expected_warning, @@ -133,13 +137,15 @@ def run_scenario(self): for exp, actual in zip(test["hosts"], options["nodelist"]): self.assertEqual( - exp["host"], actual[0], "Expected host %s but got %s" % (exp["host"], actual[0]) + exp["host"], + actual[0], + "Expected host {} but got {}".format(exp["host"], actual[0]), ) if exp["port"] is not None: self.assertEqual( exp["port"], actual[1], - "Expected port %s but got %s" % (exp["port"], actual), + "Expected port {} but got {}".format(exp["port"], actual), ) # Compare auth options. @@ -157,13 +163,14 @@ def run_scenario(self): self.assertEqual( auth[elm], options[elm], - "Expected %s but got %s" % (auth[elm], options[elm]), + f"Expected {auth[elm]} but got {options[elm]}", ) # Compare URI options. err_msg = "For option %s expected %s but got %s" if test["options"]: - opts = options["options"] + opts = _CaseInsensitiveDictionary() + opts.update(options["options"]) for opt in test["options"]: lopt = opt.lower() optname = INTERNAL_URI_OPTION_NAME_MAP.get(lopt, lopt) @@ -183,7 +190,7 @@ def run_scenario(self): ), ) else: - self.fail("Missing expected option %s" % (opt,)) + self.fail(f"Missing expected option {opt}") return run_scenario_in_dir(test_workdir)(run_scenario) @@ -209,7 +216,7 @@ def create_tests(test_path): continue testmethod = create_test(testcase, dirpath) - testname = "test_%s_%s_%s" % ( + testname = "test_{}_{}_{}".format( dirname, os.path.splitext(filename)[0], str(dsc).replace(" ", "_"), diff --git a/test/test_versioned_api.py b/test/test_versioned_api.py index a2fd059d21..19b125770f 100644 --- a/test/test_versioned_api.py +++ b/test/test_versioned_api.py @@ -11,29 +11,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations -import os import sys +from test import UnitTest sys.path[0:0] = [""] -from test import IntegrationTest, client_context, unittest -from test.unified_format import generate_test_classes -from test.utils import OvertCommandListener, rs_or_single_client +from test import unittest from pymongo.mongo_client import MongoClient from pymongo.server_api import ServerApi, ServerApiVersion -TEST_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "versioned-api") - -# Generate unified tests. 
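An aside on the spec-runner change above: the parsed options are now copied into _CaseInsensitiveDictionary before comparison, because parse_uri preserves the camelCase spelling of option names while the spec files may spell them in any case. The real class ships in pymongo.common; the stand-in below is only a minimal sketch of the lookup behavior the comparison relies on.

from collections.abc import MutableMapping

class CaseInsensitiveDict(MutableMapping):
    """Simplified stand-in for pymongo.common._CaseInsensitiveDictionary:
    keys keep their original casing but are looked up case-insensitively."""

    def __init__(self, data=None):
        self._store = {}  # lowercased key -> (original key, value)
        if data:
            self.update(data)

    def __setitem__(self, key, value):
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (original for original, _ in self._store.values())

    def __len__(self):
        return len(self._store)

# CaseInsensitiveDict({"authMechanism": "GSSAPI"})["authmechanism"] == "GSSAPI"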
-globals().update(generate_test_classes(TEST_PATH, module=__name__)) - - -class TestServerApi(IntegrationTest): - RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True +class TestServerApi(UnitTest): def test_server_api_defaults(self): api = ServerApi(ServerApiVersion.V1) self.assertEqual(api.version, "1") @@ -56,9 +47,9 @@ def test_server_api_validation(self): with self.assertRaises(ValueError): ServerApi("2") with self.assertRaises(TypeError): - ServerApi("1", strict="not-a-bool") + ServerApi("1", strict="not-a-bool") # type: ignore[arg-type] with self.assertRaises(TypeError): - ServerApi("1", deprecation_errors="not-a-bool") + ServerApi("1", deprecation_errors="not-a-bool") # type: ignore[arg-type] with self.assertRaises(TypeError): MongoClient(server_api="not-a-ServerApi") @@ -73,35 +64,6 @@ def assertServerApiInAllCommands(self, events): for event in events: self.assertServerApi(event) - @client_context.require_version_min(4, 7) - def test_command_options(self): - listener = OvertCommandListener() - client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) - self.addCleanup(client.close) - coll = client.test.test - coll.insert_many([{} for _ in range(100)]) - self.addCleanup(coll.delete_many, {}) - list(coll.find(batch_size=25)) - client.admin.command("ping") - self.assertServerApiInAllCommands(listener.results["started"]) - - @client_context.require_version_min(4, 7) - @client_context.require_transactions - def test_command_options_txn(self): - listener = OvertCommandListener() - client = rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) - self.addCleanup(client.close) - coll = client.test.test - coll.insert_many([{} for _ in range(100)]) - self.addCleanup(coll.delete_many, {}) - - listener.reset() - with client.start_session() as s, s.start_transaction(): - coll.insert_many([{} for _ in range(100)], session=s) - list(coll.find(batch_size=25, session=s)) - client.test.command("find", "test", session=s) - self.assertServerApiInAllCommands(listener.results["started"]) - if __name__ == "__main__": unittest.main() diff --git a/test/test_versioned_api_integration.py b/test/test_versioned_api_integration.py new file mode 100644 index 0000000000..066a1935ca --- /dev/null +++ b/test/test_versioned_api_integration.py @@ -0,0 +1,81 @@ +# Copyright 2020-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import os +import sys +from pathlib import Path +from test.unified_format import generate_test_classes + +sys.path[0:0] = [""] + +from test import IntegrationTest, client_context, unittest +from test.utils_shared import OvertCommandListener + +from pymongo.server_api import ServerApi + +_IS_SYNC = True + +# Location of JSON test specifications. +if _IS_SYNC: + TEST_PATH = os.path.join(Path(__file__).resolve().parent, "versioned-api") +else: + TEST_PATH = os.path.join(Path(__file__).resolve().parent.parent, "versioned-api") + + +# Generate unified tests. 
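For readers unfamiliar with the Stable API: constructing a client with ServerApi makes the driver attach apiVersion to every command it sends, which is what the assertServerApi helper below verifies against the listener's recorded commands. A minimal usage sketch, with host and credentials omitted:

from pymongo import MongoClient
from pymongo.server_api import ServerApi, ServerApiVersion

# Opt the whole client in to Stable API version 1 (ServerApiVersion.V1 == "1").
client = MongoClient(server_api=ServerApi(ServerApiVersion.V1))

# strict and deprecation_errors are optional booleans, defaulting to None;
# passing a non-bool raises TypeError, as test_server_api_validation above shows.
strict_client = MongoClient(
    server_api=ServerApi(ServerApiVersion.V1, strict=True, deprecation_errors=True)
)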
+globals().update(generate_test_classes(TEST_PATH, module=__name__)) + + +class TestServerApiIntegration(IntegrationTest): + RUN_ON_LOAD_BALANCER = True + + def assertServerApi(self, event): + self.assertIn("apiVersion", event.command) + self.assertEqual(event.command["apiVersion"], "1") + + def assertServerApiInAllCommands(self, events): + for event in events: + self.assertServerApi(event) + + @client_context.require_version_min(4, 7) + def test_command_options(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + coll.find(batch_size=25).to_list() + client.admin.command("ping") + self.assertServerApiInAllCommands(listener.started_events) + + @client_context.require_version_min(4, 7) + @client_context.require_transactions + def test_command_options_txn(self): + listener = OvertCommandListener() + client = self.rs_or_single_client(server_api=ServerApi("1"), event_listeners=[listener]) + coll = client.test.test + coll.insert_many([{} for _ in range(100)]) + self.addCleanup(coll.delete_many, {}) + + listener.reset() + with client.start_session() as s, s.start_transaction(): + coll.insert_many([{} for _ in range(100)], session=s) + coll.find(batch_size=25, session=s).to_list() + client.test.command("find", "test", session=s) + self.assertServerApiInAllCommands(listener.started_events) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_write_concern.py b/test/test_write_concern.py index 02c562a348..02a7cb6e5c 100644 --- a/test/test_write_concern.py +++ b/test/test_write_concern.py @@ -13,6 +13,7 @@ # limitations under the License. """Run the unit tests for WriteConcern.""" +from __future__ import annotations import collections import unittest @@ -37,10 +38,10 @@ def test_equality_to_none(self): concern = WriteConcern() self.assertNotEqual(concern, None) # Explicitly use the != operator. 
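The assertRepr helper added later in this hunk relies on the eval(repr(obj)) round-trip: a well-formed __repr__ should evaluate back to an equivalent object. A short illustration using the exact repr string the new test_repr asserts:

from pymongo.write_concern import WriteConcern

concern = WriteConcern(j=True, wtimeout=3000, w="majority", fsync=False)

# repr() yields a constructor expression...
text = repr(concern)
assert text == "WriteConcern(wtimeout=3000, j=True, fsync=False, w='majority')"

# ...so eval() rebuilds an equivalent object, which is what assertRepr checks.
rebuilt = eval(text)
assert type(rebuilt) is type(concern)
assert repr(rebuilt) == repr(concern)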
- self.assertTrue(concern != None) # noqa + self.assertTrue(concern != None) # noqa: E711 def test_equality_compatible_type(self): - class _FakeWriteConcern(object): + class _FakeWriteConcern: def __init__(self, **document): self.document = document @@ -66,6 +67,19 @@ def test_equality_incompatible_type(self): _fake_type = collections.namedtuple("NotAWriteConcern", ["document"]) # type: ignore self.assertNotEqual(WriteConcern(j=True), _fake_type({"j": True})) + def assertRepr(self, obj): + new_obj = eval(repr(obj)) + self.assertEqual(type(new_obj), type(obj)) + self.assertEqual(repr(new_obj), repr(obj)) + + def test_repr(self): + concern = WriteConcern(j=True, wtimeout=3000, w="majority", fsync=False) + self.assertRepr(concern) + self.assertEqual( + repr(concern), + "WriteConcern(wtimeout=3000, j=True, fsync=False, w='majority')", + ) + if __name__ == "__main__": unittest.main() diff --git a/test/transactions-convenient-api/callback-aborts.json b/test/transactions-convenient-api/callback-aborts.json deleted file mode 100644 index 2a3038e8ba..0000000000 --- a/test/transactions-convenient-api/callback-aborts.json +++ /dev/null @@ -1,244 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "withTransaction succeeds if callback aborts", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "withTransaction succeeds if callback aborts with no ops", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "abortTransaction", - "object": "session0" - } - ] - } - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "withTransaction still succeeds if callback aborts and runs extra op", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" 
- }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "autocommit": null, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/callback-commits.json b/test/transactions-convenient-api/callback-commits.json deleted file mode 100644 index 4abbbdd0e6..0000000000 --- a/test/transactions-convenient-api/callback-commits.json +++ /dev/null @@ -1,303 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "withTransaction succeeds if callback commits", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": 
"commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "withTransaction still succeeds if callback commits and runs extra op", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session0", - "autocommit": null, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/callback-retry.json b/test/transactions-convenient-api/callback-retry.json deleted file mode 100644 index a0391c1b5d..0000000000 --- a/test/transactions-convenient-api/callback-retry.json +++ /dev/null @@ -1,315 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "callback succeeds after multiple connection errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", 
- "document": { - "_id": 1 - } - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "callback is not retried after non-transient error (DuplicateKeyError)", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ] - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ], - "errorContains": "E11000" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - 
"command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit-retry.json b/test/transactions-convenient-api/commit-retry.json deleted file mode 100644 index 02e38460d0..0000000000 --- a/test/transactions-convenient-api/commit-retry.json +++ /dev/null @@ -1,531 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction succeeds after multiple connection errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction retry only overwrites write concern w option", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - 
"commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "writeConcern": { - "w": 2, - "j": true, - "wtimeout": 5000 - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 2, - "j": true, - "wtimeout": 5000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "j": true, - "wtimeout": 5000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "j": true, - "wtimeout": 5000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotWritablePrimary)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 10107, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - 
"$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commit is not retried after MaxTimeMSExpired error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 50 - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "maxCommitTimeMS": 60000 - } - }, - "result": { - "errorCodeName": "MaxTimeMSExpired", - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "maxTimeMS": 60000, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit-transienttransactionerror-4.2.json b/test/transactions-convenient-api/commit-transienttransactionerror-4.2.json deleted file mode 100644 index 7663bb54e1..0000000000 --- a/test/transactions-convenient-api/commit-transienttransactionerror-4.2.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.6", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "transaction is retried after commitTransaction TransientTransactionError (PreparedTransactionInProgress)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 267, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit-transienttransactionerror.json b/test/transactions-convenient-api/commit-transienttransactionerror.json deleted file mode 100644 index 18becbe09c..0000000000 --- a/test/transactions-convenient-api/commit-transienttransactionerror.json +++ /dev/null @@ -1,725 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "transaction is retried after commitTransaction TransientTransactionError (LockTimeout)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 24, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - 
"ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction is retried after commitTransaction TransientTransactionError (WriteConflict)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 112, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - 
"ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction is retried after commitTransaction TransientTransactionError (SnapshotUnavailable)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 246, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - 
"documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction is retried after commitTransaction TransientTransactionError (NoSuchTransaction)", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 251, - "closeConnection": false - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - 
"data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit-writeconcernerror.json b/test/transactions-convenient-api/commit-writeconcernerror.json deleted file mode 100644 index fbad645546..0000000000 --- a/test/transactions-convenient-api/commit-writeconcernerror.json +++ /dev/null @@ -1,602 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction is retried after WriteConcernFailed timeout error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "waiting for replication timed out", - "errInfo": { - "wtimeout": true - } - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction is retried after WriteConcernFailed non-timeout error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "multiple errors reported" - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": 
[ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction is not retried after UnknownReplWriteConcern error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 79, - "codeName": "UnknownReplWriteConcern", - "errmsg": "No write concern mode named 'foo' found in replica set configuration" - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - }, - "result": { - "errorCodeName": "UnknownReplWriteConcern", - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 100, - "codeName": "UnsatisfiableWriteConcern", - "errmsg": "Not enough data-bearing nodes" - } - } - 
}, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - }, - "result": { - "errorCodeName": "UnsatisfiableWriteConcern", - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction is not retried after MaxTimeMSExpired error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 50, - "codeName": "MaxTimeMSExpired", - "errmsg": "operation exceeded time limit" - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - }, - "result": { - "errorCodeName": "MaxTimeMSExpired", - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/commit.json b/test/transactions-convenient-api/commit.json deleted file mode 100644 index 0a7451db95..0000000000 --- a/test/transactions-convenient-api/commit.json +++ /dev/null @@ -1,286 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "withTransaction commits after callback returns", - "useMultipleMongoses": true, - 
"operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "withTransaction commits after callback returns (second transaction)", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": { - "afterClusterTime": 42 - }, - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - 
"command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/transaction-options.json b/test/transactions-convenient-api/transaction-options.json deleted file mode 100644 index 6deff43cf4..0000000000 --- a/test/transactions-convenient-api/transaction-options.json +++ /dev/null @@ -1,577 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "withTransaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "withTransaction and no transaction options set", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "readConcern": null, - "startTransaction": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction inherits transaction options from client", - "useMultipleMongoses": true, - "clientOptions": { - "readConcernLevel": "local", - "w": 1 - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction inherits 
transaction options from defaultTransactionOptions", - "useMultipleMongoses": true, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - } - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction explicit transaction options", - "useMultipleMongoses": true, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction explicit transaction options override defaultTransactionOptions", - "useMultipleMongoses": true, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "snapshot" - }, - "writeConcern": { - "w": "majority" - } - } - } - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - 
"documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "withTransaction explicit transaction options override client options", - "useMultipleMongoses": true, - "clientOptions": { - "readConcernLevel": "local", - "w": "majority" - }, - "operations": [ - { - "name": "withTransaction", - "object": "session0", - "arguments": { - "callback": { - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ] - }, - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - } - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "withTransaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": { - "w": 1 - }, - "readConcern": null, - "startTransaction": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions-convenient-api/unified/callback-aborts.json b/test/transactions-convenient-api/unified/callback-aborts.json new file mode 100644 index 0000000000..206428715c --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-aborts.json @@ -0,0 +1,344 @@ +{ + "description": "callback-aborts", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction succeeds if callback aborts", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + 
"arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + }, + { + "description": "withTransaction succeeds if callback aborts with no ops", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "abortTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + }, + { + "description": "withTransaction still succeeds if callback aborts and runs extra op", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "abortTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "autocommit": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/callback-commits.json b/test/transactions-convenient-api/unified/callback-commits.json new file mode 100644 index 0000000000..06f791e9ae --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-commits.json @@ -0,0 +1,423 @@ +{ + "description": "callback-commits", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction succeeds if callback commits", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": 
{ + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "withTransaction still succeeds if callback commits and runs extra op", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "autocommit": { + "$$exists": false + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/callback-retry.json b/test/transactions-convenient-api/unified/callback-retry.json new file mode 100644 index 0000000000..277dfa18ed --- /dev/null +++ b/test/transactions-convenient-api/unified/callback-retry.json @@ -0,0 +1,472 @@ +{ + 
"description": "callback-retry", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "callback succeeds after multiple connection errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "ignoreResultAndError": true + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + 
"startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "callback is not retried after non-transient error (DuplicateKeyError)", + "operations": [ + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + } + ] +} diff --git 
a/test/transactions-convenient-api/unified/commit-retry-errorLabels.json b/test/transactions-convenient-api/unified/commit-retry-errorLabels.json new file mode 100644 index 0000000000..c6a4e44d62 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-retry-errorLabels.json @@ -0,0 +1,231 @@ +{ + "description": "commit-retry-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commit is retried after commitTransaction UnknownTransactionCommitResult (NotWritablePrimary)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + 
"startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-retry.json b/test/transactions-convenient-api/unified/commit-retry.json new file mode 100644 index 0000000000..928f0167e4 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-retry.json @@ -0,0 +1,552 @@ +{ + "description": "commit-retry", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction succeeds after multiple connection errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { 
+ "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction retry only overwrites write concern w option", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "writeConcern": { + "w": 2, + "journal": true, + "wtimeoutMS": 5000 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 2, + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit is not retried after MaxTimeMSExpired error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "maxCommitTimeMS": 60000 + }, + "expectError": { + "errorCodeName": "MaxTimeMSExpired", + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "maxTimeMS": 60000, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json b/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json new file mode 100644 index 0000000000..0f5a782452 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-transienttransactionerror-4.2.json @@ -0,0 +1,294 @@ +{ + "description": "commit-transienttransactionerror-4.2", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.1.6", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "transaction is retried after commitTransaction TransientTransactionError (PreparedTransactionInProgress)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 267, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + 
"callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-transienttransactionerror.json b/test/transactions-convenient-api/unified/commit-transienttransactionerror.json new file mode 100644 index 0000000000..dd5158d813 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-transienttransactionerror.json @@ -0,0 +1,996 @@ +{ + "description": "commit-transienttransactionerror", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + 
"replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "transaction is retried after commitTransaction TransientTransactionError (LockTimeout)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 24, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + }, + 
"startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction is retried after commitTransaction TransientTransactionError (WriteConflict)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 112, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "3" + 
}, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction is retried after commitTransaction TransientTransactionError (SnapshotUnavailable)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 246, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + 
"$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction is retried after commitTransaction TransientTransactionError (NoSuchTransaction)", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 251, + "closeConnection": false + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + 
"txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit-writeconcernerror.json b/test/transactions-convenient-api/unified/commit-writeconcernerror.json new file mode 100644 index 0000000000..568f7ede42 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit-writeconcernerror.json @@ -0,0 +1,812 @@ +{ + "description": "commit-writeconcernerror", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction is retried after WriteConcernTimeout timeout error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "waiting for replication timed out", + "errInfo": { + "wtimeout": true + } + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": 
false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is retried after WriteConcernTimeout non-timeout error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "multiple errors reported" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { 
+ "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after UnknownReplWriteConcern error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 79, + "codeName": "UnknownReplWriteConcern", + "errmsg": "No write concern mode named 'foo' found in replica set configuration" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "UnknownReplWriteConcern", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "codeName": "UnsatisfiableWriteConcern", + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "UnsatisfiableWriteConcern", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction is not retried after MaxTimeMSExpired error", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 50, + "codeName": "MaxTimeMSExpired", + "errmsg": "operation exceeded time limit" + } + } + } + } + }, + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + }, + "expectError": { + "errorCodeName": "MaxTimeMSExpired", + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/commit.json b/test/transactions-convenient-api/unified/commit.json new file mode 100644 index 0000000000..5684d5ee89 --- /dev/null +++ b/test/transactions-convenient-api/unified/commit.json @@ -0,0 +1,398 @@ +{ + "description": "commit", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + 
"commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction commits after callback returns", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "withTransaction commits after callback returns (second transaction)", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "name": "commitTransaction", + "object": "session0" + }, + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + 
"commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions-convenient-api/unified/transaction-options.json b/test/transactions-convenient-api/unified/transaction-options.json new file mode 100644 index 0000000000..b1a74c5fd1 --- /dev/null +++ b/test/transactions-convenient-api/unified/transaction-options.json @@ -0,0 +1,819 @@ +{ + "description": "transaction-options", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction and no transaction options set", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction inherits transaction options from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { 
+ "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options", + "operations": [ + { + "name": "withTransaction", + "object": "session0", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options override defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "w": "majority" + } + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "withTransaction explicit transaction options override client options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "readConcernLevel": "local", + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "withTransaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session1", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection1", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "withTransaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "w": 1 + }, + "readConcern": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "withTransaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/legacy/abort.json b/test/transactions/legacy/abort.json deleted file mode 100644 index 3729a98298..0000000000 --- 
a/test/transactions/legacy/abort.json +++ /dev/null @@ -1,621 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "implicit abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "two aborts", - "operations": [ - { - "name": "startTransaction", - "object": 
"session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "cannot call abortTransaction twice" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abort without start", - "operations": [ - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "no transaction started" - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abort directly after no-op commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "Cannot call abortTransaction after calling commitTransaction" - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abort directly after commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "Cannot call abortTransaction after calling commitTransaction" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "abort ignores TransactionAborted", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": 
"session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ], - "errorContains": "E11000" - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorCodeName": "NoSuchTransaction", - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abort does not apply writeConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 10 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/bulk.json b/test/transactions/legacy/bulk.json deleted file mode 100644 index 8a9793b8b3..0000000000 --- a/test/transactions/legacy/bulk.json +++ /dev/null @@ -1,531 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "bulk", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "bulkWrite", - "object": 
"collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$set": { - "x": 1 - } - } - } - }, - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 2 - }, - "update": { - "$set": { - "x": 2 - } - }, - "upsert": true - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 3 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 4 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 5 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 6 - } - } - }, - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 7 - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 1 - }, - "replacement": { - "y": 1 - } - } - }, - { - "name": "replaceOne", - "arguments": { - "filter": { - "_id": 2 - }, - "replacement": { - "y": 2 - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 3 - } - } - }, - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 4 - } - } - }, - { - "name": "updateMany", - "arguments": { - "filter": { - "_id": { - "$gte": 2 - } - }, - "update": { - "$set": { - "z": 1 - } - } - } - }, - { - "name": "deleteMany", - "arguments": { - "filter": { - "_id": { - "$gte": 6 - } - } - } - } - ] - }, - "result": { - "deletedCount": 4, - "insertedCount": 6, - "insertedIds": { - "0": 1, - "3": 3, - "4": 4, - "5": 5, - "6": 6, - "7": 7 - }, - "matchedCount": 7, - "modifiedCount": 7, - "upsertedCount": 1, - "upsertedIds": { - "2": 2 - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 1 - }, - "limit": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$set": { - "x": 1 - } - } - }, - { - "q": { - "_id": 2 - }, - "u": { - "$set": { - "x": 2 - } - }, - "upsert": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - }, - { 
- "_id": 6 - }, - { - "_id": 7 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "y": 1 - } - }, - { - "q": { - "_id": 2 - }, - "u": { - "y": 2 - } - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 3 - }, - "limit": 1 - }, - { - "q": { - "_id": 4 - }, - "limit": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": { - "$gte": 2 - } - }, - "u": { - "$set": { - "z": 1 - } - }, - "multi": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$gte": 6 - } - }, - "limit": 0 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "y": 1 - }, - { - "_id": 2, - "y": 2, - "z": 1 - }, - { - "_id": 5, - "z": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/causal-consistency.json b/test/transactions/legacy/causal-consistency.json deleted file mode 100644 index 0e81bf2ff2..0000000000 --- a/test/transactions/legacy/causal-consistency.json +++ /dev/null @@ -1,305 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1, - "count": 0 - } - ], - "tests": [ - { - "description": "causal consistency", - "clientOptions": { - "retryWrites": false - }, - "operations": [ - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "count": 1 - } - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - 
"$inc": { - "count": 1 - } - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "count": 1 - } - } - } - ], - "ordered": true, - "lsid": "session0", - "readConcern": null, - "txnNumber": null, - "startTransaction": null, - "autocommit": null, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "count": 1 - } - } - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "count": 2 - } - ] - } - } - }, - { - "description": "causal consistency disabled", - "clientOptions": { - "retryWrites": false - }, - "sessionOptions": { - "session0": { - "causalConsistency": false - } - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "count": 1 - } - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": null, - "autocommit": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 1 - }, - "u": { - "$inc": { - "count": 1 - } - } - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1, - "count": 1 - }, - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/commit.json b/test/transactions/legacy/commit.json deleted file mode 100644 index faa39a65f1..0000000000 --- 
a/test/transactions/legacy/commit.json +++ /dev/null @@ -1,925 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "rerun commit after empty transaction", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - 
}, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "multiple commits in a row", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "write concern error on commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 10 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commit without start", - "operations": [ - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorContains": "no transaction started" - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "commit after no-op abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorContains": "Cannot call commitTransaction after calling abortTransaction" - } - } - ], - "expectations": [], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": 
"commit after abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorContains": "Cannot call commitTransaction after calling abortTransaction" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "multiple commits after empty transaction", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "reset session state commit", - 
"clientOptions": { - "retryWrites": false - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorContains": "no transaction started" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": null, - "startTransaction": null, - "autocommit": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "reset session state abort", - "clientOptions": { - "retryWrites": false - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0", - "result": { - "errorContains": "no transaction started" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": null, - "startTransaction": null, - "autocommit": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - 
"_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/count.json b/test/transactions/legacy/count.json deleted file mode 100644 index 169296416a..0000000000 --- a/test/transactions/legacy/count.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0.2", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "tests": [ - { - "description": "count", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "count", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorCodeName": "OperationNotSupportedInTransaction", - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "count": "test", - "query": { - "_id": 1 - }, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "count", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/create-collection.json b/test/transactions/legacy/create-collection.json deleted file mode 100644 index 9071c59c41..0000000000 --- a/test/transactions/legacy/create-collection.json +++ /dev/null @@ -1,204 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.4", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "explicitly create collection using create command", - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "test" - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "session": "session0", - "collection": "test" - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "test", - "writeConcern": null - }, - "command_name": "drop", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "create": "test", - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "create", - "database_name": "transaction-tests" - 
} - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "implicitly create collection using insert", - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "test" - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "assertCollectionNotExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertCollectionExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "test", - "writeConcern": null - }, - "command_name": "drop", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/create-index.json b/test/transactions/legacy/create-index.json deleted file mode 100644 index 2ff09c9288..0000000000 --- a/test/transactions/legacy/create-index.json +++ /dev/null @@ -1,237 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.4", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "create index on a non-existing collection", - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "test" - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "session": "session0", - "name": "t_1", - "keys": { - "x": 1 - } - } - }, - { - "name": "assertIndexNotExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test", - "index": "t_1" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test", - "index": "t_1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "test", - "writeConcern": null - }, - "command_name": "drop", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "test", - "indexes": [ - { - "name": "t_1", - "key": { - "x": 1 - } - } - ], - "lsid": 
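create-collection.json, deleted above, requires MongoDB 4.3.4+ and checks that a collection created inside a transaction, whether explicitly via the create command or implicitly via a first insert, is invisible to other sessions until commit. A sketch of both paths, same assumed deployment:

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # assumed deployment
db = client["transaction-tests"]

with client.start_session() as session:
    # Explicit creation with the create command.
    db.drop_collection("test")
    with session.start_transaction():
        db.create_collection("test", session=session)
        # The collection does not exist outside the transaction yet;
        # it appears only once the commit below succeeds.

    # Implicit creation via the transaction's first insert.
    db.drop_collection("test")
    with session.start_transaction():
        db["test"].insert_one({"_id": 1}, session=session)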
"session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "createIndexes", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "create index on a collection created within the same transaction", - "operations": [ - { - "name": "dropCollection", - "object": "database", - "arguments": { - "collection": "test" - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "createCollection", - "object": "database", - "arguments": { - "session": "session0", - "collection": "test" - } - }, - { - "name": "createIndex", - "object": "collection", - "arguments": { - "session": "session0", - "name": "t_1", - "keys": { - "x": 1 - } - } - }, - { - "name": "assertIndexNotExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test", - "index": "t_1" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertIndexExists", - "object": "testRunner", - "arguments": { - "database": "transaction-tests", - "collection": "test", - "index": "t_1" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "drop": "test", - "writeConcern": null - }, - "command_name": "drop", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "create": "test", - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "create", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "createIndexes": "test", - "indexes": [ - { - "name": "t_1", - "key": { - "x": 1 - } - } - ], - "lsid": "session0", - "writeConcern": null - }, - "command_name": "createIndexes", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/delete.json b/test/transactions/legacy/delete.json deleted file mode 100644 index 65b8327039..0000000000 --- a/test/transactions/legacy/delete.json +++ /dev/null @@ -1,327 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - } - ], - "tests": [ - { - "description": "delete", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$lte": 3 - } - } - }, - "result": { - 
"deletedCount": 2 - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 1 - }, - "limit": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$lte": 3 - } - }, - "limit": 0 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 4 - }, - "limit": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 5 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for delete", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "deleteMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$lte": 3 - } - } - }, - "result": { - "deletedCount": 2 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 1 - }, - "limit": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": { - "$lte": 3 - } - }, - "limit": 0 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - 
"$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/error-labels-blockConnection.json b/test/transactions/legacy/error-labels-blockConnection.json deleted file mode 100644 index 56b646f7ad..0000000000 --- a/test/transactions/legacy/error-labels-blockConnection.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.2", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", - "clientOptions": { - "socketTimeoutMS": 100 - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "blockConnection": true, - "blockTimeMS": 150 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/error-labels.json b/test/transactions/legacy/error-labels.json deleted file mode 100644 index 0be19c731c..0000000000 --- a/test/transactions/legacy/error-labels.json +++ /dev/null @@ -1,2086 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ], - "serverless": "forbid" - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - 
"data": [], - "tests": [ - { - "description": "DuplicateKey errors do not contain transient label", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "session": "session0", - "documents": [ - { - "_id": 1 - }, - { - "_id": 1 - } - ] - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ], - "errorContains": "E11000" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - }, - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "NotWritablePrimary errors contain transient label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 10107 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "WriteConflict errors contain transient label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 112 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - 
], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "NoSuchTransaction errors contain transient label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 251 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "NoSuchTransaction errors on commit contain transient label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 251 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - 
"description": "add TransientTransactionError label to connection errors, but do not add RetryableWriteError label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 4 - }, - "data": { - "failCommands": [ - "insert", - "find", - "aggregate", - "distinct" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "session": "session0" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": {}, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 
- }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to retryable commit errors", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 11602, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - 
"writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to writeConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "do not add RetryableWriteError label to writeConcernError ShutdownInProgress that occurs within transaction", - "failPoint": { - "configureFailPoint": "failCommand", - 
"mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernFailed", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 64, - "errmsg": "multiple errors reported" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "add UnknownTransactionCommitResult 
label to writeConcernError WriteConcernFailed with wtimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "waiting for replication timed out", - "errInfo": { - "wtimeout": true - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "omit UnknownTransactionCommitResult label from writeConcernError UnsatisfiableWriteConcern", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 100, - "errmsg": "Not enough data-bearing nodes" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": 
"1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "omit UnknownTransactionCommitResult label from writeConcernError UnknownReplWriteConcern", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 79, - "errmsg": "No write concern mode named 'blah' found in replica set configuration" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "RetryableWriteConcern", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "do not add UnknownTransactionCommitResult label to MaxTimeMSExpired inside transactions", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 50 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "maxTimeMS": 60000, - "session": "session0" - }, - "result": { - "errorLabelsOmit": [ - "RetryableWriteError", - "UnknownTransactionCommitResult", - "TransientTransactionError" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": {}, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, 
- "maxTimeMS": 60000 - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "add UnknownTransactionCommitResult label to MaxTimeMSExpired", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 50 - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - }, - "maxCommitTimeMS": 60000 - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "add UnknownTransactionCommitResult label to writeConcernError MaxTimeMSExpired", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 50, - "errmsg": "operation exceeded time limit" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - }, - "maxCommitTimeMS": 60000 - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - 
"expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/errors.json b/test/transactions/legacy/errors.json deleted file mode 100644 index 5fc4905e8c..0000000000 --- a/test/transactions/legacy/errors.json +++ /dev/null @@ -1,222 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "start insert start", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "startTransaction", - "object": "session0", - "result": { - "errorContains": "transaction already in progress" - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ] - }, - { - "description": "start twice", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0", - "result": { - "errorContains": "transaction already in progress" - } - } - ] - }, - { - "description": "commit and start twice", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0", - "result": { - "errorContains": "transaction already in progress" - } - } - ] - }, - { - "description": "write conflict commit", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "startTransaction", - "object": "session1" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session1", - "document": { - "_id": 1 - } - }, - "result": { - "errorCodeName": "WriteConflict", - "errorLabelsContain": [ - 
"TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session1", - "result": { - "errorCodeName": "NoSuchTransaction", - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ] - } - } - ] - }, - { - "description": "write conflict abort", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "startTransaction", - "object": "session1" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session1", - "document": { - "_id": 1 - } - }, - "result": { - "errorCodeName": "WriteConflict", - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "abortTransaction", - "object": "session1" - } - ] - } - ] -} diff --git a/test/transactions/legacy/findOneAndDelete.json b/test/transactions/legacy/findOneAndDelete.json deleted file mode 100644 index d82657a9f5..0000000000 --- a/test/transactions/legacy/findOneAndDelete.json +++ /dev/null @@ -1,221 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "tests": [ - { - "description": "findOneAndDelete", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - } - }, - "result": { - "_id": 3 - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - } - }, - "result": null - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "remove": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 4 - }, - "remove": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "collection writeConcern 
ignored for findOneAndDelete", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - } - }, - "result": { - "_id": 3 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "remove": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/findOneAndReplace.json b/test/transactions/legacy/findOneAndReplace.json deleted file mode 100644 index 7a54ca3433..0000000000 --- a/test/transactions/legacy/findOneAndReplace.json +++ /dev/null @@ -1,255 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "tests": [ - { - "description": "findOneAndReplace", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "replacement": { - "x": 1 - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3 - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - }, - "replacement": { - "x": 1 - }, - "upsert": true, - "returnDocument": "After" - }, - "result": { - "_id": 4, - "x": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "x": 1 - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 4 - }, - "update": { - "x": 1 - }, - "new": true, - "upsert": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - 
"writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3, - "x": 1 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for findOneAndReplace", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "replacement": { - "x": 1 - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "x": 1 - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/findOneAndUpdate.json b/test/transactions/legacy/findOneAndUpdate.json deleted file mode 100644 index 7af54ba808..0000000000 --- a/test/transactions/legacy/findOneAndUpdate.json +++ /dev/null @@ -1,413 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "tests": [ - { - "description": "findOneAndUpdate", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3 - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true, - "returnDocument": "After" - }, - "result": { - "_id": 4, - "x": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3, - "x": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": 
"Before" - }, - "result": { - "_id": 3, - "x": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": true, - "upsert": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3, - "x": 2 - }, - { - "_id": 4, - "x": 1 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for findOneAndUpdate", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 3 - } - }, - { - "name": 
"commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 3 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/insert.json b/test/transactions/legacy/insert.json deleted file mode 100644 index f26e7c2a76..0000000000 --- a/test/transactions/legacy/insert.json +++ /dev/null @@ -1,648 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "insert", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "insertedId": 4 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 5 - } - }, - "result": { - "insertedId": 5 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - 
"$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 5 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - } - ] - } - } - }, - { - "description": "insert with session1", - "operations": [ - { - "name": "startTransaction", - "object": "session1" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session1", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "session": "session1" - }, - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - } - }, - { - "name": "commitTransaction", - "object": "session1" - }, - { - "name": "startTransaction", - "object": "session1" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session1", - "document": { - "_id": 4 - } - }, - "result": { - "insertedId": 4 - } - }, - { - "name": "abortTransaction", - "object": "session1" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session1", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session1", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session1", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session1", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session1", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": 
false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - }, - { - "description": "collection writeConcern without transaction", - "clientOptions": { - "retryWrites": false - }, - "operations": [ - { - "name": "insertOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": null, - "startTransaction": null, - "autocommit": null, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "collection writeConcern ignored for insert", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "insertMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 2, - "1": 3 - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/isolation.json b/test/transactions/legacy/isolation.json deleted file mode 100644 index f16b28a5e6..0000000000 --- a/test/transactions/legacy/isolation.json +++ /dev/null @@ -1,225 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": 
"transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "one transaction", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session1", - "filter": { - "_id": 1 - } - }, - "result": [] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [] - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session1", - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1 - } - ] - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "two transactions", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session1" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session1", - "filter": { - "_id": 1 - } - }, - "result": [] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [] - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session1", - "filter": { - "_id": 1 - } - }, - "result": [] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 1 - } - }, - "result": [ - { - "_id": 1 - } - ] - }, - { - "name": "commitTransaction", - "object": "session1" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/mongos-pin-auto.json b/test/transactions/legacy/mongos-pin-auto.json deleted file mode 100644 index 037f212f49..0000000000 --- a/test/transactions/legacy/mongos-pin-auto.json +++ /dev/null @@ -1,4815 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ], - "serverless": "forbid" - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "remain pinned after non-transient Interrupted error on insertOne", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - 
"configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError", - "UnknownTransactionCommitResult" - ], - "errorCodeName": "Interrupted" - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ] - } - } - }, - { - "description": "unpin after transient error within a transaction", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - 
"autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on insertOne insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on insertMany insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "session": "session0", - "documents": [ - { - "_id": 4 - }, - { - "_id": 5 - } - ] - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on updateOne update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "updateOne", - 
"object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on replaceOne update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "replacement": { - "y": 1 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on updateMany update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 1 - } - }, - "update": { - "$set": { - "z": 1 - } - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on deleteOne delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": 
"deleteOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on deleteMany delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 1 - } - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - 
"arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "replacement": { - "y": 1 - }, - "returnDocument": "Before" - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on bulkWrite insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - } - ] - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on bulkWrite update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "update" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$set": { - "x": 1 - } - } - } - } - ] - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on bulkWrite delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - } - ] - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on find find", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on countDocuments aggregate", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": 
"failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "session": "session0", - "filter": {} - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on aggregate aggregate", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "session": "session0", - "pipeline": [] - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on distinct distinct", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 11601 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient Interrupted error on runCommand insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 11601 - } - } - } - }, - { - 
"name": "runCommand", - "object": "database", - "command_name": "insert", - "arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - } - }, - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on insertOne insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on insertOne insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on insertMany insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - 
"arguments": { - "session": "session0", - "documents": [ - { - "_id": 4 - }, - { - "_id": 5 - } - ] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on insertMany insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "session": "session0", - "documents": [ - { - "_id": 4 - }, - { - "_id": 5 - } - ] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on updateOne update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "closeConnection": true - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on updateOne update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - 
"session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on replaceOne update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "closeConnection": true - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "replacement": { - "y": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on replaceOne update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "replacement": { - "y": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on updateMany update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "closeConnection": true - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "session": 
"session0", - "filter": { - "_id": { - "$gte": 1 - } - }, - "update": { - "$set": { - "z": 1 - } - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on updateMany update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 1 - } - }, - "update": { - "$set": { - "z": 1 - } - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on deleteOne delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "closeConnection": true - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on deleteOne delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "deleteOne", - "object": "collection", - "arguments": { - "session": "session0", 
- "filter": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on deleteMany delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "closeConnection": true - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 1 - } - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on deleteMany delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "deleteMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 1 - } - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - 
"errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on findOneAndDelete findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "findOneAndDelete", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on findOneAndUpdate findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - 
}, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "closeConnection": true - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "replacement": { - "y": 1 - }, - "returnDocument": "Before" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on findOneAndReplace findAndModify", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "findAndModify" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "findOneAndReplace", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - }, - "replacement": { - "y": 1 - }, - "returnDocument": "Before" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on bulkWrite insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - 
}, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - } - ] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on bulkWrite insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - } - ] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on bulkWrite update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "closeConnection": true - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$set": { - "x": 1 - } - } - } - } - ] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on bulkWrite update", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - 
"configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "update" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "updateOne", - "arguments": { - "filter": { - "_id": 1 - }, - "update": { - "$set": { - "x": 1 - } - } - } - } - ] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on bulkWrite delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "closeConnection": true - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - } - ] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on bulkWrite delete", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "delete" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "session": "session0", - "requests": [ - { - "name": "deleteOne", - "arguments": { - "filter": { - "_id": 1 - } - } - } - ] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on find find", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - 
"name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "closeConnection": true - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on find find", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "find" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on countDocuments aggregate", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "session": "session0", - "filter": {} - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on countDocuments aggregate", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": 
"failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "session": "session0", - "filter": {} - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on aggregate aggregate", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "closeConnection": true - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "session": "session0", - "pipeline": [] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on aggregate aggregate", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "aggregate" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "session": "session0", - "pipeline": [] - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on distinct distinct", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "closeConnection": true - } - } - } - }, - { - "name": 
"distinct", - "object": "collection", - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on distinct distinct", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "distinct" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient connection error on runCommand insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "insert", - "arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient ShutdownInProgress error on runCommand insert", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "errorCode": 91 - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "insert", - 
"arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "assertSessionUnpinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/mongos-recovery-token.json b/test/transactions/legacy/mongos-recovery-token.json deleted file mode 100644 index da4e9861d1..0000000000 --- a/test/transactions/legacy/mongos-recovery-token.json +++ /dev/null @@ -1,511 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ], - "serverless": "forbid" - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction explicit retries include recoveryToken", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction retry succeeds on new mongos", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - 
"session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - }, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction retry fails on new mongos", - "useMultipleMongoses": true, - "clientOptions": { - "heartbeatFrequencyMS": 30000 - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 7 - }, - "data": { - "failCommands": [ - "commitTransaction", - "isMaster", - "hello" - ], - "closeConnection": true - } - } - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ], - "errorCodeName": "NoSuchTransaction" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - }, - "recoveryToken": 42 - }, - "command_name": 
"commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction sends recoveryToken", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "closeConnection": true - } - } - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/pin-mongos.json b/test/transactions/legacy/pin-mongos.json deleted file mode 100644 index 485a3d9322..0000000000 --- a/test/transactions/legacy/pin-mongos.json +++ /dev/null @@ -1,1229 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ], - "serverless": "forbid" - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "tests": [ - { - "description": "countDocuments", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - 
"_id": 2 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "distinct", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": [ - 1, - 2 - ] - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": [ - 1, - 2 - ] - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": [ - 1, - 2 - ] - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": [ - 1, - 2 - ] - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": [ - 1, - 2 - ] - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": [ - 1, - 2 - ] - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": [ - 1, - 2 - ] - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "fieldName": "_id", - "session": "session0" - }, - "result": [ - 1, - 2 - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "find", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": [ - { - "_id": 2 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": [ - { - "_id": 2 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": [ - { - "_id": 2 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": [ - { - "_id": 2 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": [ - { - "_id": 2 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": [ - { - "_id": 2 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": [ - { - "_id": 2 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "filter": { - "_id": 2 - }, - "session": "session0" - }, - "result": [ - { - "_id": 2 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - 
"description": "insertOne", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - }, - "session": "session0" - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - }, - "session": "session0" - }, - "result": { - "insertedId": 4 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 5 - }, - "session": "session0" - }, - "result": { - "insertedId": 5 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 6 - }, - "session": "session0" - }, - "result": { - "insertedId": 6 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 7 - }, - "session": "session0" - }, - "result": { - "insertedId": 7 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 8 - }, - "session": "session0" - }, - "result": { - "insertedId": 8 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 9 - }, - "session": "session0" - }, - "result": { - "insertedId": 9 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 10 - }, - "session": "session0" - }, - "result": { - "insertedId": 10 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - }, - { - "_id": 6 - }, - { - "_id": 7 - }, - { - "_id": 8 - }, - { - "_id": 9 - }, - { - "_id": 10 - } - ] - } - } - }, - { - "description": "mixed read write operations", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 3 - }, - "session": "session0" - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 3 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 3 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 3 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 3 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "countDocuments", - "object": "collection", - "arguments": { - "filter": { - "_id": 3 - }, - "session": "session0" - }, - "result": 1 - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 4 - }, - "session": "session0" - }, - "result": { - "insertedId": 4 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 5 - }, - "session": "session0" - }, - "result": { - "insertedId": 5 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - "_id": 6 - }, - "session": "session0" - }, - "result": { - "insertedId": 6 - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "document": { - 
"_id": 7 - }, - "session": "session0" - }, - "result": { - "insertedId": 7 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - }, - { - "_id": 5 - }, - { - "_id": 6 - }, - { - "_id": 7 - } - ] - } - } - }, - { - "description": "multiple commits", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 3, - "1": 4 - } - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "remain pinned after non-transient error on commit", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 3, - "1": 4 - } - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 51 - } - } - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "TransientTransactionError" - ], - "errorCode": 51 - } - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "assertSessionPinned", - "object": "testRunner", - "arguments": { - "session": "session0" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "unpin after transient error within a transaction", - "useMultipleMongoses": true, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - 
}, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unpin after transient error within a transaction and commit", - "useMultipleMongoses": true, - "clientOptions": { - "heartbeatFrequencyMS": 30000 - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "targetedFailPoint", - "object": "testRunner", - "arguments": { - "session": "session0", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 7 - }, - "data": { - "failCommands": [ - "insert", - "isMaster", - "hello" - ], - "closeConnection": true - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 4 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ], - "errorLabelsOmit": [ - "UnknownTransactionCommitResult" - ], - "errorCodeName": "NoSuchTransaction" - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": 
"test", - "documents": [ - { - "_id": 4 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null, - "recoveryToken": 42 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/read-concern.json b/test/transactions/legacy/read-concern.json deleted file mode 100644 index dd9243e2f7..0000000000 --- a/test/transactions/legacy/read-concern.json +++ /dev/null @@ -1,1628 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "tests": [ - { - "description": "only first countDocuments includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "countDocuments", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 2 - } - } - }, - "result": 3 - }, - { - "name": "countDocuments", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 2 - } - } - }, - "result": 3 - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$match": { - "_id": { - "$gte": 2 - } - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ], - "cursor": {}, - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$match": { - "_id": { - "$gte": 2 - } - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ], - "cursor": {}, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "only first find includes 
readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "only first aggregate includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - 
"_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "only first distinct includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "distinct", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "distinct", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - 
{ - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "only first runCommand includes readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "lsid": "session0", - "readConcern": { - "level": "majority" - }, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "countDocuments ignores collection readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "countDocuments", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 2 - } - } - }, - "result": 3 - }, - { - "name": "countDocuments", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 2 - } - } - }, - "result": 3 - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$match": { - "_id": { - "$gte": 2 - } - } - }, - { - "$group": { - "_id": 1, - "n": { - "$sum": 1 - } - } - } - ], - "cursor": {}, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$match": { - "_id": { - "$gte": 2 - } - } - }, - { - "$group": { - "_id": 1, - 
"n": { - "$sum": 1 - } - } - } - ], - "cursor": {}, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "find ignores collection readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "aggregate ignores collection readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readConcern": { - 
"level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "distinct ignores collection readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "distinct", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "distinct", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - 
"autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "runCommand ignores database readConcern", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "runCommand", - "object": "database", - "databaseOptions": { - "readConcern": { - "level": "majority" - } - }, - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/read-pref.json b/test/transactions/legacy/read-pref.json deleted file mode 100644 index bf1f1970eb..0000000000 --- a/test/transactions/legacy/read-pref.json +++ /dev/null @@ -1,720 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "default readPreference", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 - } - } - }, - { - "name": 
"aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, - "arguments": { - "session": "session0", - "pipeline": [ - { - "$match": { - "_id": 1 - } - }, - { - "$count": "count" - } - ] - }, - "result": [ - { - "count": 1 - } - ] - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "primary readPreference", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Primary" - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 - } - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, - "arguments": { - "session": "session0", - "pipeline": [ - { - "$match": { - "_id": 1 - } - }, - { - "$count": "count" - } - ] - }, - "result": [ - { - "count": 1 - } - ] - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Secondary" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "secondary readPreference", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Secondary" - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 - } - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "session": "session0", - "pipeline": [ - { - "$match": { - "_id": 1 - } - }, - { - 
"$count": "count" - } - ] - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "primaryPreferred readPreference", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "PrimaryPreferred" - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 - } - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "session": "session0", - "pipeline": [ - { - "$match": { - "_id": 1 - } - }, - { - "$count": "count" - } - ] - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "nearest readPreference", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Nearest" - } - } - } - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 1, - "1": 2, - "2": 3, - "3": 4 - } - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "session": "session0", - "pipeline": [ - { - "$match": { - "_id": 1 - } - }, - { - "$count": "count" - } - ] - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "session": "session0", - "batchSize": 3 - }, - 
"result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "aggregate", - "object": "collection", - "collectionOptions": { - "readPreference": { - "mode": "Primary" - } - }, - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "secondary write only", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Secondary" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/reads.json b/test/transactions/legacy/reads.json deleted file mode 100644 index 9fc587f482..0000000000 --- a/test/transactions/legacy/reads.json +++ /dev/null @@ -1,543 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ], - "tests": [ - { - "description": "collection readConcern without transaction", - "operations": [ - { - "name": "find", - "object": "collection", - "collectionOptions": { - "readConcern": { - "level": "majority" - } - }, - "arguments": { - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "readConcern": { - "level": "majority" - }, - "lsid": "session0", - "txnNumber": null, - "startTransaction": null, - "autocommit": null - }, - "command_name": "find", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "find", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "batchSize": 3 - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - 
"txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "find": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "find", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "aggregate", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "aggregate", - "object": "collection", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "batchSize": 3, - "session": "session0" - }, - "result": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "aggregate": "test", - "pipeline": [ - { - "$project": { - "_id": 1 - } - } - ], - "cursor": { - "batchSize": 3 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "aggregate", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "getMore": { - "$numberLong": "42" - }, - "collection": "test", - "batchSize": 3, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false - }, - "command_name": "getMore", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": 
null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - }, - { - "description": "distinct", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "distinct", - "object": "collection", - "arguments": { - "session": "session0", - "fieldName": "_id" - }, - "result": [ - 1, - 2, - 3, - 4 - ] - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "distinct": "test", - "key": "_id", - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "distinct", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "readConcern": null, - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - }, - { - "_id": 4 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-abort-errorLabels.json b/test/transactions/legacy/retryable-abort-errorLabels.json deleted file mode 100644 index 1110ce2c32..0000000000 --- a/test/transactions/legacy/retryable-abort-errorLabels.json +++ /dev/null @@ -1,204 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "abortTransaction only retries once with RetryableWriteError from server", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - 
"collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction does not retry without RetryableWriteError label", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-abort.json b/test/transactions/legacy/retryable-abort.json deleted file mode 100644 index 13cc7c88fb..0000000000 --- a/test/transactions/legacy/retryable-abort.json +++ /dev/null @@ -1,2017 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "abortTransaction only performs a single retry", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, 
- { - "description": "abortTransaction does not retry after Interrupted", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 11601, - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction does not retry after WriteConcernError Interrupted", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "writeConcernError": { - "code": 11601, - "errmsg": "operation was interrupted" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after connection error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - 
"readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 10107, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 13436, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", 
- "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 13435, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 11602, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": 
"1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 11600, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 189, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { 
- "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 91, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 7, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": 
"abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 6, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 9001, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after NetworkTimeout", - "failPoint": { 
- "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorCode": 89, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after WriteConcernError InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11600, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after WriteConcernError InterruptedDueToReplStateChange", - 
"failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11602, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after WriteConcernError PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 189, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } 
- } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "abortTransaction succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "abortTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-commit-errorLabels.json b/test/transactions/legacy/retryable-commit-errorLabels.json deleted file mode 100644 index e0818f237b..0000000000 --- a/test/transactions/legacy/retryable-commit-errorLabels.json +++ /dev/null @@ -1,223 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.3.1", - "topology": [ - "replicaset", - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction does not retry error without RetryableWriteError label", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 11600, - "errorLabels": [] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": 
"transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "commitTransaction retries once with RetryableWriteError from server", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 112, - "errorLabels": [ - "RetryableWriteError" - ] - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-commit.json b/test/transactions/legacy/retryable-commit.json deleted file mode 100644 index 49148c62d2..0000000000 --- a/test/transactions/legacy/retryable-commit.json +++ /dev/null @@ -1,2336 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "commitTransaction fails after two errors", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - 
"command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction applies majority write concern on retries", - "clientOptions": { - "retryWrites": false - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 2 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 2, - "j": true, - "wtimeout": 5000 - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsContain": [ - "RetryableWriteError", - "UnknownTransactionCommitResult" - ], - "errorLabelsOmit": [ - "TransientTransactionError" - ] - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": 2, - "j": true, - "wtimeout": 5000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "j": true, - "wtimeout": 5000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - 
"txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "j": true, - "wtimeout": 5000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction fails after Interrupted", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 11601, - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorCodeName": "Interrupted", - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "writeConcernError": { - "code": 100, - "errmsg": "Not enough data-bearing nodes" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0", - "result": { - "errorLabelsOmit": [ - "RetryableWriteError", - "TransientTransactionError", - "UnknownTransactionCommitResult" - ] - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds 
after connection error", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after NotWritablePrimary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 10107, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after NotPrimaryOrSecondary", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - 
"failCommands": [ - "commitTransaction" - ], - "errorCode": 13436, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after NotPrimaryNoSecondaryOk", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 13435, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" 
- ], - "errorCode": 11602, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 11600, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 189, - "errorLabels": [ - 
"RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 91, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after HostNotFound", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 7, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - 
"operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after HostUnreachable", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 6, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after SocketException", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 9001, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": 
"session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after NetworkTimeout", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorCode": 89, - "errorLabels": [ - "RetryableWriteError" - ], - "closeConnection": false - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after WriteConcernError InterruptedAtShutdown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11600, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": 
"session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after WriteConcernError InterruptedDueToReplStateChange", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 11602, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after WriteConcernError PrimarySteppedDown", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 
1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 189, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commitTransaction succeeds after WriteConcernError ShutdownInProgress", - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "commitTransaction" - ], - "errorLabels": [ - "RetryableWriteError" - ], - "writeConcernError": { - "code": 91, - "errmsg": "Replication is being shut down" - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority", - "wtimeout": 10000 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - 
} - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/retryable-writes.json b/test/transactions/legacy/retryable-writes.json deleted file mode 100644 index c932893b5b..0000000000 --- a/test/transactions/legacy/retryable-writes.json +++ /dev/null @@ -1,343 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "increment txnNumber", - "clientOptions": { - "retryWrites": true - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 3 - } - }, - "result": { - "insertedId": 3 - } - }, - { - "name": "abortTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "arguments": { - "documents": [ - { - "_id": 4 - }, - { - "_id": 5 - } - ], - "session": "session0" - }, - "result": { - "insertedIds": { - "0": 4, - "1": 5 - } - } - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 3 - } - ], - "ordered": true, - "readConcern": { - "afterClusterTime": 42 - }, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "3" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 4 - }, 
- { - "_id": 5 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "4" - }, - "startTransaction": null, - "autocommit": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 4 - }, - { - "_id": 5 - } - ] - } - } - }, - { - "description": "writes are not retried", - "clientOptions": { - "retryWrites": true - }, - "failPoint": { - "configureFailPoint": "failCommand", - "mode": { - "times": 1 - }, - "data": { - "failCommands": [ - "insert" - ], - "closeConnection": true - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "errorLabelsContain": [ - "TransientTransactionError" - ] - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - } - ] -} diff --git a/test/transactions/legacy/run-command.json b/test/transactions/legacy/run-command.json deleted file mode 100644 index 2f2a3a8815..0000000000 --- a/test/transactions/legacy/run-command.json +++ /dev/null @@ -1,306 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "run command with default read preference", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "runCommand", - "object": "database", - "command_name": "insert", - "arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - } - }, - "result": { - "n": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "run command with secondary read preference in client option and primary read preference in transaction 
options", - "clientOptions": { - "readPreference": "secondary" - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Primary" - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "insert", - "arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - } - }, - "result": { - "n": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "run command with explicit primary read preference", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "runCommand", - "object": "database", - "command_name": "insert", - "arguments": { - "session": "session0", - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ] - }, - "readPreference": { - "mode": "Primary" - } - }, - "result": { - "n": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - }, - { - "description": "run command fails with explicit secondary read preference", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - }, - "readPreference": { - "mode": "Secondary" - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - } - ] - }, - { - "description": "run command fails with secondary read preference from transaction options", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Secondary" - } - } - } - }, - { - "name": "runCommand", - "object": "database", - "command_name": "find", - "arguments": { - "session": "session0", - "command": { - "find": "test" - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/transaction-options-repl.json b/test/transactions/legacy/transaction-options-repl.json deleted file mode 100644 
index 33324debb8..0000000000 --- a/test/transactions/legacy/transaction-options-repl.json +++ /dev/null @@ -1,181 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "readConcern snapshot in startTransaction options", - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "majority" - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "snapshot" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "snapshot" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "snapshot" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "snapshot", - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/transaction-options.json b/test/transactions/legacy/transaction-options.json deleted file mode 100644 index 25d245dca5..0000000000 --- a/test/transactions/legacy/transaction-options.json +++ /dev/null @@ -1,1404 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [], - "tests": [ - { - "description": "no transaction options set", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": 
"collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "afterClusterTime": 42 - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction options inherited from client", - "clientOptions": { - "w": 1, - "readConcernLevel": "local" - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local" - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - }, - "maxTimeMS": null - }, - "command_name": 
"commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local", - "afterClusterTime": 42 - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - }, - "maxTimeMS": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "transaction options inherited from defaultTransactionOptions", - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": 1 - }, - "maxCommitTimeMS": 60000 - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority", - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "startTransaction options override defaults", - "clientOptions": { - "readConcernLevel": "local", - "w": 1 - }, - 
"sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "snapshot" - }, - "writeConcern": { - "w": 1 - }, - "maxCommitTimeMS": 30000 - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": "majority" - }, - "maxCommitTimeMS": 60000 - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority", - "afterClusterTime": 42 - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "defaultTransactionOptions override client options", - "clientOptions": { - "readConcernLevel": "local", - "w": 1 - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "majority" - }, - "writeConcern": { - "w": "majority" - }, - "maxCommitTimeMS": 60000 - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - 
"session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority" - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": 60000 - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "majority", - "afterClusterTime": 42 - }, - "writeConcern": null, - "maxTimeMS": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": "majority" - }, - "maxTimeMS": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "readConcern local in defaultTransactionOptions", - "clientOptions": { - "w": 1 - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readConcern": { - "level": "local" - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - }, - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 2 - } - }, - "result": { - "insertedId": 2 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local" - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - }, - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 2 - } - ], - "ordered": true, - 
"lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": { - "level": "local", - "afterClusterTime": 42 - }, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "2" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": { - "w": 1 - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "client writeConcern ignored for bulk", - "clientOptions": { - "w": "majority" - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 1 - } - } - } - }, - { - "name": "bulkWrite", - "object": "collection", - "arguments": { - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - } - ], - "session": "session0" - }, - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "0": 1 - }, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": 1 - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "readPreference inherited from client", - "clientOptions": { - "readPreference": "secondary" - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], 
- "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "readPreference inherited from defaultTransactionOptions", - "clientOptions": { - "readPreference": "primary" - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readPreference": { - "mode": "Secondary" - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - }, - { - "description": "startTransaction overrides readPreference", - "clientOptions": { - "readPreference": "primary" - }, - "sessionOptions": { - "session0": { - "defaultTransactionOptions": { - "readPreference": { - "mode": "Primary" - } - } - } - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "readPreference": { - "mode": "Secondary" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "find", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 1 - } - }, - "result": { - "errorContains": "read preference in a transaction must be primary" - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/legacy/update.json b/test/transactions/legacy/update.json deleted file mode 100644 index e33bf5b810..0000000000 --- a/test/transactions/legacy/update.json +++ /dev/null @@ 
-1,442 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3 - } - ], - "tests": [ - { - "description": "update", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - }, - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 4 - } - }, - { - "name": "replaceOne", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "x": 1 - }, - "replacement": { - "y": 1 - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "updateMany", - "object": "collection", - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 3 - } - }, - "update": { - "$set": { - "z": 1 - } - } - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 4 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "x": 1 - }, - "u": { - "y": 1 - } - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": { - "$gte": 3 - } - }, - "u": { - "$set": { - "z": 1 - } - }, - "multi": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 1 - }, - { - "_id": 2 - }, - { - "_id": 3, - "z": 1 - }, - { - "_id": 4, - "y": 1, - "z": 1 - } - ] - } - } - }, - { - "description": "collections writeConcern ignored for update", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "updateOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 4 - }, - "update": { - "$inc": { - "x": 1 - } - }, - 
"upsert": true - }, - "result": { - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 1, - "upsertedId": 4 - } - }, - { - "name": "replaceOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "x": 1 - }, - "replacement": { - "y": 1 - } - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "updateMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": "majority" - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": { - "$gte": 3 - } - }, - "update": { - "$set": { - "z": 1 - } - } - }, - "result": { - "matchedCount": 2, - "modifiedCount": 2, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 4 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ], - "ordered": true, - "readConcern": null, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "x": 1 - }, - "u": { - "y": 1 - } - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": { - "$gte": 3 - } - }, - "u": { - "$set": { - "z": 1 - } - }, - "multi": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ] - } - ] -} diff --git a/test/transactions/legacy/write-concern.json b/test/transactions/legacy/write-concern.json deleted file mode 100644 index 84b1ea3650..0000000000 --- a/test/transactions/legacy/write-concern.json +++ /dev/null @@ -1,1278 +0,0 @@ -{ - "runOn": [ - { - "minServerVersion": "4.0", - "topology": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.8", - "topology": [ - "sharded" - ] - } - ], - "database_name": "transaction-tests", - "collection_name": "test", - "data": [ - { - "_id": 0 - } - ], - "tests": [ - { - "description": "commit with majority", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - 
], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - } - ] - } - } - }, - { - "description": "commit with default", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - } - ] - } - } - }, - { - "description": "abort with majority", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": "majority" - } - } - } - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": { - "w": "majority" - } - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - } - ] - } - } - }, - { - "description": "abort with default", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "abortTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], 
- "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "abortTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "abortTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - } - ] - } - } - }, - { - "description": "start with unacknowledged write concern", - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "arguments": { - "options": { - "writeConcern": { - "w": 0 - } - } - }, - "result": { - "errorContains": "transactions do not support unacknowledged write concern" - } - } - ] - }, - { - "description": "start with implicit unacknowledged write concern", - "clientOptions": { - "w": 0 - }, - "operations": [ - { - "name": "startTransaction", - "object": "session0", - "result": { - "errorContains": "transactions do not support unacknowledged write concern" - } - } - ] - }, - { - "description": "unacknowledged write concern coll insertOne", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "document": { - "_id": 1 - } - }, - "result": { - "insertedId": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll insertMany", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "insertMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ] - }, - "result": { - "insertedIds": { - "0": 1, - "1": 2 - } - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - }, - { - "_id": 2 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - 
"startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - }, - { - "_id": 2 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll bulkWrite", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "bulkWrite", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "requests": [ - { - "name": "insertOne", - "arguments": { - "document": { - "_id": 1 - } - } - } - ] - }, - "result": { - "deletedCount": 0, - "insertedCount": 1, - "insertedIds": { - "0": 1 - }, - "matchedCount": 0, - "modifiedCount": 0, - "upsertedCount": 0, - "upsertedIds": {} - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "insert": "test", - "documents": [ - { - "_id": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "insert", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0 - }, - { - "_id": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll deleteOne", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "deleteOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 0 - }, - "limit": 1 - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "unacknowledged write concern coll deleteMany", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "deleteMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - } - }, - "result": { - "deletedCount": 1 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "delete": "test", - "deletes": [ - { - "q": { - "_id": 0 - }, - "limit": 0 - } 
- ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "delete", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "unacknowledged write concern coll updateOne", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateOne", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 0 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "upsert": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "x": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll updateMany", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "updateMany", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "upsert": true - }, - "result": { - "matchedCount": 1, - "modifiedCount": 1, - "upsertedCount": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "update": "test", - "updates": [ - { - "q": { - "_id": 0 - }, - "u": { - "$inc": { - "x": 1 - } - }, - "multi": true, - "upsert": true - } - ], - "ordered": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "update", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "x": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll findOneAndDelete", - "operations": [ - { - "name": 
"startTransaction", - "object": "session0" - }, - { - "name": "findOneAndDelete", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - } - }, - "result": { - "_id": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 0 - }, - "remove": true, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [] - } - } - }, - { - "description": "unacknowledged write concern coll findOneAndReplace", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndReplace", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - }, - "replacement": { - "x": 1 - }, - "returnDocument": "Before" - }, - "result": { - "_id": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 0 - }, - "update": { - "x": 1 - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - "autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "x": 1 - } - ] - } - } - }, - { - "description": "unacknowledged write concern coll findOneAndUpdate", - "operations": [ - { - "name": "startTransaction", - "object": "session0" - }, - { - "name": "findOneAndUpdate", - "object": "collection", - "collectionOptions": { - "writeConcern": { - "w": 0 - } - }, - "arguments": { - "session": "session0", - "filter": { - "_id": 0 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "returnDocument": "Before" - }, - "result": { - "_id": 0 - } - }, - { - "name": "commitTransaction", - "object": "session0" - } - ], - "expectations": [ - { - "command_started_event": { - "command": { - "findAndModify": "test", - "query": { - "_id": 0 - }, - "update": { - "$inc": { - "x": 1 - } - }, - "new": false, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": true, - "autocommit": false, - "readConcern": null, - "writeConcern": null - }, - "command_name": "findAndModify", - "database_name": "transaction-tests" - } - }, - { - "command_started_event": { - "command": { - "commitTransaction": 1, - "lsid": "session0", - "txnNumber": { - "$numberLong": "1" - }, - "startTransaction": null, - 
"autocommit": false, - "writeConcern": null - }, - "command_name": "commitTransaction", - "database_name": "admin" - } - } - ], - "outcome": { - "collection": { - "data": [ - { - "_id": 0, - "x": 1 - } - ] - } - } - } - ] -} diff --git a/test/transactions/unified/abort.json b/test/transactions/unified/abort.json new file mode 100644 index 0000000000..c151a7d0c6 --- /dev/null +++ b/test/transactions/unified/abort.json @@ -0,0 +1,828 @@ +{ + "description": "abort", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + 
"autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "implicit abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "endSession" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "two aborts", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "cannot call abortTransaction twice" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort without start", + "operations": [ + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + 
"documents": [] + } + ] + }, + { + "description": "abort directly after no-op commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "Cannot call abortTransaction after calling commitTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort directly after commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "abortTransaction", + "expectError": { + "errorContains": "Cannot call abortTransaction after calling commitTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "abort ignores TransactionAborted", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "NoSuchTransaction", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + 
}, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abort does not apply writeConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 10 + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/bulk.json b/test/transactions/unified/bulk.json new file mode 100644 index 0000000000..ece162518f --- /dev/null +++ b/test/transactions/unified/bulk.json @@ -0,0 +1,652 @@ +{ + "description": "bulk", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "bulk", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + 
} + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + }, + { + "updateOne": { + "filter": { + "_id": 2 + }, + "update": { + "$set": { + "x": 2 + } + }, + "upsert": true + } + }, + { + "insertOne": { + "document": { + "_id": 3 + } + } + }, + { + "insertOne": { + "document": { + "_id": 4 + } + } + }, + { + "insertOne": { + "document": { + "_id": 5 + } + } + }, + { + "insertOne": { + "document": { + "_id": 6 + } + } + }, + { + "insertOne": { + "document": { + "_id": 7 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 2 + }, + "replacement": { + "y": 2 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "filter": { + "_id": 4 + } + } + }, + { + "updateMany": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "update": { + "$set": { + "z": 1 + } + } + } + }, + { + "deleteMany": { + "filter": { + "_id": { + "$gte": 6 + } + } + } + } + ] + }, + "expectResult": { + "deletedCount": 4, + "insertedCount": 6, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "3": 3, + "4": 4, + "5": 5, + "6": 6, + "7": 7 + } + }, + "matchedCount": 7, + "modifiedCount": 7, + "upsertedCount": 1, + "upsertedIds": { + "2": 2 + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$set": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "$set": { + "x": 2 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + 
"txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + }, + { + "q": { + "_id": 2 + }, + "u": { + "y": 2 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 3 + }, + "limit": 1 + }, + { + "q": { + "_id": 4 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 2 + } + }, + "u": { + "$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$gte": 6 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "y": 1 + }, + { + "_id": 2, + "y": 2, + "z": 1 + }, + { + "_id": 5, + "z": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/causal-consistency.json 
b/test/transactions/unified/causal-consistency.json new file mode 100644 index 0000000000..52a6cb8180 --- /dev/null +++ b/test/transactions/unified/causal-consistency.json @@ -0,0 +1,426 @@ +{ + "description": "causal-consistency", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session_no_cc", + "client": "client0", + "sessionOptions": { + "causalConsistency": false + } + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 0 + } + ] + } + ], + "tests": [ + { + "description": "causal consistency", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 2 + } + ] + } + ] + }, + { + "description": "causal consistency disabled", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session_no_cc", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session_no_cc", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session_no_cc", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "count": 1 + } + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session_no_cc", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 1 + }, + "u": { + "$inc": { + "count": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_no_cc" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "count": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/client-bulkWrite.json b/test/transactions/unified/client-bulkWrite.json new file mode 100644 index 0000000000..4a8d013f8d --- /dev/null +++ b/test/transactions/unified/client-bulkWrite.json @@ -0,0 +1,593 @@ +{ + "description": "client bulkWrite transactions", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client_with_wmajority", + "uriOptions": { 
+ "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "session": { + "id": "session_with_wmajority", + "client": "client_with_wmajority" + } + } + ], + "_yamlAnchors": { + "namespace": "transaction-tests.coll0" + }, + "initialData": [ + { + "databaseName": "transaction-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite in a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + 
"ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for client bulkWrite in transaction", + "operations": [ + { + "object": "session_with_wmajority", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "client_with_wmajority", + "name": "clientBulkWrite", + "arguments": { + "session": "session_with_wmajority", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "session_with_wmajority", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client_with_wmajority", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with writeConcern in a transaction causes a transaction error", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "writeConcern": { + "w": 1 + }, + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot set write concern after starting a transaction" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/commit.json b/test/transactions/unified/commit.json new file mode 100644 index 0000000000..ab778d8df2 --- 
/dev/null +++ b/test/transactions/unified/commit.json @@ -0,0 +1,1234 @@ +{ + "description": "commit", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": 
"session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "rerun commit after empty transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "multiple commits in a row", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + 
"$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "write concern error on commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 10 + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit without start", + "operations": [ + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commit after no-op abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "Cannot call commitTransaction after calling abortTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commit after abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorContains": "Cannot call commitTransaction after calling abortTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + 
"databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "multiple commits after empty transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "reset session state commit", + "operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } 
+ } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "reset session state abort", + "operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction", + "expectError": { + "errorContains": "no transaction started" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + 
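Editor's note — a sketch (not part of the diff) of the session-state reset that the two "reset session state" cases assert, assuming stock PyMongo: after a commit or abort the session is no longer in a transaction, the next write is an ordinary write, and a second commit or abort raises.

from pymongo import MongoClient
from pymongo.errors import InvalidOperation

client = MongoClient()
coll = client["transaction-tests"]["test"]

with client.start_session() as session:
    session.start_transaction()
    coll.insert_one({"_id": 1}, session=session)
    session.commit_transaction()
    # Now outside any transaction: the expected events assert no
    # autocommit field (and no txnNumber, which presumes retryable
    # writes are disabled on this test client).
    coll.insert_one({"_id": 2}, session=session)
    try:
        session.commit_transaction()  # nothing to commit
    except InvalidOperation as exc:
        assert "no transaction started" in str(exc).lower()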
"commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/count.json b/test/transactions/unified/count.json new file mode 100644 index 0000000000..404b06beb6 --- /dev/null +++ b/test/transactions/unified/count.json @@ -0,0 +1,177 @@ +{ + "description": "count", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0.2", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "count", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "count", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "OperationNotSupportedInTransaction", + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "count": "test", + "query": { + "_id": 1 + }, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "count", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/create-collection.json b/test/transactions/unified/create-collection.json new file mode 100644 index 0000000000..e190088b3b --- /dev/null +++ b/test/transactions/unified/create-collection.json @@ -0,0 +1,282 @@ +{ + "description": "create-collection", + "schemaVersion": "1.3", + "runOnRequirements": 
[ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "explicitly create collection using create command", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "createCollection", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "object": "testRunner", + "name": "assertCollectionNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertCollectionExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "implicitly create collection using insert", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "assertCollectionNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertCollectionExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/create-index.json b/test/transactions/unified/create-index.json new file mode 100644 index 0000000000..98d6e11547 --- /dev/null +++ b/test/transactions/unified/create-index.json @@ -0,0 +1,313 @@ +{ + "description": "create-index", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.4", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "create index on a non-existing collection", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "createIndex", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "object": "testRunner", + "name": "assertIndexNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertIndexExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + 
"$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "create index on a collection created within the same transaction", + "operations": [ + { + "object": "database0", + "name": "dropCollection", + "arguments": { + "collection": "test" + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "createCollection", + "arguments": { + "session": "session0", + "collection": "test" + } + }, + { + "object": "collection0", + "name": "createIndex", + "arguments": { + "session": "session0", + "name": "t_1", + "keys": { + "x": 1 + } + } + }, + { + "object": "testRunner", + "name": "assertIndexNotExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertIndexExists", + "arguments": { + "databaseName": "transaction-tests", + "collectionName": "test", + "indexName": "t_1" + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "drop": "test", + "writeConcern": { + "$$exists": false + } + }, + "commandName": "drop", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "create": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "create", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "createIndexes": "test", + "indexes": [ + { + "name": "t_1", + "key": { + "x": 1 + } + } + ], + "lsid": { + "$$sessionLsid": "session0" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "createIndexes", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/delete.json b/test/transactions/unified/delete.json new file mode 100644 index 0000000000..4c1cae0a4e --- /dev/null +++ b/test/transactions/unified/delete.json @@ -0,0 +1,425 @@ +{ + "description": "delete", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ], + "tests": [ + { + "description": "delete", + 
"operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection0", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$lte": 3 + } + } + }, + "expectResult": { + "deletedCount": 2 + } + }, + { + "object": "collection0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$lte": 3 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 4 + }, + "limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for delete", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "collection_wc_majority", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$lte": 3 + } + } + }, + "expectResult": { + "deletedCount": 2 + } + }, + { + "object": "session0", + "name": 
"commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 1 + }, + "limit": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": { + "$lte": 3 + } + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/do-not-retry-read-in-transaction.json b/test/transactions/unified/do-not-retry-read-in-transaction.json new file mode 100644 index 0000000000..6d9dc704b8 --- /dev/null +++ b/test/transactions/unified/do-not-retry-read-in-transaction.json @@ -0,0 +1,115 @@ +{ + "description": "do not retry read in a transaction", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2.0", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryReads": true + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-read-in-transaction-test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "tests": [ + { + "description": "find does not retry in a transaction", + "operations": [ + { + "name": "startTransaction", + "object": "session0" + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectError": { + "isError": true, + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll", + "filter": {}, + "startTransaction": true + }, + "commandName": "find", + "databaseName": "retryable-read-in-transaction-test" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels-blockConnection.json b/test/transactions/unified/error-labels-blockConnection.json new file mode 100644 index 
0000000000..8da04d1005 --- /dev/null +++ b/test/transactions/unified/error-labels-blockConnection.json @@ -0,0 +1,235 @@ +{ + "description": "error-labels-blockConnection", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "uriOptions": { + "socketTimeoutMS": 100 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 150 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + 
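Editor's note — a sketch (not part of the diff) of how a test harness arms the failCommand fail point used above, via a plain admin command; this needs a server started with enableTestCommands. The blockTimeMS of 150 ms is chosen to outlast the client's socketTimeoutMS of 100 ms, turning the commit into a network timeout.

from pymongo import MongoClient

client = MongoClient()
client.admin.command({
    "configureFailPoint": "failCommand",
    "mode": {"times": 2},
    "data": {
        "failCommands": ["commitTransaction"],
        "blockConnection": True,  # hold the reply...
        "blockTimeMS": 150,       # ...past the 100 ms socket timeout
    },
})
try:
    pass  # run the transaction under test here
finally:
    client.admin.command({"configureFailPoint": "failCommand", "mode": "off"})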
"databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels-errorLabels.json b/test/transactions/unified/error-labels-errorLabels.json new file mode 100644 index 0000000000..1f95ad3419 --- /dev/null +++ b/test/transactions/unified/error-labels-errorLabels.json @@ -0,0 +1,423 @@ +{ + "description": "error-labels-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "serverless": "forbid", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to retryable commit errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, 
+ { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to writeConcernError ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/error-labels.json 
b/test/transactions/unified/error-labels.json new file mode 100644 index 0000000000..74ed750b07 --- /dev/null +++ b/test/transactions/unified/error-labels.json @@ -0,0 +1,2263 @@ +{ + "description": "error-labels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "DuplicateKey errors do not contain transient label", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 1 + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorContains": "E11000" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + }, + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NotWritablePrimary errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 10107 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + 
"readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "WriteConflict errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 112 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NoSuchTransaction errors contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 251 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + 
"$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "NoSuchTransaction errors on commit contain transient label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 251 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add TransientTransactionError label to connection errors, but do not add RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert", + "find", + "aggregate", + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + 
"filter": {}, + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": {}, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": 
"collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "do not add RetryableWriteError label to writeConcernError ShutdownInProgress that occurs within transaction", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": true, + "autocommit": false + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "multiple errors reported" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError WriteConcernTimeout with wtimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 64, + "errmsg": "waiting for replication timed out", + "errInfo": { + 
"wtimeout": true + } + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnsatisfiableWriteConcern", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "omit UnknownTransactionCommitResult label from writeConcernError UnknownReplWriteConcern", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 79, + "errmsg": "No write concern mode named 'blah' found in replica set configuration" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteConcern", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "do not add UnknownTransactionCommitResult label to MaxTimeMSExpired inside transactions", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 50 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "maxTimeMS": 60000, + "session": "session0" + }, + "expectError": { 
+ "errorLabelsOmit": [ + "RetryableWriteError", + "UnknownTransactionCommitResult", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": {}, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "maxTimeMS": 60000 + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to MaxTimeMSExpired", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 50 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + 
"command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "add UnknownTransactionCommitResult label to writeConcernError MaxTimeMSExpired", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 50, + "errmsg": "operation exceeded time limit" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/errors-client.json b/test/transactions/unified/errors-client.json new file mode 100644 index 0000000000..00f1497c2d --- /dev/null +++ b/test/transactions/unified/errors-client.json @@ -0,0 +1,142 @@ +{ + "description": "errors-client", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + 
"sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "Client side error in command starting transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "object": "testRunner", + "name": "assertSessionTransactionState", + "arguments": { + "session": "session0", + "state": "starting" + } + } + ] + }, + { + "description": "Client side error when transaction is in progress", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "x": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "object": "testRunner", + "name": "assertSessionTransactionState", + "arguments": { + "session": "session0", + "state": "in_progress" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/errors.json b/test/transactions/unified/errors.json new file mode 100644 index 0000000000..94a9cac207 --- /dev/null +++ b/test/transactions/unified/errors.json @@ -0,0 +1,285 @@ +{ + "description": "errors", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "start insert start", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transaction already in progress" + } + }, + { + "object": "session0", + "name": 
"commitTransaction" + } + ] + }, + { + "description": "start twice", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transaction already in progress" + } + } + ] + }, + { + "description": "commit and start twice", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session0", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transaction already in progress" + } + } + ] + }, + { + "description": "write conflict commit", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "WriteConflict", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorCodeName": "NoSuchTransaction", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + } + ] + }, + { + "description": "write conflict abort", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorCodeName": "WriteConflict", + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "abortTransaction" + } + ] + } + ] +} diff --git a/test/transactions/unified/findOneAndDelete.json b/test/transactions/unified/findOneAndDelete.json new file mode 100644 index 0000000000..7db9c872af --- /dev/null +++ b/test/transactions/unified/findOneAndDelete.json @@ -0,0 +1,317 @@ +{ + "description": "findOneAndDelete", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + 
"commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndDelete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + } + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for findOneAndDelete", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + } + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + 
"txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/findOneAndReplace.json b/test/transactions/unified/findOneAndReplace.json new file mode 100644 index 0000000000..f0742f0c60 --- /dev/null +++ b/test/transactions/unified/findOneAndReplace.json @@ -0,0 +1,356 @@ +{ + "description": "findOneAndReplace", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndReplace", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 1 + }, + "upsert": true, + "returnDocument": "After" + }, + "expectResult": { + "_id": 4, + "x": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "x": 1 + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "update": { + "x": 1 + }, + "new": true, + "upsert": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": 
"transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "x": 1 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for findOneAndReplace", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "x": 1 + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/findOneAndUpdate.json b/test/transactions/unified/findOneAndUpdate.json new file mode 100644 index 0000000000..f5308efef3 --- /dev/null +++ b/test/transactions/unified/findOneAndUpdate.json @@ -0,0 +1,546 @@ +{ + "description": "findOneAndUpdate", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + 
"_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "findOneAndUpdate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "returnDocument": "After" + }, + "expectResult": { + "_id": 4, + "x": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3, + "x": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3, + "x": 2 + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": true, + "upsert": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } 
+ }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "x": 2 + }, + { + "_id": 4, + "x": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for findOneAndUpdate", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 3 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 3 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/insert.json b/test/transactions/unified/insert.json new file mode 100644 index 0000000000..9a80d8bf4b --- /dev/null +++ 
b/test/transactions/unified/insert.json @@ -0,0 +1,895 @@ +{ + "description": "insert", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 5 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 5 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "insert with session1", + "operations": [ + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session1" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 4 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": 
"1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "collection writeConcern without transaction", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection_wc_majority", + "database": "database1", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + }, + { + "session": { + "id": "session2", + "client": "client1" + } + } + ] + } + }, + { + "object": "collection_wc_majority", + "name": "insertOne", + "arguments": { + "session": "session2", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session2" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "collection writeConcern ignored for insert", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection_wc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection_wc_majority", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection_wc_majority", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 2, + "1": 3 + } + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + }, + { + "_id": 3 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/isolation.json b/test/transactions/unified/isolation.json new file mode 100644 index 0000000000..5d0a0139fb --- /dev/null +++ b/test/transactions/unified/isolation.json @@ -0,0 +1,281 @@ +{ + "description": "isolation", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "session": { + "id": "session1", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "one transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + 
"_id": 1 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "two transactions", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectResult": [] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1 + } + ] + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-pin-auto.json b/test/transactions/unified/mongos-pin-auto.json new file mode 100644 index 0000000000..27db520401 --- /dev/null +++ b/test/transactions/unified/mongos-pin-auto.json @@ -0,0 +1,5474 @@ +{ + "description": "mongos-pin-auto", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "remain pinned after non-transient Interrupted error on insertOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + 
"arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError", + "UnknownTransactionCommitResult" + ], + "errorCodeName": "Interrupted" + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": 
"session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on insertOne insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on insertMany insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "insertMany", + "object": "collection0", + 
"arguments": { + "session": "session0", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on updateOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on replaceOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on updateMany update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": 
"testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on deleteOne delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on deleteMany delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on findOneAndDelete findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": 
"collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on findOneAndUpdate findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on findOneAndReplace findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": 
"abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on bulkWrite insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on bulkWrite update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on bulkWrite delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + 
"arguments": { + "session": "session0", + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on find find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on countDocuments aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on aggregate aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on distinct distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "session": "session0", + "fieldName": "_id", + "filter": {} + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on runCommand insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "insert", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + } + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + 
"document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient connection error on insertOne insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on insertOne insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": 
[ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on insertMany insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on insertMany insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "insertMany", + "object": "collection0", + "arguments": { + "session": "session0", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on updateOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": 
"testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on updateOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "updateOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on replaceOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on replaceOne update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 91 + } + } + } + }, 
+ { + "name": "replaceOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on updateMany update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on updateMany update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "updateMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on deleteOne delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on deleteOne delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on deleteMany delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + } + } + }, + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on deleteMany delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": 
"collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 1 + } + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on findOneAndDelete findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on findOneAndDelete findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + 
}, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on findOneAndUpdate findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on findOneAndUpdate findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on findOneAndReplace findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "closeConnection": true + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + 
"replacement": { + "y": 1 + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on findOneAndReplace findAndModify", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + }, + "replacement": { + "y": 1 + }, + "returnDocument": "Before" + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on bulkWrite insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on bulkWrite insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + 
"object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on bulkWrite update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "closeConnection": true + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on bulkWrite update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "updateOne": { + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient 
connection error on bulkWrite delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "closeConnection": true + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on bulkWrite delete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection0", + "arguments": { + "session": "session0", + "requests": [ + { + "deleteOne": { + "filter": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on find find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + 
"name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on find find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on countDocuments aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on countDocuments aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection0", + "arguments": { + "session": "session0", + "filter": {} + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + 
{ + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on aggregate aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "closeConnection": true + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on aggregate aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "aggregate", + "object": "collection0", + "arguments": { + "session": "session0", + "pipeline": [] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on distinct distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "closeConnection": true + } + } + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + 
"session": "session0", + "fieldName": "_id", + "filter": {} + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on distinct distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "distinct", + "object": "collection0", + "arguments": { + "session": "session0", + "fieldName": "_id", + "filter": {} + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on runCommand insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "insert", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on runCommand insert", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": 
"session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "session": "session0", + "commandName": "insert", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient connection error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + 
"_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-recovery-token-errorLabels.json b/test/transactions/unified/mongos-recovery-token-errorLabels.json new file mode 100644 index 0000000000..13345c6a29 --- /dev/null +++ b/test/transactions/unified/mongos-recovery-token-errorLabels.json @@ -0,0 +1,211 @@ +{ + "description": "mongos-recovery-token-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "serverless": "forbid", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction retry succeeds on new mongos", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + 
} + ] + } + ] +} diff --git a/test/transactions/unified/mongos-recovery-token.json b/test/transactions/unified/mongos-recovery-token.json new file mode 100644 index 0000000000..bb88aa16bd --- /dev/null +++ b/test/transactions/unified/mongos-recovery-token.json @@ -0,0 +1,568 @@ +{ + "description": "mongos-recovery-token", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction explicit retries include recoveryToken", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + 
"description": "commitTransaction retry fails on new mongos", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "heartbeatFrequencyMS": 30000, + "appName": "transactionsClient" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 7 + }, + "data": { + "failCommands": [ + "commitTransaction", + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "transactionsClient" + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ], + "errorCodeName": "NoSuchTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction sends recoveryToken", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, 
+ { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/mongos-unpin.json b/test/transactions/unified/mongos-unpin.json index 4f7ae43794..4d1ebc87bc 100644 --- a/test/transactions/unified/mongos-unpin.json +++ b/test/transactions/unified/mongos-unpin.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.2", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -52,7 +52,10 @@ "description": "unpin after TransientTransactionError error on commit", "runOnRequirements": [ { - "serverless": "forbid" + "serverless": "forbid", + "topologies": [ + "sharded" + ] } ], "operations": [ @@ -163,7 +166,10 @@ "description": "unpin after non-transient error on abort", "runOnRequirements": [ { - "serverless": "forbid" + "serverless": "forbid", + "topologies": [ + "sharded" + ] } ], "operations": [ @@ -233,6 +239,13 @@ }, { "description": "unpin after TransientTransactionError error on abort", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], "operations": [ { "name": "startTransaction", diff --git a/test/transactions/unified/pin-mongos.json b/test/transactions/unified/pin-mongos.json new file mode 100644 index 0000000000..c96f3f341f --- /dev/null +++ b/test/transactions/unified/pin-mongos.json @@ -0,0 +1,1466 @@ +{ + "description": "pin-mongos", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.1.8", + "serverless": "forbid", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": true, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + 
"collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "countDocuments", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": 
"transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 2 + }, + "session": "session0" + }, + "expectResult": [ + { + "_id": 2 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "insertOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 4 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 5 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 6 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 7 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 8 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 8 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 9 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 9 
+ } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 10 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 10 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + }, + { + "_id": 8 + }, + { + "_id": 9 + }, + { + "_id": 10 + } + ] + } + ] + }, + { + "description": "mixed read write operations", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": 3 + }, + "session": "session0" + }, + "expectResult": 1 + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 4 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 4 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 5 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 5 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 6 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 6 + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 7 + }, + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 7 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + } + ] + } + ] + }, + { + "description": "multiple commits", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } + } + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + 
"arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "remain pinned after non-transient error on commit", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 3, + "1": 4 + } + } + } + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 51 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ], + "errorCode": 51 + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 4 + } + }, + 
"expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unpin after transient error within a transaction and commit", + "runOnRequirements": [ + { + "topologies": [ + "sharded" + ] + } + ], + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": true, + "uriOptions": { + "heartbeatFrequencyMS": 30000, + "appName": "transactionsClient" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "testRunner", + "name": "targetedFailPoint", + "arguments": { + "session": "session1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 7 + }, + "data": { + "failCommands": [ + "insert", + "isMaster", + "hello" + ], + "closeConnection": true, + "appName": "transactionsClient" + } + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 4 + } + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ] + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + 
"TransientTransactionError" + ], + "errorLabelsOmit": [ + "UnknownTransactionCommitResult" + ], + "errorCodeName": "NoSuchTransaction" + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "recoveryToken": { + "$$exists": true + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/read-concern.json b/test/transactions/unified/read-concern.json new file mode 100644 index 0000000000..b3bd967c09 --- /dev/null +++ b/test/transactions/unified/read-concern.json @@ -0,0 +1,1924 @@ +{ + "description": "read-concern", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "database": { + "id": "database_rc_majority", + "client": "client0", + "databaseName": "transaction-tests", + "databaseOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "collection": { + "id": "collection_rc_majority", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "only first countDocuments includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "collection_rc_majority", + 
"name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first find includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + 
"batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first aggregate includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + 
"databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first distinct includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "only first runCommand includes readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + } + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + 
"command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "level": "majority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "countDocuments ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "collection_rc_majority", + "name": "countDocuments", + "arguments": { + "filter": { + "_id": { + "$gte": 2 + } + }, + "session": "session0" + }, + "expectResult": 3 + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$match": { + "_id": { + "$gte": 2 + } + } + }, + { + "$group": { + "_id": 1, + "n": { + "$sum": 1 + } + } + } + ], + "cursor": {}, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + 
"txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "find ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "aggregate ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": 
"collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rc_majority", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "distinct ignores collection readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "collection_rc_majority", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + 
"expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "runCommand ignores database readConcern", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database_rc_majority", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + 
"_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/read-pref.json b/test/transactions/unified/read-pref.json new file mode 100644 index 0000000000..eda00bd10d --- /dev/null +++ b/test/transactions/unified/read-pref.json @@ -0,0 +1,728 @@ +{ + "description": "read-pref", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_rp_primary", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_rp_secondary", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "default readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_secondary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectResult": [ + { + "count": 1 + } + ] + }, + { + "object": "collection_rp_secondary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rp_secondary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "primary readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "primary" + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_secondary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + 
"_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectResult": [ + { + "count": 1 + } + ] + }, + { + "object": "collection_rp_secondary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection_rp_secondary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "secondary readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "primaryPreferred readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "primaryPreferred" + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + 
} + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "nearest readPreference", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "nearest" + } + } + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ], + "session": "session0" + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2, + "2": 3, + "3": 4 + } + } + } + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "session": "session0", + "pipeline": [ + { + "$match": { + "_id": 1 + } + }, + { + "$count": "count" + } + ] + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "collection_rp_primary", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "secondary write only", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/reads.json b/test/transactions/unified/reads.json new file mode 100644 index 0000000000..52e8457634 --- /dev/null +++ b/test/transactions/unified/reads.json @@ -0,0 +1,706 @@ +{ + "description": "reads", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": 
"client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "tests": [ + { + "description": "collection readConcern without transaction", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "readConcern": { + "level": "majority" + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$$exists": false + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + } + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "find", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection0", + "name": "find", + "arguments": { + "batchSize": 3, + "filter": {}, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "find", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": 
"getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "aggregate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "batchSize": 3, + "session": "session0" + }, + "expectResult": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "aggregate": "test", + "pipeline": [ + { + "$project": { + "_id": 1 + } + } + ], + "cursor": { + "batchSize": 3 + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "aggregate", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "test", + "batchSize": 3, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false + }, + "commandName": "getMore", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 
+ }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + }, + { + "description": "distinct", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "distinct", + "arguments": { + "fieldName": "_id", + "filter": {}, + "session": "session0" + }, + "expectResult": [ + 1, + 2, + 3, + 4 + ] + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "distinct": "test", + "key": "_id", + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "distinct", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "readConcern": { + "$$exists": false + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-abort-errorLabels.json b/test/transactions/unified/retryable-abort-errorLabels.json new file mode 100644 index 0000000000..77a1b03eb0 --- /dev/null +++ b/test/transactions/unified/retryable-abort-errorLabels.json @@ -0,0 +1,2436 @@ +{ + "description": "retryable-abort-errorLabels", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abortTransaction only retries once with RetryableWriteError from server", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + 
"readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry without RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + 
"$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + 
"databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + 
"session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": 
"admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": 
"transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", 
+ "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + 
"$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11602, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": 
[ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git 
a/test/transactions/unified/retryable-abort.json b/test/transactions/unified/retryable-abort.json new file mode 100644 index 0000000000..381cfa91f8 --- /dev/null +++ b/test/transactions/unified/retryable-abort.json @@ -0,0 +1,600 @@ +{ + "description": "retryable-abort", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "abortTransaction only performs a single retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": 
"transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry after Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction does not retry after WriteConcernError Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "writeConcernError": { + "code": 11601, + "errmsg": "operation was interrupted" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "abortTransaction succeeds after connection error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit-errorLabels.json b/test/transactions/unified/retryable-commit-errorLabels.json new file mode 100644 index 0000000000..d3ce8b148e --- /dev/null +++ b/test/transactions/unified/retryable-commit-errorLabels.json @@ -0,0 +1,2564 @@ +{ + "description": "retryable-commit-errorLabels", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": 
"client1" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction does not retry error without RetryableWriteError label", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11600, + "errorLabels": [] + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commitTransaction retries once with RetryableWriteError from server", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 112, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + 
"writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotWritablePrimary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 10107, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotPrimaryOrSecondary", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 13436, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NotPrimaryNoSecondaryOk", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 13435, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { 
+ "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11602, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + 
"commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after HostNotFound", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after HostUnreachable", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 6, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + 
} + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after SocketException", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 9001, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + 
"$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after NetworkTimeout", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 89, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError InterruptedAtShutdown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11600, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError InterruptedDueToReplStateChange", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 11602, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": 
"test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError PrimarySteppedDown", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 189, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after WriteConcernError ShutdownInProgress", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + 
"$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after InterruptedAtShutdown", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11600, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after ShutdownInProgress", + "runOnRequirements": [ + { + "serverless": "forbid" + } 
+ ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 91, + "errorLabels": [ + "RetryableWriteError" + ], + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-commit.json b/test/transactions/unified/retryable-commit.json new file mode 100644 index 0000000000..b794c1c55c --- /dev/null +++ b/test/transactions/unified/retryable-commit.json @@ -0,0 +1,868 @@ +{ + "description": "retryable-commit", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ], + "initialData": [ + 
{ + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "commitTransaction fails after Interrupted", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "errorCode": 11601, + "closeConnection": false + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorCodeName": "Interrupted", + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "commitTransaction is not retried after UnsatisfiableWriteConcern error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "writeConcernError": { + "code": 100, + "errmsg": "Not enough data-bearing nodes" + } + } + } + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction", + "expectError": { + "errorLabelsOmit": [ + "RetryableWriteError", + "TransientTransactionError", + "UnknownTransactionCommitResult" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction fails after two errors", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction applies majority write concern on retries", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client1", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 2, + "journal": true, + "wtimeoutMS": 5000 + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction", + "expectError": { + "errorLabelsContain": [ + "RetryableWriteError", + "UnknownTransactionCommitResult" + ], + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 2, + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "j": true, + "wtimeout": 5000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commitTransaction succeeds after connection error", + "runOnRequirements": [ + { + "serverless": "forbid" + } + ], + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { 
+ "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority", + "wtimeout": 10000 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/retryable-writes.json b/test/transactions/unified/retryable-writes.json new file mode 100644 index 0000000000..c196e68622 --- /dev/null +++ b/test/transactions/unified/retryable-writes.json @@ -0,0 +1,468 @@ +{ + "description": "retryable-writes", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "increment txnNumber", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + }, + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "session": "session0" + }, + 
"expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 4, + "1": 5 + } + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 3 + } + ], + "ordered": true, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "3" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 4 + }, + { + "_id": 5 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "4" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 4 + }, + { + "_id": 5 + } + ] + } + ] + }, + { + "description": "writes are not retried", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "closeConnection": true + } + } + } + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectError": { + "errorLabelsContain": [ + 
"TransientTransactionError" + ] + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + } + ] +} diff --git a/test/transactions/unified/run-command.json b/test/transactions/unified/run-command.json new file mode 100644 index 0000000000..7bd420ef74 --- /dev/null +++ b/test/transactions/unified/run-command.json @@ -0,0 +1,421 @@ +{ + "description": "run-command", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "run command with default read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command with secondary read preference in client option and primary 
read preference in transaction options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "secondary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "primary" + } + } + }, + { + "object": "database1", + "name": "runCommand", + "arguments": { + "session": "session1", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command with explicit primary read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ] + }, + "readPreference": { + "mode": "primary" + }, + "commandName": "insert" + }, + "expectResult": { + "n": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + }, + { + "description": "run command fails with explicit secondary read preference", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "readPreference": { + "mode": "secondary" + }, + "commandName": "find" + }, + 
"expectError": { + "errorContains": "read preference in a transaction must be primary" + } + } + ] + }, + { + "description": "run command fails with secondary read preference from transaction options", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "database0", + "name": "runCommand", + "arguments": { + "session": "session0", + "command": { + "find": "test" + }, + "commandName": "find" + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + } + ] + } + ] +} diff --git a/test/transactions/unified/transaction-options-repl.json b/test/transactions/unified/transaction-options-repl.json new file mode 100644 index 0000000000..dc2cb77582 --- /dev/null +++ b/test/transactions/unified/transaction-options-repl.json @@ -0,0 +1,267 @@ +{ + "description": "transaction-options-repl", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "readConcern snapshot in startTransaction options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "snapshot" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "snapshot" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + 
"startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "snapshot", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/transaction-options.json b/test/transactions/unified/transaction-options.json new file mode 100644 index 0000000000..78e4c8207b --- /dev/null +++ b/test/transactions/unified/transaction-options.json @@ -0,0 +1,2081 @@ +{ + "description": "transaction-options", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ], + "tests": [ + { + "description": "no transaction options set", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + }, + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { 
+ "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction options inherited from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": 
"insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "transaction options inherited from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "session": { + "id": "session1", + "client": "client0", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": 1 + }, + "maxCommitTimeMS": 60000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": 
{ + "w": 1 + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "startTransaction options override defaults", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "snapshot" + }, + "writeConcern": { + "w": 1 + }, + "maxCommitTimeMS": 30000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 
1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "defaultTransactionOptions override client options", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readConcernLevel": "local", + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "w": "majority" + }, + "maxCommitTimeMS": 60000 + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority" + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": 
"transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": 60000 + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "majority", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": "majority" + }, + "maxTimeMS": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "readConcern local in defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readConcern": { + "level": "local" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session1", + "name": "commitTransaction" + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 2 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 2 + } + } + } + }, + { + "object": "session1", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local" + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + 
"commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "level": "local", + "afterClusterTime": { + "$$exists": true + } + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "2" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "w": 1 + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for bulk", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "collection1", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ], + "session": "session1" + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": 
"transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "readPreference inherited from client", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "secondary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "readPreference inherited from defaultTransactionOptions", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "primary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readPreference": { + "mode": "secondary" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction" + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 
+ } + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "startTransaction overrides readPreference", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "readPreference": "primary" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database1", + "client": "client1", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection1", + "database": "database1", + "collectionName": "test" + } + }, + { + "session": { + "id": "session1", + "client": "client1", + "sessionOptions": { + "defaultTransactionOptions": { + "readPreference": { + "mode": "primary" + } + } + } + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "arguments": { + "readPreference": { + "mode": "secondary" + } + } + }, + { + "object": "collection1", + "name": "insertOne", + "arguments": { + "session": "session1", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "collection1", + "name": "find", + "arguments": { + "session": "session1", + "filter": { + "_id": 1 + } + }, + "expectError": { + "errorContains": "read preference in a transaction must be primary" + } + }, + { + "object": "session1", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client1", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session1" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + 
"outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/update.json b/test/transactions/unified/update.json new file mode 100644 index 0000000000..8090fc9087 --- /dev/null +++ b/test/transactions/unified/update.json @@ -0,0 +1,565 @@ +{ + "description": "update", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + ], + "tests": [ + { + "description": "update", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + }, + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "session": "session0", + "filter": { + "x": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 3 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "x": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 3 + } + }, + "u": { + 
"$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3, + "z": 1 + }, + { + "_id": 4, + "y": 1, + "z": 1 + } + ] + } + ] + }, + { + "description": "collections writeConcern ignored for update", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": "majority" + } + } + } + } + ] + } + }, + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection1", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 4 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 1, + "upsertedId": 4 + } + }, + { + "object": "collection1", + "name": "replaceOne", + "arguments": { + "session": "session0", + "filter": { + "x": 1 + }, + "replacement": { + "y": 1 + } + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "collection1", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": { + "$gte": 3 + } + }, + "update": { + "$set": { + "z": 1 + } + } + }, + "expectResult": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 4 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "readConcern": { + "$$exists": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "x": 1 + }, + "u": { + "y": 1 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { 
+ "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": { + "$gte": 3 + } + }, + "u": { + "$set": { + "z": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ] + } + ] +} diff --git a/test/transactions/unified/write-concern.json b/test/transactions/unified/write-concern.json new file mode 100644 index 0000000000..29d1977a82 --- /dev/null +++ b/test/transactions/unified/write-concern.json @@ -0,0 +1,1588 @@ +{ + "description": "write-concern", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.1.8", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "test" + } + }, + { + "collection": { + "id": "collection_w0", + "database": "database0", + "collectionName": "test", + "collectionOptions": { + "writeConcern": { + "w": 0 + } + } + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ], + "tests": [ + { + "description": "commit with majority", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + 
"collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "commit with default", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "abort with majority", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": "majority" + } + } + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": "majority" + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ] + }, + { + "description": "abort with default", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + 
"events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "abortTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "abortTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + } + ] + } + ] + }, + { + "description": "start with unacknowledged write concern", + "operations": [ + { + "object": "session0", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "transactions do not support unacknowledged write concern" + } + } + ] + }, + { + "description": "start with implicit unacknowledged write concern", + "operations": [ + { + "object": "testRunner", + "name": "createEntities", + "arguments": { + "entities": [ + { + "client": { + "id": "client1", + "useMultipleMongoses": false, + "uriOptions": { + "w": 0 + } + } + }, + { + "session": { + "id": "session1", + "client": "client1" + } + } + ] + } + }, + { + "object": "session1", + "name": "startTransaction", + "expectError": { + "isClientError": true, + "errorContains": "transactions do not support unacknowledged write concern" + } + } + ] + }, + { + "description": "unacknowledged write concern coll insertOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 1 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 1 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll insertMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "insertMany", + "arguments": { + 
"session": "session0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedIds": { + "$$unsetOrMatches": { + "0": 1, + "1": 2 + } + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "bulkWrite", + "arguments": { + "session": "session0", + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectResult": { + "deletedCount": 0, + "insertedCount": 1, + "insertedIds": { + "$$unsetOrMatches": { + "0": 1 + } + }, + "matchedCount": 0, + "modifiedCount": 0, + "upsertedCount": 0, + "upsertedIds": {} + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "test", + "documents": [ + { + "_id": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "insert", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0 + }, + { + "_id": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll deleteOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "deleteOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 0 + }, + 
"limit": 1 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll deleteMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "deleteMany", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "deletedCount": 1 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "delete": "test", + "deletes": [ + { + "q": { + "_id": 0 + }, + "limit": 0 + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "delete", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll updateOne", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "updateOne", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 0 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "upsert": true, + "multi": { + "$$unsetOrMatches": false + } + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + 
}, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll updateMany", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "updateMany", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "upsert": true + }, + "expectResult": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedCount": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "test", + "updates": [ + { + "q": { + "_id": 0 + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": true + } + ], + "ordered": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "update", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndDelete", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndDelete", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + } + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "remove": true, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndReplace", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndReplace", + "arguments": { + "session": "session0", 
+ "filter": { + "_id": 0 + }, + "replacement": { + "x": 1 + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "update": { + "x": 1 + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + }, + { + "description": "unacknowledged write concern coll findOneAndUpdate", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection_w0", + "name": "findOneAndUpdate", + "arguments": { + "session": "session0", + "filter": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "returnDocument": "Before" + }, + "expectResult": { + "_id": 0 + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "test", + "query": { + "_id": 0 + }, + "update": { + "$inc": { + "x": 1 + } + }, + "new": { + "$$unsetOrMatches": false + }, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": true, + "autocommit": false, + "readConcern": { + "$$exists": false + }, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "findAndModify", + "databaseName": "transaction-tests" + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 0, + "x": 1 + } + ] + } + ] + } + ] +} diff --git a/test/unicode/test_utf8.py b/test/unicode/test_utf8.py index 7ce2936b7a..578d98bffb 100644 --- a/test/unicode/test_utf8.py +++ b/test/unicode/test_utf8.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import sys sys.path[0:0] = [""] @@ -9,7 +11,6 @@ class TestUTF8(unittest.TestCase): - # Verify that python and bson have the same understanding of # legal utf-8 if the first byte is 0xf4 (244) def _assert_same_utf8_validation(self, data): diff --git a/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json new file mode 100644 index 0000000000..9c659c8f76 --- /dev/null +++ 
b/test/unified-test-format/invalid/clientEncryptionOpts-kmsProviders-invalidName.json @@ -0,0 +1,29 @@ +{ + "description": "clientEncryptionOpts-kmsProviders-invalidName", + "schemaVersion": "1.18", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "clientEncryption": { + "id": "clientEncryption0", + "clientEncryptionOpts": { + "keyVaultClient": "client0", + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws:name_with_invalid_character*": {} + } + } + } + } + ], + "tests": [ + { + "description": "", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json new file mode 100644 index 0000000000..87cbd21125 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-minProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeLogMessages-minProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": {} + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json new file mode 100644 index 0000000000..fed0accd6e --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-type.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeLogMessages-property-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": { + "command": {} + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json new file mode 100644 index 0000000000..f14b18d6de --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-property-value.json @@ -0,0 +1,20 @@ +{ + "description": "entity-client-observeLogMessages-property-value", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": { + "command": "notALogLevel" + } + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json b/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json new file mode 100644 index 0000000000..8a277034e2 --- /dev/null +++ b/test/unified-test-format/invalid/entity-client-observeLogMessages-type.json @@ -0,0 +1,18 @@ +{ + "description": "entity-client-observeLogMessages-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0", + "observeLogMessages": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json deleted file mode 100644 index d94863ed11..0000000000 --- a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-minItems.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-minItems", - "schemaVersion": "1.2", -
"createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json b/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json deleted file mode 100644 index 79f6b85ed2..0000000000 --- a/test/unified-test-format/invalid/entity-client-storeEventsAsEntities-type.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-type", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": 0 - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/entity-thread-additionalProperties.json b/test/unified-test-format/invalid/entity-thread-additionalProperties.json new file mode 100644 index 0000000000..b296719f13 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-additionalProperties.json @@ -0,0 +1,18 @@ +{ + "description": "entity-thread-additionalProperties", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": "thread0", + "foo": "bar" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-required.json b/test/unified-test-format/invalid/entity-thread-id-required.json new file mode 100644 index 0000000000..3b197e3d6b --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-required.json @@ -0,0 +1,15 @@ +{ + "description": "entity-thread-id-required", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": {} + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/entity-thread-id-type.json b/test/unified-test-format/invalid/entity-thread-id-type.json new file mode 100644 index 0000000000..8f281ef6f4 --- /dev/null +++ b/test/unified-test-format/invalid/entity-thread-id-type.json @@ -0,0 +1,17 @@ +{ + "description": "entity-thread-id-type", + "schemaVersion": "1.10", + "createEntities": [ + { + "thread": { + "id": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json new file mode 100644 index 0000000000..de59318822 --- /dev/null +++ b/test/unified-test-format/invalid/expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedCmapEvent-poolClearedEvent-interruptInUseConnections-type", + "schemaVersion": "1.11", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": { + "interruptInUseConnections": "foo" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json new file mode 100644 index 0000000000..f6a305b89a --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandFailedEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": 
"expectedCommandEvent-commandFailedEvent-databaseName-type", + "schemaVersion": "1.15", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandFailedEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json new file mode 100644 index 0000000000..47b8c8bb9d --- /dev/null +++ b/test/unified-test-format/invalid/expectedCommandEvent-commandSucceededEvent-databaseName-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedCommandEvent-commandSucceededEvent-databaseName-type", + "schemaVersion": "1.15", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandSucceededEvent": { + "databaseName": 0 + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedError-errorResponse-type.json b/test/unified-test-format/invalid/expectedError-errorResponse-type.json new file mode 100644 index 0000000000..6eb66d9b0b --- /dev/null +++ b/test/unified-test-format/invalid/expectedError-errorResponse-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedError-errorResponse-type", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [ + { + "name": "foo", + "object": "client0", + "expectError": { + "errorResponse": 0 + } + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json b/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json new file mode 100644 index 0000000000..cd7cf8726c --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessage-additionalProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-enum.json b/test/unified-test-format/invalid/expectedLogMessage-component-enum.json new file mode 100644 index 0000000000..2283e9b243 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-enum.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-component-enum", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "foo", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-required.json b/test/unified-test-format/invalid/expectedLogMessage-component-required.json new file mode 100644 index 0000000000..f3a157787f --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-component-type", + "schemaVersion": "1.13", + 
"createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-component-type.json b/test/unified-test-format/invalid/expectedLogMessage-component-type.json new file mode 100644 index 0000000000..af8f711573 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-component-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-component-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": 0, + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-data-required.json b/test/unified-test-format/invalid/expectedLogMessage-data-required.json new file mode 100644 index 0000000000..7e8152dddd --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-data-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-data-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command" + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-data-type.json b/test/unified-test-format/invalid/expectedLogMessage-data-type.json new file mode 100644 index 0000000000..4f81fb6272 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-data-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-data-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command", + "data": 0 + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json b/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json new file mode 100644 index 0000000000..190748a185 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-failureIsRedacted-type.json @@ -0,0 +1,30 @@ +{ + "description": "expectedLogMessage-failureIsRedacted-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "debug", + "component": "command", + "failureIsRedacted": 0, + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-enum.json b/test/unified-test-format/invalid/expectedLogMessage-level-enum.json new file mode 100644 index 0000000000..f4c886bb68 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-enum.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-level-enum", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": 
[], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": "foo", + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-required.json b/test/unified-test-format/invalid/expectedLogMessage-level-required.json new file mode 100644 index 0000000000..27c9c7a6cd --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-required.json @@ -0,0 +1,28 @@ +{ + "description": "expectedLogMessage-level-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessage-level-type.json b/test/unified-test-format/invalid/expectedLogMessage-level-type.json new file mode 100644 index 0000000000..180d7afcd6 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessage-level-type.json @@ -0,0 +1,29 @@ +{ + "description": "expectedLogMessage-level-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + { + "level": 0, + "component": "command", + "data": {} + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json new file mode 100644 index 0000000000..306b78b446 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-additionalProperties.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-additionalProperties", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "foo": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json new file mode 100644 index 0000000000..d8e1100bea --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedLogMessagesForClient-client-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "messages": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json new file mode 100644 index 0000000000..5399cac029 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-client-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedEventsForClient-client-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": 0, + "messages": [] + } + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json new file mode 100644 index 0000000000..a9f2da9bce --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreExtraMessages-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-ignoreExtraMessages-type", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "ignoreExtraMessages": "true", + "messages": [] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json new file mode 100644 index 0000000000..345faf41f5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-items.json @@ -0,0 +1,26 @@ +{ + "description": "expectedLogMessagesForClient-ignoreMessages-items", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "ignoreMessages": [ + 0 + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json new file mode 100644 index 0000000000..4bc2d41dbf --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-ignoreMessages-type.json @@ -0,0 +1,24 @@ +{ + "description": "expectedLogMessagesForClient-ignoreMessages-type", + "schemaVersion": "1.16", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [], + "ignoreMessages": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json new file mode 100644 index 0000000000..9788d8fe5c --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-items.json @@ -0,0 +1,25 @@ +{ + "description": "expectedLogMessagesForClient-messages-items", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": [ + 0 + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json new file mode 100644 index 0000000000..85d070672f --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-required.json @@ -0,0 +1,22 @@ +{ + "description": "expectedLogMessagesForClient-messages-required", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0" + } + ] + } + ] +} diff --git 
a/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json new file mode 100644 index 0000000000..27531667c5 --- /dev/null +++ b/test/unified-test-format/invalid/expectedLogMessagesForClient-messages-type.json @@ -0,0 +1,23 @@ +{ + "description": "expectedLogMessagesForClient-messages-type", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + { + "client": "client0", + "messages": 0 + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json new file mode 100644 index 0000000000..1c6ec460b7 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json new file mode 100644 index 0000000000..58f686739a --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-additionalProperties", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "foo": "bar" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json new file mode 100644 index 0000000000..1b4a7e2e70 --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-enum", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": "not a server type" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json new file mode 100644 index 0000000000..c7ea9cc9be --- /dev/null +++ 
b/test/unified-test-format/invalid/expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type.json @@ -0,0 +1,25 @@ +{ + "description": "expectedSdamEvent-serverDescriptionChangedEvent-serverDescription-type-type", + "schemaVersion": "1.10", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "serverDescriptionChangedEvent": { + "previousDescription": { + "type": 12 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json b/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json new file mode 100644 index 0000000000..ef2686e93f --- /dev/null +++ b/test/unified-test-format/invalid/expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties.json @@ -0,0 +1,23 @@ +{ + "description": "expectedSdamEvent-topologyDescriptionChangedEvent-additionalProperties", + "schemaVersion": "1.14", + "tests": [ + { + "description": "foo", + "operations": [], + "expectEvents": [ + { + "client": "client0", + "eventType": "sdam", + "events": [ + { + "topologyDescriptionChangedEvent": { + "foo": "bar" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json b/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json new file mode 100644 index 0000000000..007f3f304c --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-authMechanism-type.json @@ -0,0 +1,15 @@ +{ + "description": "runOnRequirement-authMechanism-type", + "schemaVersion": "1.19", + "runOnRequirements": [ + { + "authMechanism": 0 + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json new file mode 100644 index 0000000000..1db023bf68 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-pattern.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-csfle-minLibmongocryptVersion-pattern", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "csfle": { + "minLibmongocryptVersion": "1.2.3.4" + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json new file mode 100644 index 0000000000..8de7b293f1 --- /dev/null +++ b/test/unified-test-format/invalid/runOnRequirement-csfle-minLibmongocryptVersion-type.json @@ -0,0 +1,17 @@ +{ + "description": "runOnRequirement-csfle-minLibmongocryptVersion-type", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "csfle": { + "minLibmongocryptVersion": 0 + } + } + ], + "tests": [ + { + "description": "foo", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json b/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json deleted file mode 100644 index 5357da8d8d..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-additionalProperties.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "description": "storeEventsAsEntity-additionalProperties", - "schemaVersion": 
"1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": [ - "CommandStartedEvent" - ], - "foo": 0 - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json deleted file mode 100644 index ee99a55381..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-events-enum.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "description": "storeEventsAsEntity-events-enum", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": [ - "foo" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json deleted file mode 100644 index ddab042b1b..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-events-minItems.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "description": "storeEventsAsEntity-events-minItems", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": [] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json deleted file mode 100644 index 90b45918ce..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-events-required.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "description": "storeEventsAsEntity-events-required", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events" - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json b/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json deleted file mode 100644 index 1b920ebd5d..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-events-type.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "description": "storeEventsAsEntity-events-type", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": 0 - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json b/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json deleted file mode 100644 index 71387c5315..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-id-required.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "description": "storeEventsAsEntity-id-required", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "events": [ - "CommandStartedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json b/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json 
deleted file mode 100644 index 4f52dc2533..0000000000 --- a/test/unified-test-format/invalid/storeEventsAsEntity-id-type.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "description": "storeEventsAsEntity-id-type", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": 0, - "events": [ - "CommandStartedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/invalid/test-expectLogMessages-items.json b/test/unified-test-format/invalid/test-expectLogMessages-items.json new file mode 100644 index 0000000000..be4a609c56 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-items.json @@ -0,0 +1,13 @@ +{ + "description": "test-expectLogMessages-items", + "schemaVersion": "1.13", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [ + 0 + ] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectLogMessages-minItems.json b/test/unified-test-format/invalid/test-expectLogMessages-minItems.json new file mode 100644 index 0000000000..d7a07c2e77 --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-minItems.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectLogMessages-minItems", + "schemaVersion": "1.11", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": [] + } + ] +} diff --git a/test/unified-test-format/invalid/test-expectLogMessages-type.json b/test/unified-test-format/invalid/test-expectLogMessages-type.json new file mode 100644 index 0000000000..9a8d6fcdfb --- /dev/null +++ b/test/unified-test-format/invalid/test-expectLogMessages-type.json @@ -0,0 +1,11 @@ +{ + "description": "test-expectLogMessages-type", + "schemaVersion": "1.13", + "tests": [ + { + "description": "foo", + "operations": [], + "expectLogMessages": 0 + } + ] +} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json deleted file mode 100644 index 8c0c4d2041..0000000000 --- a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_with_client_id.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-conflict_with_client_id", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0", - "events": [ - "PoolCreatedEvent", - "PoolReadyEvent", - "PoolClearedEvent", - "PoolClosedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json deleted file mode 100644 index 77bc4abf2e..0000000000 --- a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_different_array.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-conflict_within_different_array", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "events", - "events": [ - "PoolCreatedEvent", - "PoolReadyEvent", - "PoolClearedEvent", - "PoolClosedEvent" - ] - } - ] - } - }, - { - "client": { - "id": 
"client1", - "storeEventsAsEntities": [ - { - "id": "events", - "events": [ - "CommandStartedEvent", - "CommandSucceededEvent", - "CommandFailedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json b/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json deleted file mode 100644 index e1a9499883..0000000000 --- a/test/unified-test-format/valid-fail/entity-client-storeEventsAsEntities-conflict_within_same_array.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities-conflict_within_same_array", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "events", - "events": [ - "PoolCreatedEvent", - "PoolReadyEvent", - "PoolClearedEvent", - "PoolClosedEvent" - ] - }, - { - "id": "events", - "events": [ - "CommandStartedEvent", - "CommandSucceededEvent", - "CommandFailedEvent" - ] - } - ] - } - } - ], - "tests": [ - { - "description": "foo", - "operations": [] - } - ] -} diff --git a/test/unified-test-format/valid-fail/operator-matchAsDocument.json b/test/unified-test-format/valid-fail/operator-matchAsDocument.json new file mode 100644 index 0000000000..24f6be9cb8 --- /dev/null +++ b/test/unified-test-format/valid-fail/operator-matchAsDocument.json @@ -0,0 +1,205 @@ +{ + "description": "operator-matchAsDocument", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "json": "{ \"x\": 1, \"y\": 2 }" + }, + { + "_id": 2, + "json": "1" + }, + { + "_id": 3, + "json": "[ \"foo\" ]" + }, + { + "_id": 4, + "json": "{ \"x\" }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsDocument with non-matching filter", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": "two" + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument does not permit extra fields", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1 + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument expects JSON object but given scalar", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument expects JSON object but given array", + 
"operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 3 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 3, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument fails to decode Extended JSON", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 4 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 4, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": {} + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-fail/operator-matchAsRoot.json b/test/unified-test-format/valid-fail/operator-matchAsRoot.json new file mode 100644 index 0000000000..ec6309418c --- /dev/null +++ b/test/unified-test-format/valid-fail/operator-matchAsRoot.json @@ -0,0 +1,67 @@ +{ + "description": "operator-matchAsRoot", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "x": { + "y": 2, + "z": 3 + } + } + ] + } + ], + "tests": [ + { + "description": "matchAsRoot with nested document does not match", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 3 + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/collectionData-createOptions.json b/test/unified-test-format/valid-pass/collectionData-createOptions.json index 64f8fb02ff..19edc2247b 100644 --- a/test/unified-test-format/valid-pass/collectionData-createOptions.json +++ b/test/unified-test-format/valid-pass/collectionData-createOptions.json @@ -49,19 +49,29 @@ "description": "collection is created with the correct options", "operations": [ { - "name": "runCommand", - "object": "database0", + "object": "collection0", + "name": "aggregate", "arguments": { - "commandName": "collStats", - "command": { - "collStats": "coll0", - "scale": 1 - } + "pipeline": [ + { + "$collStats": { + "storageStats": {} + } + }, + { + "$project": { + "capped": "$storageStats.capped", + "maxSize": "$storageStats.maxSize" + } + } + ] }, - "expectResult": { - "capped": true, - "maxSize": 4096 - } + "expectResult": [ + { + "capped": true, + "maxSize": 4096 + } + ] } ] } diff --git a/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json b/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json deleted file mode 100644 index e37e5a1acd..0000000000 --- a/test/unified-test-format/valid-pass/entity-client-storeEventsAsEntities.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "description": "entity-client-storeEventsAsEntities", - "schemaVersion": "1.2", - "createEntities": [ - { - "client": { - "id": "client0", - "storeEventsAsEntities": [ - { - "id": "client0_events", - "events": [ - "CommandStartedEvent", - "CommandSucceededEvent", - "CommandFailedEvent" - ] - } - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - 
"collectionName": "coll0", - "databaseName": "test", - "documents": [ - { - "_id": 1, - "x": 11 - } - ] - } - ], - "tests": [ - { - "description": "storeEventsAsEntities captures events", - "operations": [ - { - "name": "find", - "object": "collection0", - "arguments": { - "filter": {} - }, - "expectResult": [ - { - "_id": 1, - "x": 11 - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/valid-pass/entity-commandCursor.json b/test/unified-test-format/valid-pass/entity-commandCursor.json new file mode 100644 index 0000000000..72b74b4a9a --- /dev/null +++ b/test/unified-test-format/valid-pass/entity-commandCursor.json @@ -0,0 +1,278 @@ +{ + "description": "entity-commandCursor", + "schemaVersion": "1.3", + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "collection", + "database": "db", + "collectionName": "collection" + } + } + ], + "initialData": [ + { + "collectionName": "collection", + "databaseName": "db", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "tests": [ + { + "description": "runCursorCommand creates and exhausts cursor by running getMores", + "operations": [ + { + "name": "runCursorCommand", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + }, + { + "commandStartedEvent": { + "command": { + "getMore": { + "$$type": [ + "int", + "long" + ] + }, + "collection": "collection", + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "getMore" + } + } + ] + } + ] + }, + { + "description": "createCommandCursor creates a cursor and stores it as an entity that can be iterated one document at a time", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "saveResultAsEntity": "myRunCommandCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 2, + "x": 22 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 3, + "x": 33 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 4, + "x": 44 + } + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + 
"expectResult": { + "_id": 5, + "x": 55 + } + } + ] + }, + { + "description": "createCommandCursor's cursor can be closed and will perform a killCursors operation", + "operations": [ + { + "name": "createCommandCursor", + "object": "db", + "arguments": { + "commandName": "find", + "batchSize": 2, + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2 + } + }, + "saveResultAsEntity": "myRunCommandCursor" + }, + { + "name": "iterateUntilDocumentOrError", + "object": "myRunCommandCursor", + "expectResult": { + "_id": 1, + "x": 11 + } + }, + { + "name": "close", + "object": "myRunCommandCursor" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "collection", + "filter": {}, + "batchSize": 2, + "$db": "db", + "lsid": { + "$$exists": true + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "killCursors": "collection", + "cursors": { + "$$type": "array" + } + }, + "commandName": "killCursors" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json index 88fc28e34e..b17ae78b94 100644 --- a/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json +++ b/test/unified-test-format/valid-pass/entity-cursor-iterateOnce.json @@ -93,7 +93,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, diff --git a/test/unified-test-format/valid-pass/entity-find-cursor.json b/test/unified-test-format/valid-pass/entity-find-cursor.json index 85b8f69d7f..6f955d81f4 100644 --- a/test/unified-test-format/valid-pass/entity-find-cursor.json +++ b/test/unified-test-format/valid-pass/entity-find-cursor.json @@ -109,7 +109,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" @@ -126,7 +129,10 @@ "commandStartedEvent": { "command": { "getMore": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "collection": "coll0" }, @@ -138,7 +144,10 @@ "reply": { "cursor": { "id": { - "$$type": "long" + "$$type": [ + "int", + "long" + ] }, "ns": { "$$type": "string" diff --git a/test/unified-test-format/valid-pass/expectedError-errorResponse.json b/test/unified-test-format/valid-pass/expectedError-errorResponse.json new file mode 100644 index 0000000000..177b1baf56 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedError-errorResponse.json @@ -0,0 +1,70 @@ +{ + "description": "expectedError-errorResponse", + "schemaVersion": "1.12", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "tests": [ + { + "description": "Unsupported command", + "operations": [ + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "unsupportedCommand", + "command": { + "unsupportedCommand": 1 + } + }, + "expectError": { + "errorResponse": { + "errmsg": { + "$$type": "string" + } + } + } + } + ] + }, + { + "description": "Unsupported query operator", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "$unsupportedQueryOperator": 1 + } + }, + "expectError": { + "errorResponse": { + "errmsg": { + "$$type": "string" + } + } + } + 
} + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedError-isClientError.json b/test/unified-test-format/valid-pass/expectedError-isClientError.json new file mode 100644 index 0000000000..9c6beda588 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedError-isClientError.json @@ -0,0 +1,74 @@ +{ + "description": "expectedError-isClientError", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "isClientError considers network errors", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json b/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json new file mode 100644 index 0000000000..cf7bd60826 --- /dev/null +++ b/test/unified-test-format/valid-pass/expectedEventsForClient-topologyDescriptionChangedEvent.json @@ -0,0 +1,68 @@ +{ + "description": "expectedEventsForClient-topologyDescriptionChangedEvent", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "topologies": [ + "replicaset" + ], + "minServerVersion": "4.4" + } + ], + "tests": [ + { + "description": "can assert on values of newDescription and previousDescription fields", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "directConnection": true + }, + "observeEvents": [ + "topologyDescriptionChangedEvent" + ] + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "sdam", + "ignoreExtraEvents": true, + "events": [ + { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Single" + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/matches-lte-operator.json b/test/unified-test-format/valid-pass/matches-lte-operator.json deleted file mode 100644 index 4de65c5838..0000000000 --- a/test/unified-test-format/valid-pass/matches-lte-operator.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "description": "matches-lte-operator", - "schemaVersion": "1.9", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "database0Name" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "coll0" - } - } - ], - "initialData": [ - { - 
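The isClientError test forces a network error through the server-side failCommand fail point. Roughly what the testRunner failPoint operation plus the assertion amount to in PyMongo, assuming a test deployment started with test commands enabled:

    from pymongo import MongoClient
    from pymongo.errors import AutoReconnect

    client = MongoClient()
    client.admin.command({
        "configureFailPoint": "failCommand",
        "mode": {"times": 1},
        "data": {"failCommands": ["ping"], "closeConnection": True},
    })
    try:
        client.test.command("ping")
    except AutoReconnect:
        pass  # the dropped connection surfaces as a client-side error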
"collectionName": "coll0", - "databaseName": "database0Name", - "documents": [] - } - ], - "tests": [ - { - "description": "special lte matching operator", - "operations": [ - { - "name": "insertOne", - "object": "collection0", - "arguments": { - "document": { - "_id": 1, - "y": 1 - } - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "insert": "coll0", - "documents": [ - { - "_id": { - "$$lte": 1 - }, - "y": { - "$$lte": 2 - } - } - ] - }, - "commandName": "insert", - "databaseName": "database0Name" - } - } - ] - } - ] - } - ] -} diff --git a/test/unified-test-format/valid-pass/observeSensitiveCommands.json b/test/unified-test-format/valid-pass/observeSensitiveCommands.json index 411ca19c5d..d3ae5665be 100644 --- a/test/unified-test-format/valid-pass/observeSensitiveCommands.json +++ b/test/unified-test-format/valid-pass/observeSensitiveCommands.json @@ -61,6 +61,11 @@ "tests": [ { "description": "getnonce is observed with observeSensitiveCommands=true", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", @@ -106,6 +111,11 @@ }, { "description": "getnonce is not observed with observeSensitiveCommands=false", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", @@ -127,6 +137,11 @@ }, { "description": "getnonce is not observed by default", + "runOnRequirements": [ + { + "maxServerVersion": "6.1.99" + } + ], "operations": [ { "name": "runCommand", diff --git a/test/unified-test-format/valid-pass/operation-empty_array.json b/test/unified-test-format/valid-pass/operation-empty_array.json new file mode 100644 index 0000000000..93b25c983c --- /dev/null +++ b/test/unified-test-format/valid-pass/operation-empty_array.json @@ -0,0 +1,10 @@ +{ + "description": "operation-empty_array", + "schemaVersion": "1.0", + "tests": [ + { + "description": "Empty operations array", + "operations": [] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-lte.json b/test/unified-test-format/valid-pass/operator-lte.json new file mode 100644 index 0000000000..7a6a8057ad --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-lte.json @@ -0,0 +1,88 @@ +{ + "description": "operator-lte", + "schemaVersion": "1.9", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "database0Name" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "database0Name", + "documents": [] + } + ], + "tests": [ + { + "description": "special lte matching operator", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": 2, + "y": 3, + "z": 4 + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll0", + "documents": [ + { + "_id": { + "$$lte": 2 + }, + "x": { + "$$lte": 2.1 + }, + "y": { + "$$lte": { + "$numberLong": "3" + } + }, + "z": { + "$$lte": 4 + } + } + ] + }, + "commandName": "insert", + "databaseName": "database0Name" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-matchAsDocument.json b/test/unified-test-format/valid-pass/operator-matchAsDocument.json new 
file mode 100644 index 0000000000..fd8b514d4a --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-matchAsDocument.json @@ -0,0 +1,124 @@ +{ + "description": "operator-matchAsDocument", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "json": "{ \"x\": 1, \"y\": 2.0 }" + }, + { + "_id": 2, + "json": "{ \"x\": { \"$oid\": \"57e193d7a9cc81b4027498b5\" } }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsDocument performs flexible numeric comparisons", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "json": { + "$$matchAsDocument": { + "x": 1, + "y": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "matchAsDocument decodes Extended JSON", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "x": { + "$$type": "objectId" + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-matchAsRoot.json b/test/unified-test-format/valid-pass/operator-matchAsRoot.json new file mode 100644 index 0000000000..1966e3b377 --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-matchAsRoot.json @@ -0,0 +1,151 @@ +{ + "description": "operator-matchAsRoot", + "schemaVersion": "1.13", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [ + { + "_id": 1, + "x": { + "y": 2, + "z": 3 + } + }, + { + "_id": 2, + "json": "{ \"x\": 1, \"y\": 2 }" + } + ] + } + ], + "tests": [ + { + "description": "matchAsRoot with nested document", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot performs flexible numeric comparisons", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2 + } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot evaluates special operators", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$matchAsRoot": { + "y": 2, + "z": { + "$$exists": true + } 
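The valid-pass operator-matchAsDocument file above exercises the decode step directly: Extended JSON in the stored string (including $oid) is decoded before matching, and numeric comparison stays flexible. bson's json_util shows the same behavior, though the runner's codec options may differ:

    from bson import json_util
    from bson.objectid import ObjectId

    doc = json_util.loads('{ "x": { "$oid": "57e193d7a9cc81b4027498b5" } }')
    assert isinstance(doc["x"], ObjectId)  # what $$type: "objectId" verifies

    doc = json_util.loads('{ "x": 1, "y": 2.0 }')
    assert doc["y"] == 2  # flexible numeric match: double 2.0 equals int 2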
+ } + } + } + ] + } + ] + }, + { + "description": "matchAsRoot with matchAsDocument", + "operations": [ + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 2 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 2, + "json": { + "$$matchAsDocument": { + "$$matchAsRoot": { + "x": 1 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/operator-type-number_alias.json b/test/unified-test-format/valid-pass/operator-type-number_alias.json new file mode 100644 index 0000000000..e628d0d777 --- /dev/null +++ b/test/unified-test-format/valid-pass/operator-type-number_alias.json @@ -0,0 +1,174 @@ +{ + "description": "operator-type-number_alias", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "type number alias matches int32", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberInt": "2147483647" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches int64", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberLong": "9223372036854775807" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches double", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberDouble": "2.71828" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches decimal128", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberDecimal": "3.14159" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-crud.json b/test/unified-test-format/valid-pass/poc-crud.json index 0790d9b789..94e4ec5682 100644 --- a/test/unified-test-format/valid-pass/poc-crud.json +++ b/test/unified-test-format/valid-pass/poc-crud.json @@ -322,7 +322,7 @@ "minServerVersion": "4.1.0", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ], "serverless": "forbid" } diff --git a/test/unified-test-format/valid-pass/poc-queryable-encryption.json b/test/unified-test-format/valid-pass/poc-queryable-encryption.json new file mode 100644 index 0000000000..309d1d3b4b --- /dev/null +++ 
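operator-type-number_alias pins down that the "number" alias matches all four BSON numeric types (int32, int64, double, decimal128). One plausible implementation of the alias (the runner's real alias map lives in unified_format_shared):

    from bson import Decimal128, Int64

    # bool is excluded: Python bools are ints but not BSON numbers.
    NUMBER_TYPES = (int, Int64, float, Decimal128)

    def matches_number_alias(value) -> bool:
        return isinstance(value, NUMBER_TYPES) and not isinstance(value, bool)

    assert matches_number_alias(Int64(9223372036854775807))
    assert matches_number_alias(Decimal128("3.14159"))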
b/test/unified-test-format/valid-pass/poc-queryable-encryption.json @@ -0,0 +1,193 @@ +{ + "description": "poc-queryable-encryption", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "csfle": true, + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + } + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "poc-queryable-encryption" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "encrypted" + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "unencryptedDB", + "client": "client1", + "databaseName": "poc-queryable-encryption" + } + }, + { + "collection": { + "id": "unencryptedColl", + "database": "unencryptedDB", + "collectionName": "encrypted" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "poc-queryable-encryption", + "collectionName": "encrypted", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + } + } + ], + "tests": [ + { + "description": "insert, replace, and find with queryable encryption", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": 11 + } + } + }, + { + "object": "encryptedColl", + "name": "replaceOne", + "arguments": { + "filter": { + "encryptedInt": 11 + }, + "replacement": { + "encryptedInt": 22 + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "encryptedInt": 22 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedInt": 22 + } + ] + }, + { + "object": "unencryptedColl", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhS16TJojgDDBtbluxBokvcotP1mQTGeYpNt8xd3MJQ=", + "subType": "00" + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/test/unified-test-format/valid-pass/poc-retryable-writes.json b/test/unified-test-format/valid-pass/poc-retryable-writes.json index 50160799f3..f19aa3f9d8 100644 --- a/test/unified-test-format/valid-pass/poc-retryable-writes.json +++ b/test/unified-test-format/valid-pass/poc-retryable-writes.json @@ -1,14 +1,6 @@ { 
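poc-queryable-encryption's autoEncryptOpts entity corresponds to constructing the client with AutoEncryptionOpts, which is what the unified_format.py hunk later in this diff does. A minimal local-KMS sketch (requires pymongocrypt/libmongocrypt; the key bytes are illustrative and must be 96 bytes for the local provider):

    import os

    from pymongo import MongoClient
    from pymongo.encryption_options import AutoEncryptionOpts

    opts = AutoEncryptionOpts(
        kms_providers={"local": {"key": os.urandom(96)}},
        key_vault_namespace="keyvault.datakeys",
    )
    client = MongoClient(auto_encryption_opts=opts)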
"description": "poc-retryable-writes", "schemaVersion": "1.0", - "runOnRequirements": [ - { - "minServerVersion": "3.6", - "topologies": [ - "replicaset" - ] - } - ], "createEntities": [ { "client": { @@ -79,6 +71,14 @@ "tests": [ { "description": "FindOneAndUpdate is committed on first attempt", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], "operations": [ { "name": "failPoint", @@ -132,6 +132,14 @@ }, { "description": "FindOneAndUpdate is not committed on first attempt", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], "operations": [ { "name": "failPoint", @@ -188,6 +196,14 @@ }, { "description": "FindOneAndUpdate is never committed", + "runOnRequirements": [ + { + "minServerVersion": "3.6", + "topologies": [ + "replicaset" + ] + } + ], "operations": [ { "name": "failPoint", @@ -245,15 +261,10 @@ "description": "InsertMany succeeds after PrimarySteppedDown", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", + "minServerVersion": "4.3.1", "topologies": [ - "sharded-replicaset" + "replicaset", + "sharded" ] } ], @@ -345,7 +356,7 @@ { "minServerVersion": "4.1.7", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], @@ -406,15 +417,10 @@ "description": "InsertOne fails after multiple retryable writeConcernErrors", "runOnRequirements": [ { - "minServerVersion": "4.0", - "topologies": [ - "replicaset" - ] - }, - { - "minServerVersion": "4.1.7", + "minServerVersion": "4.3.1", "topologies": [ - "sharded-replicaset" + "replicaset", + "sharded" ] } ], @@ -433,6 +439,9 @@ "failCommands": [ "insert" ], + "errorLabels": [ + "RetryableWriteError" + ], "writeConcernError": { "code": 91, "errmsg": "Replication is being shut down" diff --git a/test/unified-test-format/valid-pass/poc-sessions.json b/test/unified-test-format/valid-pass/poc-sessions.json index 75f3489428..117c9e7d00 100644 --- a/test/unified-test-format/valid-pass/poc-sessions.json +++ b/test/unified-test-format/valid-pass/poc-sessions.json @@ -264,7 +264,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json index 820ed65927..9ab44a9c54 100644 --- a/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json +++ b/test/unified-test-format/valid-pass/poc-transactions-convenient-api.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json index a0b297d59a..de08edec44 100644 --- a/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json +++ b/test/unified-test-format/valid-pass/poc-transactions-mongos-pin-auto.json @@ -5,7 +5,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified-test-format/valid-pass/poc-transactions.json b/test/unified-test-format/valid-pass/poc-transactions.json index 0355ca2060..2055a3b705 100644 --- a/test/unified-test-format/valid-pass/poc-transactions.json +++ b/test/unified-test-format/valid-pass/poc-transactions.json @@ -11,7 +11,7 @@ { "minServerVersion": "4.1.8", "topologies": [ - "sharded-replicaset" + "sharded" ] 
} ], @@ -93,7 +93,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], @@ -203,7 +203,7 @@ "minServerVersion": "4.3.4", "topologies": [ "replicaset", - "sharded-replicaset" + "sharded" ] } ], diff --git a/test/unified_format.py b/test/unified_format.py index ee64915202..0c5f68edd3 100644 --- a/test/unified_format.py +++ b/test/unified_format.py @@ -14,151 +14,107 @@ """Unified test format runner. -https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.rst +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md """ +from __future__ import annotations + +import asyncio import binascii -import collections import copy -import datetime import functools import os import re import sys import time -import types -from collections import abc +import traceback +from collections import defaultdict +from inspect import iscoroutinefunction from test import ( - AWS_CREDS, - AZURE_CREDS, - CA_PEM, - CLIENT_PEM, - GCP_CREDS, - KMIP_CREDS, - LOCAL_MASTER_KEY, IntegrationTest, client_context, + client_knobs, unittest, ) -from test.utils import ( - CMAPListener, +from test.helpers_shared import ALL_KMS_PROVIDERS, DEFAULT_KMS_TLS +from test.unified_format_shared import ( + KMS_TLS_OPTS, + PLACEHOLDER_MAP, + EventListenerUtil, + MatchEvaluatorUtil, + coerce_result, + parse_bulk_write_error_result, + parse_bulk_write_result, + parse_client_bulk_write_error_result, + parse_collection_or_database_options, + with_metaclass, +) +from test.utils import flaky, get_pool +from test.utils_shared import ( camel_to_snake, camel_to_snake_args, - get_pool, - parse_collection_options, parse_spec_options, prepare_spec_arguments, - rs_or_single_client, - single_client, snake_to_camel, + wait_until, ) +from test.utils_spec_runner import SpecRunnerThread from test.version import Version -from typing import Any, List +from typing import Any, Dict, List, Mapping, Optional + +import pytest import pymongo -from bson import SON, Code, DBRef, Decimal128, Int64, MaxKey, MinKey, json_util -from bson.binary import Binary +from bson import SON, json_util from bson.codec_options import DEFAULT_CODEC_OPTIONS from bson.objectid import ObjectId -from bson.regex import RE_TYPE, Regex -from gridfs import GridFSBucket, GridOut -from pymongo import ASCENDING, MongoClient, _csot -from pymongo.change_stream import ChangeStream -from pymongo.client_session import ClientSession, TransactionOptions, _TxnState -from pymongo.collection import Collection -from pymongo.database import Database -from pymongo.encryption import ClientEncryption -from pymongo.encryption_options import _HAVE_PYMONGOCRYPT +from gridfs import GridFSBucket, GridOut, NoFile +from gridfs.errors import CorruptGridFile +from pymongo import ASCENDING, CursorType, MongoClient, _csot +from pymongo.driver_info import DriverInfo +from pymongo.encryption_options import _HAVE_PYMONGOCRYPT, AutoEncryptionOpts from pymongo.errors import ( + AutoReconnect, BulkWriteError, + ClientBulkWriteException, ConfigurationError, ConnectionFailure, EncryptionError, InvalidOperation, NotPrimaryError, + OperationFailure, PyMongoError, ) from pymongo.monitoring import ( - _SENSITIVE_COMMANDS, - CommandFailedEvent, - CommandListener, CommandStartedEvent, - CommandSucceededEvent, - ConnectionCheckedInEvent, - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutStartedEvent, - ConnectionClosedEvent, - 
ConnectionCreatedEvent, - ConnectionReadyEvent, - PoolClearedEvent, - PoolClosedEvent, - PoolCreatedEvent, - PoolReadyEvent, +) +from pymongo.operations import ( + SearchIndexModel, ) from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference -from pymongo.results import BulkWriteResult from pymongo.server_api import ServerApi +from pymongo.server_selectors import Selection, writable_server_selector +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.change_stream import ChangeStream +from pymongo.synchronous.client_session import ClientSession, TransactionOptions, _TxnState +from pymongo.synchronous.collection import Collection +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.database import Database +from pymongo.synchronous.encryption import ClientEncryption +from pymongo.topology_description import TopologyDescription +from pymongo.typings import _Address from pymongo.write_concern import WriteConcern -JSON_OPTS = json_util.JSONOptions(tz_aware=False) +_IS_SYNC = True IS_INTERRUPTED = False -KMS_TLS_OPTS = { - "kmip": { - "tlsCAFile": CA_PEM, - "tlsCertificateKeyFile": CLIENT_PEM, - } -} - - -# Build up a placeholder map. -PLACEHOLDER_MAP = dict() -for (provider_name, provider_data) in [ - ("local", {"key": LOCAL_MASTER_KEY}), - ("aws", AWS_CREDS), - ("azure", AZURE_CREDS), - ("gcp", GCP_CREDS), - ("kmip", KMIP_CREDS), -]: - for (key, value) in provider_data.items(): - placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" - PLACEHOLDER_MAP[placeholder] = value - def interrupt_loop(): global IS_INTERRUPTED IS_INTERRUPTED = True -def with_metaclass(meta, *bases): - """Create a base class with a metaclass. - - Vendored from six: https://github.com/benjaminp/six/blob/master/six.py - """ - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(type): - def __new__(cls, name, this_bases, d): - if sys.version_info[:2] >= (3, 7): - # This version introduced PEP 560 that requires a bit - # of extra care (we mimic what is done by __build_class__). 
- resolved_bases = types.resolve_bases(bases) - if resolved_bases is not bases: - d["__orig_bases__"] = bases - else: - resolved_bases = bases - return meta(name, resolved_bases, d) - - @classmethod - def __prepare__(cls, name, this_bases): - return meta.__prepare__(name, bases) - - return type.__new__(metaclass, "temporary_class", (), {}) - - def is_run_on_requirement_satisfied(requirement): topology_satisfied = True req_topologies = requirement.get("topologies") @@ -177,14 +133,6 @@ def is_run_on_requirement_satisfied(requirement): if req_max_server_version: max_version_satisfied = Version.from_string(req_max_server_version) >= server_version - serverless = requirement.get("serverless") - if serverless == "require": - serverless_satisfied = client_context.serverless - elif serverless == "forbid": - serverless_satisfied = not client_context.serverless - else: # unset or "allow" - serverless_satisfied = True - params_satisfied = True params = requirement.get("serverParameters") if params: @@ -199,55 +147,54 @@ def is_run_on_requirement_satisfied(requirement): if req_auth is not None: if req_auth: auth_satisfied = client_context.auth_enabled + if auth_satisfied and "authMechanism" in requirement: + auth_satisfied = client_context.check_auth_type(requirement["authMechanism"]) else: auth_satisfied = not client_context.auth_enabled csfle_satisfied = True req_csfle = requirement.get("csfle") if req_csfle is True: - min_version_satisfied = Version.from_string("4.2") <= server_version + # Don't overwrite unsatisfied minimum version requirements. + if min_version_satisfied: + min_version_satisfied = Version.from_string("4.2") <= server_version csfle_satisfied = _HAVE_PYMONGOCRYPT and min_version_satisfied + elif isinstance(req_csfle, dict) and "minLibmongocryptVersion" in req_csfle: + csfle_satisfied = False + req_version = req_csfle["minLibmongocryptVersion"] + if _HAVE_PYMONGOCRYPT: + from pymongocrypt import libmongocrypt_version + + if Version.from_string(libmongocrypt_version()) >= Version.from_string(req_version): + csfle_satisfied = True return ( topology_satisfied and min_version_satisfied and max_version_satisfied - and serverless_satisfied and params_satisfied and auth_satisfied and csfle_satisfied ) -def parse_collection_or_database_options(options): - return parse_collection_options(options) - - -def parse_bulk_write_result(result): - upserted_ids = {str(int_idx): result.upserted_ids[int_idx] for int_idx in result.upserted_ids} - return { - "deletedCount": result.deleted_count, - "insertedCount": result.inserted_count, - "matchedCount": result.matched_count, - "modifiedCount": result.modified_count, - "upsertedCount": result.upserted_count, - "upsertedIds": upserted_ids, - } - - -def parse_bulk_write_error_result(error): - write_result = BulkWriteResult(error.details, True) - return parse_bulk_write_result(write_result) - - -class NonLazyCursor(object): +class NonLazyCursor: """A find cursor proxy that creates the remote cursor when initialized.""" def __init__(self, find_cursor, client): self.client = client self.find_cursor = find_cursor # Create the server side cursor. 
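The csfle requirement now takes either a boolean or a document: the document form gates a test on the installed libmongocrypt version through pymongocrypt, e.g. {"csfle": {"minLibmongocryptVersion": "1.8.0"}} (version value illustrative). Reduced to a helper:

    from pymongocrypt import libmongocrypt_version
    from test.version import Version

    def csfle_version_satisfied(req_version: str) -> bool:
        # Same comparison as the hunk above, using the test suite's
        # Version helper.
        return Version.from_string(libmongocrypt_version()) >= Version.from_string(req_version)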
- self.first_result = next(find_cursor, None) + self.first_result = None + + @classmethod + def create(cls, find_cursor, client): + cursor = cls(find_cursor, client) + try: + cursor.first_result = next(cursor.find_cursor) + except StopIteration: + cursor.first_result = None + return cursor @property def alive(self): @@ -268,80 +215,16 @@ def close(self): self.client = None -class EventListenerUtil(CMAPListener, CommandListener): - def __init__( - self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map - ): - self._event_types = set(name.lower() for name in observe_events) - if observe_sensitive_commands: - self._observe_sensitive_commands = True - self._ignore_commands = set(ignore_commands) - else: - self._observe_sensitive_commands = False - self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) - self._ignore_commands.add("configurefailpoint") - self._event_mapping = collections.defaultdict(list) - self.entity_map = entity_map - if store_events: - for i in store_events: - id = i["id"] - events = (i.lower() for i in i["events"]) - for i in events: - self._event_mapping[i].append(id) - self.entity_map[id] = [] - super(EventListenerUtil, self).__init__() - - def get_events(self, event_type): - if event_type == "command": - return [e for e in self.events if "Command" in type(e).__name__] - return [e for e in self.events if "Command" not in type(e).__name__] - - def add_event(self, event): - event_name = type(event).__name__.lower() - if event_name in self._event_types: - super(EventListenerUtil, self).add_event(event) - for id in self._event_mapping[event_name]: - self.entity_map[id].append( - { - "name": type(event).__name__, - "observedAt": time.time(), - "description": repr(event), - } - ) - - def _command_event(self, event): - if not event.command_name.lower() in self._ignore_commands: - self.add_event(event) - - def started(self, event): - if event.command == {}: - # Command is redacted. Observe only if flag is set. - if self._observe_sensitive_commands: - self._command_event(event) - else: - self._command_event(event) - - def succeeded(self, event): - if event.reply == {}: - # Command is redacted. Observe only if flag is set. - if self._observe_sensitive_commands: - self._command_event(event) - else: - self._command_event(event) - - def failed(self, event): - self._command_event(event) - - -class EntityMapUtil(object): +class EntityMapUtil: """Utility class that implements an entity map as per the unified - test format specification.""" + test format specification. 
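NonLazyCursor's eager first next() moves out of __init__ and into a create() classmethod. Together with the new asyncio/iscoroutinefunction imports and the _IS_SYNC flag, this suggests the module is now generated from an async source, where the initial iteration must be awaited and therefore cannot happen inside a constructor. Call sites presumably become:

    cursor = NonLazyCursor.create(collection.find({}, batch_size=2), client)
    if cursor.first_result is not None:
        print(cursor.first_result)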
+ """ def __init__(self, test_class): - self._entities = {} - self._listeners = {} - self._session_lsids = {} - self.test = test_class + self._entities: Dict[str, Any] = {} + self._listeners: Dict[str, EventListenerUtil] = {} + self._session_lsids: Dict[str, Mapping[str, Any]] = {} + self.test: UnifiedSpecTestMixinV1 = test_class def __contains__(self, item): return item in self._entities @@ -353,14 +236,14 @@ def __getitem__(self, item): try: return self._entities[item] except KeyError: - self.test.fail("Could not find entity named %s in map" % (item,)) + self.test.fail(f"Could not find entity named {item} in map") def __setitem__(self, key, value): if not isinstance(key, str): self.test.fail("Expected entity name of type str, got %s" % (type(key))) if key in self._entities: - self.test.fail("Entity named %s already in map" % (key,)) + self.test.fail(f"Entity named {key} already in map") self._entities[key] = value @@ -370,6 +253,10 @@ def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: raise ValueError(f"Could not find a placeholder value for {path}") return PLACEHOLDER_MAP[path] + # Distinguish between temp and non-temp aws credentials. + if path.endswith("/kmsProviders/aws") and "sessionToken" in current: + path = path.replace("aws", "aws_temp") + for key in list(current): value = current[key] if isinstance(value, dict): @@ -379,15 +266,33 @@ def _handle_placeholders(self, spec: dict, current: dict, path: str) -> Any: def _create_entity(self, entity_spec, uri=None): if len(entity_spec) != 1: - self.test.fail( - "Entity spec %s did not contain exactly one top-level key" % (entity_spec,) - ) + self.test.fail(f"Entity spec {entity_spec} did not contain exactly one top-level key") entity_type, spec = next(iter(entity_spec.items())) spec = self._handle_placeholders(spec, spec, "") if entity_type == "client": kwargs: dict = {} observe_events = spec.get("observeEvents", []) + + if "autoEncryptOpts" in spec: + auto_encrypt_opts = spec["autoEncryptOpts"].copy() + auto_encrypt_kwargs: dict = dict(kms_tls_options=DEFAULT_KMS_TLS) + kms_providers = auto_encrypt_opts.pop("kmsProviders", ALL_KMS_PROVIDERS.copy()) + key_vault_namespace = auto_encrypt_opts.pop("keyVaultNamespace") + extra_opts = auto_encrypt_opts.pop("extraOptions", {}) + for key, value in extra_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + for key, value in auto_encrypt_opts.items(): + auto_encrypt_kwargs[camel_to_snake(key)] = value + auto_encryption_opts = AutoEncryptionOpts( + kms_providers, key_vault_namespace, **auto_encrypt_kwargs + ) + kwargs["auto_encryption_opts"] = auto_encryption_opts + + # The unified tests use topologyOpeningEvent, we use topologyOpenedEvent + for i in range(len(observe_events)): + if "topologyOpeningEvent" == observe_events[i]: + observe_events[i] = "topologyOpenedEvent" ignore_commands = spec.get("ignoreCommandMonitoringEvents", []) observe_sensitive_commands = spec.get("observeSensitiveCommands", False) ignore_commands = [cmd.lower() for cmd in ignore_commands] @@ -401,12 +306,16 @@ def _create_entity(self, entity_spec, uri=None): self._listeners[spec["id"]] = listener kwargs["event_listeners"] = [listener] if spec.get("useMultipleMongoses"): - if client_context.load_balancer or client_context.serverless: + if client_context.load_balancer: kwargs["h"] = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: kwargs["h"] = client_context.mongos_seeds() kwargs.update(spec.get("uriOptions", {})) server_api = spec.get("serverApi") + if 
"waitQueueSize" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueSize") + if "waitQueueMultiple" in kwargs: + raise unittest.SkipTest("PyMongo does not support waitQueueMultiple") if server_api: kwargs["server_api"] = ServerApi( server_api["version"], @@ -415,16 +324,17 @@ def _create_entity(self, entity_spec, uri=None): ) if uri: kwargs["h"] = uri - client = rs_or_single_client(**kwargs) + client = self.test.rs_or_single_client(**kwargs) + client._connect() self[spec["id"]] = client - self.test.addCleanup(client.close) return elif entity_type == "database": client = self[spec["client"]] - if not isinstance(client, MongoClient): + if type(client).__name__ != "MongoClient": self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" - % (spec["client"], type(client)) + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) ) options = parse_collection_or_database_options(spec.get("databaseOptions", {})) self[spec["id"]] = client.get_database(spec["databaseName"], **options) @@ -433,18 +343,20 @@ def _create_entity(self, entity_spec, uri=None): database = self[spec["database"]] if not isinstance(database, Database): self.test.fail( - "Expected entity %s to be of type Database, got %s" - % (spec["database"], type(database)) + "Expected entity {} to be of type Database, got {}".format( + spec["database"], type(database) + ) ) options = parse_collection_or_database_options(spec.get("collectionOptions", {})) self[spec["id"]] = database.get_collection(spec["collectionName"], **options) return elif entity_type == "session": client = self[spec["client"]] - if not isinstance(client, MongoClient): + if type(client).__name__ != "MongoClient": self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" - % (spec["client"], type(client)) + "Expected entity {} to be of type MongoClient, got {}".format( + spec["client"], type(client) + ) ) opts = camel_to_snake_args(spec.get("sessionOptions", {})) if "default_transaction_options" in opts: @@ -476,31 +388,45 @@ def drop(self: GridFSBucket, *args: Any, **kwargs: Any) -> None: opts = camel_to_snake_args(spec["clientEncryptionOpts"].copy()) if isinstance(opts["key_vault_client"], str): opts["key_vault_client"] = self[opts["key_vault_client"]] + # Set TLS options for providers like "kmip:name1". 
+ kms_tls_options = {} + for provider in opts["kms_providers"]: + provider_type = provider.split(":")[0] + if provider_type in KMS_TLS_OPTS: + kms_tls_options[provider] = KMS_TLS_OPTS[provider_type] self[spec["id"]] = ClientEncryption( opts["kms_providers"], opts["key_vault_namespace"], opts["key_vault_client"], DEFAULT_CODEC_OPTIONS, - opts.get("kms_tls_options", KMS_TLS_OPTS), + opts.get("kms_tls_options", kms_tls_options), + opts.get("key_expiration_ms"), ) return + elif entity_type == "thread": + name = spec["id"] + thread = SpecRunnerThread(name) + thread.start() + self.test.addCleanup(thread.join, 5) + self[name] = thread + return - self.test.fail("Unable to create entity of unknown type %s" % (entity_type,)) + self.test.fail(f"Unable to create entity of unknown type {entity_type}") def create_entities_from_spec(self, entity_spec, uri=None): for spec in entity_spec: self._create_entity(spec, uri=uri) - def get_listener_for_client(self, client_name): + def get_listener_for_client(self, client_name: str) -> EventListenerUtil: client = self[client_name] - if not isinstance(client, MongoClient): + if type(client).__name__ != "MongoClient": self.test.fail( - "Expected entity %s to be of type MongoClient, got %s" % (client_name, type(client)) + f"Expected entity {client_name} to be of type MongoClient, got {type(client)}" ) listener = self._listeners.get(client_name) if not listener: - self.test.fail("No listeners configured for client %s" % (client_name,)) + self.test.fail(f"No listeners configured for client {client_name}") return listener @@ -508,8 +434,7 @@ def get_lsid_for_session(self, session_name): session = self[session_name] if not isinstance(session, ClientSession): self.test.fail( - "Expected entity %s to be of type ClientSession, got %s" - % (session_name, type(session)) + f"Expected entity {session_name} to be of type ClientSession, got {type(session)}" ) try: @@ -518,297 +443,28 @@ def get_lsid_for_session(self, session_name): # session has been closed. 
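The new thread entity type added in the hunk above backs the unified format's thread operations: each entity is a SpecRunnerThread that the runner starts immediately and joins (with a 5 second timeout) at cleanup. Only the lifecycle shown above is sketched here; how operations get dispatched onto the thread is defined elsewhere in the runner:

    thread = SpecRunnerThread("thread0")
    thread.start()
    # ... the runner queues thread operations onto it ...
    thread.join(5)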
return self._session_lsids[session_name] - -binary_types = (Binary, bytes) -long_types = (Int64,) -unicode_type = str - - -BSON_TYPE_ALIAS_MAP = { - # https://mongodb.com/docs/manual/reference/operator/query/type/ - # https://pymongo.readthedocs.io/en/stable/api/bson/index.html - "double": (float,), - "string": (str,), - "object": (abc.Mapping,), - "array": (abc.MutableSequence,), - "binData": binary_types, - "undefined": (type(None),), - "objectId": (ObjectId,), - "bool": (bool,), - "date": (datetime.datetime,), - "null": (type(None),), - "regex": (Regex, RE_TYPE), - "dbPointer": (DBRef,), - "javascript": (unicode_type, Code), - "symbol": (unicode_type,), - "javascriptWithScope": (unicode_type, Code), - "int": (int,), - "long": (Int64,), - "decimal": (Decimal128,), - "maxKey": (MaxKey,), - "minKey": (MinKey,), -} - - -class MatchEvaluatorUtil(object): - """Utility class that implements methods for evaluating matches as per - the unified test format specification.""" - - def __init__(self, test_class): - self.test = test_class - - def _operation_exists(self, spec, actual, key_to_compare): - if spec is True: - if key_to_compare is None: - assert actual is not None - else: - self.test.assertIn(key_to_compare, actual) - elif spec is False: - if key_to_compare is None: - assert actual is None - else: - self.test.assertNotIn(key_to_compare, actual) - else: - self.test.fail("Expected boolean value for $$exists operator, got %s" % (spec,)) - - def __type_alias_to_type(self, alias): - if alias not in BSON_TYPE_ALIAS_MAP: - self.test.fail("Unrecognized BSON type alias %s" % (alias,)) - return BSON_TYPE_ALIAS_MAP[alias] - - def _operation_type(self, spec, actual, key_to_compare): - if isinstance(spec, abc.MutableSequence): - permissible_types = tuple( - [t for alias in spec for t in self.__type_alias_to_type(alias)] - ) - else: - permissible_types = self.__type_alias_to_type(spec) - value = actual[key_to_compare] if key_to_compare else actual - self.test.assertIsInstance(value, permissible_types) - - def _operation_matchesEntity(self, spec, actual, key_to_compare): - expected_entity = self.test.entity_map[spec] - self.test.assertEqual(expected_entity, actual[key_to_compare]) - - def _operation_matchesHexBytes(self, spec, actual, key_to_compare): - expected = binascii.unhexlify(spec) - value = actual[key_to_compare] if key_to_compare else actual - self.test.assertEqual(value, expected) - - def _operation_unsetOrMatches(self, spec, actual, key_to_compare): - if key_to_compare is None and not actual: - # top-level document can be None when unset - return - - if key_to_compare not in actual: - # we add a dummy value for the compared key to pass map size check - actual[key_to_compare] = "dummyValue" - return - self.match_result(spec, actual[key_to_compare], in_recursive_call=True) - - def _operation_sessionLsid(self, spec, actual, key_to_compare): - expected_lsid = self.test.entity_map.get_lsid_for_session(spec) - self.test.assertEqual(expected_lsid, actual[key_to_compare]) - - def _operation_lte(self, spec, actual, key_to_compare): - if key_to_compare not in actual: - self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") - self.test.assertLessEqual(actual[key_to_compare], spec) - - def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): - method_name = "_operation_%s" % (opname.strip("$"),) - try: - method = getattr(self, method_name) - except AttributeError: - self.test.fail("Unsupported special matching operator %s" % (opname,)) - else: - method(spec, 
actual, key_to_compare) - - def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=None): - """Returns True if a special operation is evaluated, False - otherwise. If the ``expectation`` map contains a single key, - value pair we check it for a special operation. - If given, ``key_to_compare`` is assumed to be the key in - ``expectation`` whose corresponding value needs to be - evaluated for a possible special operation. ``key_to_compare`` - is ignored when ``expectation`` has only one key.""" - if not isinstance(expectation, abc.Mapping): - return False - - is_special_op, opname, spec = False, False, False - - if key_to_compare is not None: - if key_to_compare.startswith("$$"): - is_special_op = True - opname = key_to_compare - spec = expectation[key_to_compare] - key_to_compare = None - else: - nested = expectation[key_to_compare] - if isinstance(nested, abc.Mapping) and len(nested) == 1: - opname, spec = next(iter(nested.items())) - if opname.startswith("$$"): - is_special_op = True - elif len(expectation) == 1: - opname, spec = next(iter(expectation.items())) - if opname.startswith("$$"): - is_special_op = True - key_to_compare = None - - if is_special_op: - self._evaluate_special_operation( - opname=opname, spec=spec, actual=actual, key_to_compare=key_to_compare - ) - return True - - return False - - def _match_document(self, expectation, actual, is_root): - if self._evaluate_if_special_operation(expectation, actual): - return - - self.test.assertIsInstance(actual, abc.Mapping) - for key, value in expectation.items(): - if self._evaluate_if_special_operation(expectation, actual, key): - continue - - self.test.assertIn(key, actual) - self.match_result(value, actual[key], in_recursive_call=True) - - if not is_root: - expected_keys = set(expectation.keys()) - for key, value in expectation.items(): - if value == {"$$exists": False}: - expected_keys.remove(key) - self.test.assertEqual(expected_keys, set(actual.keys())) - - def match_result(self, expectation, actual, in_recursive_call=False): - if isinstance(expectation, abc.Mapping): - return self._match_document(expectation, actual, is_root=not in_recursive_call) - - if isinstance(expectation, abc.MutableSequence): - self.test.assertIsInstance(actual, abc.MutableSequence) - for e, a in zip(expectation, actual): - if isinstance(e, abc.Mapping): - self._match_document(e, a, is_root=not in_recursive_call) - else: - self.match_result(e, a, in_recursive_call=True) - return - - # account for flexible numerics in element-wise comparison - if isinstance(expectation, int) or isinstance(expectation, float): - self.test.assertEqual(expectation, actual) - else: - self.test.assertIsInstance(actual, type(expectation)) - self.test.assertEqual(expectation, actual) - - def assertHasServiceId(self, spec, actual): - if "hasServiceId" in spec: - if spec.get("hasServiceId"): - self.test.assertIsNotNone(actual.service_id) - self.test.assertIsInstance(actual.service_id, ObjectId) - else: - self.test.assertIsNone(actual.service_id) - - def match_event(self, event_type, expectation, actual): - name, spec = next(iter(expectation.items())) - - # every command event has the commandName field - if event_type == "command": - command_name = spec.get("commandName") - if command_name: - self.test.assertEqual(command_name, actual.command_name) - - if name == "commandStartedEvent": - self.test.assertIsInstance(actual, CommandStartedEvent) - command = spec.get("command") - database_name = spec.get("databaseName") - if command: - if actual.command_name 
== "update": - # TODO: remove this once PYTHON-1744 is done. - # Add upsert and multi fields back into expectations. - for update in command.get("updates", []): - update.setdefault("upsert", False) - update.setdefault("multi", False) - self.match_result(command, actual.command) - if database_name: - self.test.assertEqual(database_name, actual.database_name) - self.assertHasServiceId(spec, actual) - elif name == "commandSucceededEvent": - self.test.assertIsInstance(actual, CommandSucceededEvent) - reply = spec.get("reply") - if reply: - self.match_result(reply, actual.reply) - self.assertHasServiceId(spec, actual) - elif name == "commandFailedEvent": - self.test.assertIsInstance(actual, CommandFailedEvent) - self.assertHasServiceId(spec, actual) - elif name == "poolCreatedEvent": - self.test.assertIsInstance(actual, PoolCreatedEvent) - elif name == "poolReadyEvent": - self.test.assertIsInstance(actual, PoolReadyEvent) - elif name == "poolClearedEvent": - self.test.assertIsInstance(actual, PoolClearedEvent) - self.assertHasServiceId(spec, actual) - elif name == "poolClosedEvent": - self.test.assertIsInstance(actual, PoolClosedEvent) - elif name == "connectionCreatedEvent": - self.test.assertIsInstance(actual, ConnectionCreatedEvent) - elif name == "connectionReadyEvent": - self.test.assertIsInstance(actual, ConnectionReadyEvent) - elif name == "connectionClosedEvent": - self.test.assertIsInstance(actual, ConnectionClosedEvent) - if "reason" in spec: - self.test.assertEqual(actual.reason, spec["reason"]) - elif name == "connectionCheckOutStartedEvent": - self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) - elif name == "connectionCheckOutFailedEvent": - self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) - if "reason" in spec: - self.test.assertEqual(actual.reason, spec["reason"]) - elif name == "connectionCheckedOutEvent": - self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) - elif name == "connectionCheckedInEvent": - self.test.assertIsInstance(actual, ConnectionCheckedInEvent) - else: - self.test.fail("Unsupported event type %s" % (name,)) - - -def coerce_result(opname, result): - """Convert a pymongo result into the spec's result format.""" - if hasattr(result, "acknowledged") and not result.acknowledged: - return {"acknowledged": False} - if opname == "bulkWrite": - return parse_bulk_write_result(result) - if opname == "insertOne": - return {"insertedId": result.inserted_id} - if opname == "insertMany": - return {idx: _id for idx, _id in enumerate(result.inserted_ids)} - if opname in ("deleteOne", "deleteMany"): - return {"deletedCount": result.deleted_count} - if opname in ("updateOne", "updateMany", "replaceOne"): - return { - "matchedCount": result.matched_count, - "modifiedCount": result.modified_count, - "upsertedCount": 0 if result.upserted_id is None else 1, - } - return result + def advance_cluster_times(self, cluster_time) -> None: + """Manually synchronize entities when desired""" + for entity in self._entities.values(): + if isinstance(entity, ClientSession) and cluster_time: + entity.advance_cluster_time(cluster_time) class UnifiedSpecTestMixinV1(IntegrationTest): """Mixin class to run test cases from test specification files. Assumes that tests conform to the `unified test format - `_. + `_. Specification of the test suite being currently run is available as a class attribute ``TEST_SPEC``. 
""" - SCHEMA_VERSION = Version.from_string("1.9") + SCHEMA_VERSION = Version.from_string("1.25") RUN_ON_LOAD_BALANCER = True - RUN_ON_SERVERLESS = True TEST_SPEC: Any + TEST_PATH = "" # This gets filled in by generate_test_classes + mongos_clients: list[MongoClient] = [] @staticmethod def should_run_on(run_on_spec): @@ -836,6 +492,13 @@ def insert_initial_data(self, initial_data): wc = WriteConcern(w="majority") else: wc = WriteConcern(w=1) + + # Remove any encryption collections associated with the collection. + collections = db.list_collection_names() + for collection in collections: + if collection in [f"enxcol_.{coll_name}.esc", f"enxcol_.{coll_name}.ecoc"]: + db.drop_collection(collection) + if documents: if opts: db.create_collection(coll_name, **opts) @@ -845,29 +508,43 @@ def insert_initial_data(self, initial_data): db.create_collection(coll_name, write_concern=wc, **opts) @classmethod - def setUpClass(cls): + def setUpClass(cls) -> None: + # Speed up the tests by decreasing the heartbeat frequency. + cls.knobs = client_knobs( + heartbeat_frequency=0.1, + min_heartbeat_interval=0.1, + kill_cursor_frequency=0.1, + events_queue_frequency=0.1, + ) + cls.knobs.enable() + + @classmethod + def tearDownClass(cls) -> None: + cls.knobs.disable() + + def setUp(self): # super call creates internal client cls.client - super(UnifiedSpecTestMixinV1, cls).setUpClass() + super().setUp() # process file-level runOnRequirements - run_on_spec = cls.TEST_SPEC.get("runOnRequirements", []) - if not cls.should_run_on(run_on_spec): - raise unittest.SkipTest("%s runOnRequirements not satisfied" % (cls.__name__,)) + run_on_spec = self.TEST_SPEC.get("runOnRequirements", []) + if not self.should_run_on(run_on_spec): + raise unittest.SkipTest(f"{self.__class__.__name__} runOnRequirements not satisfied") # add any special-casing for skipping tests here - if client_context.storage_engine == "mmapv1": - if "retryable-writes" in cls.TEST_SPEC["description"]: - raise unittest.SkipTest("MMAPv1 does not support retryWrites=True") - def setUp(self): - super(UnifiedSpecTestMixinV1, self).setUp() + # Handle mongos_clients for transactions tests. 
+ self.mongos_clients = [] + if client_context.supports_transactions() and not client_context.load_balancer: + for address in client_context.mongoses: + self.mongos_clients.append(self.single_client("{}:{}".format(*address))) + # process schemaVersion # note: we check major schema version during class generation - # note: we do this here because we cannot run assertions in setUpClass version = Version.from_string(self.TEST_SPEC["schemaVersion"]) self.assertLessEqual( version, self.SCHEMA_VERSION, - "expected schema version %s or lower, got %s" % (self.SCHEMA_VERSION, version), + f"expected schema version {self.SCHEMA_VERSION} or lower, got {version}", ) # initialize internals @@ -875,29 +552,63 @@ def setUp(self): def maybe_skip_test(self, spec): # add any special-casing for skipping tests here - if client_context.storage_engine == "mmapv1": - if ( - "Dirty explicit session is discarded" in spec["description"] - or "Dirty implicit session is discarded" in spec["description"] - ): - self.skipTest("MMAPv1 does not support retryWrites=True") - if "Client side error in command starting transaction" in spec["description"]: + class_name = self.__class__.__name__.lower() + description = spec["description"].lower() + + if "client side error in command starting transaction" in description: self.skipTest("Implement PYTHON-1894") - if "timeoutMS applied to entire download" in spec["description"]: + if "type=symbol" in description: + self.skipTest("PyMongo does not support the symbol type") + if "timeoutms applied to entire download" in description: self.skipTest("PyMongo's open_download_stream does not cap the stream's lifetime") + if any( + x in description + for x in [ + "first insertone is never committed", + "second updateone is never committed", + "third updateone is never committed", + ] + ): + self.skipTest("Implement PYTHON-4597") - class_name = self.__class__.__name__.lower() - description = spec["description"].lower() if "csot" in class_name: - if client_context.storage_engine == "mmapv1": - self.skipTest( - "MMAPv1 does not support retryable writes which is required for CSOT tests" - ) + # Skip tests that are too slow to run on a given platform. 
+ slow_macos = [ + "operation fails after two consecutive socket timeouts.*", + "operation succeeds after one socket timeout.*", + "Non-tailable cursor lifetime remaining timeoutMS applied to getMore if timeoutMode is unset", + ] + slow_win32 = [ + *slow_macos, + "maxTimeMS value in the command is less than timeoutMS", + "timeoutMS applies to whole operation.*", + ] + slow_pypy = [ + "timeoutMS applies to whole operation.*", + ] + if "CI" in os.environ and sys.platform == "win32" and "gridfs" in class_name: + self.skipTest("PYTHON-3522 CSOT GridFS test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "win32": + for pat in slow_win32: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on Windows") + if "CI" in os.environ and sys.platform == "darwin": + for pat in slow_macos: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on MacOS") + if "CI" in os.environ and sys.implementation.name.lower() == "pypy": + for pat in slow_pypy: + if re.match(pat.lower(), description): + self.skipTest("PYTHON-3522 CSOT test runs too slow on PyPy") if "change" in description or "change" in class_name: self.skipTest("CSOT not implemented for watch()") if "cursors" in class_name: self.skipTest("CSOT not implemented for cursors") - if "tailable" in class_name: + if ( + "tailable" in class_name + or "tailable" in description + and "non-tailable" not in description + ): self.skipTest("CSOT not implemented for tailable cursors") if "sessions" in class_name: self.skipTest("CSOT not implemented for sessions") @@ -905,8 +616,6 @@ def maybe_skip_test(self, spec): self.skipTest("CSOT not implemented for with_transaction") if "transaction" in class_name or "transaction" in description: self.skipTest("CSOT not implemented for transactions") - if "socket timeout" in description: - self.skipTest("CSOT not implemented for socket timeouts") # Some tests need to be skipped based on the operations they try to run. 
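The skip lists above are matched with re.match against the lowercased test description, so each entry is an anchored, case-insensitive pattern. The same gating in isolation, with patterns copied from the lists above (the operation-based skips mentioned in the comment follow right after):

```python
import re

slow_patterns = [
    "timeoutms applies to whole operation.*",
    "operation succeeds after one socket timeout.*",
]

def is_known_slow(description: str) -> bool:
    description = description.lower()
    return any(re.match(pattern, description) for pattern in slow_patterns)

assert is_known_slow("timeoutMS applies to whole operation, not individual attempts")
assert not is_known_slow("unrelated fast test")
```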
for op in spec["operations"]: @@ -915,11 +624,6 @@ def maybe_skip_test(self, spec): self.skipTest("PyMongo does not support count()") if name == "listIndexNames": self.skipTest("PyMongo does not support list_index_names()") - if client_context.storage_engine == "mmapv1": - if name == "createChangeStream": - self.skipTest("MMAPv1 does not support change streams") - if name == "withTransaction" or name == "startTransaction": - self.skipTest("MMAPv1 does not support document-level locking") if not client_context.test_commands_enabled: if name == "failPoint" or name == "targetedFailPoint": self.skipTest("Test commands must be enabled to use fail points") @@ -929,6 +633,8 @@ def maybe_skip_test(self, spec): self.skipTest("PyMongo does not support timeoutMode") def process_error(self, exception, spec): + if isinstance(exception, unittest.SkipTest): + raise is_error = spec.get("isError") is_client_error = spec.get("isClientError") is_timeout_error = spec.get("isTimeoutError") @@ -938,66 +644,93 @@ def process_error(self, exception, spec): error_labels_contain = spec.get("errorLabelsContain") error_labels_omit = spec.get("errorLabelsOmit") expect_result = spec.get("expectResult") + error_response = spec.get("errorResponse") + if error_response: + if isinstance(exception, ClientBulkWriteException): + self.match_evaluator.match_result(error_response, exception.error.details) + else: + self.match_evaluator.match_result(error_response, exception.details) if is_error: # already satisfied because exception was raised pass if is_client_error: + if isinstance(exception, ClientBulkWriteException): + error = exception.error + else: + error = exception # Connection errors are considered client errors. - if isinstance(exception, ConnectionFailure): - self.assertNotIsInstance(exception, NotPrimaryError) - elif isinstance(exception, (InvalidOperation, ConfigurationError, EncryptionError)): + if isinstance(error, ConnectionFailure): + self.assertNotIsInstance(error, NotPrimaryError) + elif isinstance(error, CorruptGridFile): + pass + elif isinstance(error, (InvalidOperation, ConfigurationError, EncryptionError, NoFile)): pass else: - self.assertNotIsInstance(exception, PyMongoError) + self.assertNotIsInstance(error, PyMongoError) if is_timeout_error: self.assertIsInstance(exception, PyMongoError) - self.assertTrue(exception.timeout, msg=exception) + if not exception.timeout: + # Re-raise the exception for better diagnostics. 
+                raise exception
         if error_contains:
             if isinstance(exception, BulkWriteError):
                 errmsg = str(exception.details).lower()
+            elif isinstance(exception, ClientBulkWriteException):
+                errmsg = str(exception.details).lower()
             else:
                 errmsg = str(exception).lower()
             self.assertIn(error_contains.lower(), errmsg)
         if error_code:
-            self.assertEqual(error_code, exception.details.get("code"))
+            if isinstance(exception, ClientBulkWriteException):
+                self.assertEqual(error_code, exception.error.details.get("code"))
+            else:
+                self.assertEqual(error_code, exception.details.get("code"))
         if error_code_name:
-            self.assertEqual(error_code_name, exception.details.get("codeName"))
+            if isinstance(exception, ClientBulkWriteException):
+                self.assertEqual(error_code_name, exception.error.details.get("codeName"))
+            else:
+                self.assertEqual(error_code_name, exception.details.get("codeName"))
         if error_labels_contain:
+            if isinstance(exception, ClientBulkWriteException):
+                error = exception.error
+            else:
+                error = exception
             labels = [
-                err_label
-                for err_label in error_labels_contain
-                if exception.has_error_label(err_label)
+                err_label for err_label in error_labels_contain if error.has_error_label(err_label)
             ]
             self.assertEqual(labels, error_labels_contain)
         if error_labels_omit:
             for err_label in error_labels_omit:
                 if exception.has_error_label(err_label):
-                    self.fail("Exception '%s' unexpectedly had label '%s'" % (exception, err_label))
+                    self.fail(f"Exception '{exception}' unexpectedly had label '{err_label}'")
         if expect_result:
             if isinstance(exception, BulkWriteError):
                 result = parse_bulk_write_error_result(exception)
                 self.match_evaluator.match_result(expect_result, result)
+            elif isinstance(exception, ClientBulkWriteException):
+                result = parse_client_bulk_write_error_result(exception)
+                self.match_evaluator.match_result(expect_result, result)
             else:
                 self.fail(
-                    "expectResult can only be specified with %s exceptions" % (BulkWriteError,)
+                    f"expectResult can only be specified with {BulkWriteError} or {ClientBulkWriteException} exceptions, got {exception}"
                 )

+        return exception
+
     def __raise_if_unsupported(self, opname, target, *target_types):
         if not isinstance(target, target_types):
-            self.fail("Operation %s not supported for entity of type %s" % (opname, type(target)))
+            self.fail(f"Operation {opname} not supported for entity of type {type(target)}")

     def __entityOperation_createChangeStream(self, target, *args, **kwargs):
-        if client_context.storage_engine == "mmapv1":
-            self.skipTest("MMAPv1 does not support change streams")
         self.__raise_if_unsupported("createChangeStream", target, MongoClient, Database, Collection)
         stream = target.watch(*args, **kwargs)
         self.addCleanup(stream.close)
@@ -1020,11 +753,89 @@ def _databaseOperation_runCommand(self, target, **kwargs):
         kwargs["command"] = ordered_command
         return target.command(**kwargs)

+    def _databaseOperation_runCursorCommand(self, target, **kwargs):
+        return (self._databaseOperation_createCommandCursor(target, **kwargs)).to_list()
+
+    def _databaseOperation_createCommandCursor(self, target, **kwargs):
+        self.__raise_if_unsupported("createCommandCursor", target, Database)
+        # Ensure the first key is the command name.
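createCommandCursor continues immediately below by rebuilding the command document name-first, because a BSON command is identified by its first key. The same trick in isolation, using bson's ordered SON type:

```python
from bson.son import SON

def name_first(command_name: str, command: dict) -> SON:
    ordered = SON([(command_name, 1)])
    # update() keeps an existing key's position, so the real value
    # replaces the placeholder while staying first.
    ordered.update(command)
    return ordered

cmd = name_first("find", {"filter": {"x": 1}, "find": "coll"})
assert list(cmd.keys())[0] == "find" and cmd["find"] == "coll"
```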
+ ordered_command = SON([(kwargs.pop("command_name"), 1)]) + ordered_command.update(kwargs["command"]) + kwargs["command"] = ordered_command + batch_size = 0 + + cursor_type = kwargs.pop("cursor_type", "nonTailable") + if cursor_type == CursorType.TAILABLE: + ordered_command["tailable"] = True + elif cursor_type == CursorType.TAILABLE_AWAIT: + ordered_command["tailable"] = True + ordered_command["awaitData"] = True + elif cursor_type != "nonTailable": + self.fail(f"unknown cursorType: {cursor_type}") + + if "maxTimeMS" in kwargs: + kwargs["max_await_time_ms"] = kwargs.pop("maxTimeMS") + + if "batch_size" in kwargs: + batch_size = kwargs.pop("batch_size") + + cursor = target.cursor_command(**kwargs) + + if batch_size > 0: + cursor.batch_size(batch_size) + + return cursor + + def _collectionOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + def _collectionOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + for index in collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + def _collectionOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + + def _databaseOperation_assertIndexExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + index_names = [idx["name"] for idx in collection.list_indexes()] + self.assertIn(kwargs["index_name"], index_names) + + def _databaseOperation_assertIndexNotExists(self, target, **kwargs): + collection = self.client[kwargs["database_name"]][kwargs["collection_name"]] + for index in collection.list_indexes(): + self.assertNotEqual(kwargs["indexName"], index["name"]) + + def _databaseOperation_assertCollectionExists(self, target, **kwargs): + database_name = kwargs["database_name"] + collection_name = kwargs["collection_name"] + collection_name_list = self.client.get_database(database_name).list_collection_names() + self.assertIn(collection_name, collection_name_list) + + def kill_all_sessions(self): + if getattr(self, "client", None) is None: + return + clients = self.mongos_clients if self.mongos_clients else [self.client] + for client in clients: + try: + client.admin.command("killAllSessions", []) + except (OperationFailure, AutoReconnect): + # "operation was interrupted" by killing the command's + # own session. + # On 8.0+ killAllSessions sometimes returns a network error. + pass + def _databaseOperation_listCollections(self, target, *args, **kwargs): if "batch_size" in kwargs: kwargs["cursor"] = {"batchSize": kwargs.pop("batch_size")} cursor = target.list_collections(*args, **kwargs) - return list(cursor) + return cursor.to_list() def _databaseOperation_createCollection(self, target, *args, **kwargs): # PYTHON-1936 Ignore the listCollections event from create_collection. 
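The cursorType handling above maps the spec's enum onto the tailable/awaitData wire flags. Restated standalone with the spec's raw string values (the runner itself compares against pymongo's CursorType constants after argument conversion):

```python
def cursor_flags(cursor_type: str = "nonTailable") -> dict:
    """Translate a unified-spec cursorType into command flags (sketch)."""
    flags = {}
    if cursor_type == "tailable":
        flags["tailable"] = True
    elif cursor_type == "tailableAwait":
        flags["tailable"] = True
        flags["awaitData"] = True
    elif cursor_type != "nonTailable":
        raise ValueError(f"unknown cursorType: {cursor_type}")
    return flags

assert cursor_flags("tailableAwait") == {"tailable": True, "awaitData": True}
assert cursor_flags() == {}
```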
@@ -1034,7 +845,7 @@ def _databaseOperation_createCollection(self, target, *args, **kwargs): def __entityOperation_aggregate(self, target, *args, **kwargs): self.__raise_if_unsupported("aggregate", target, Database, Collection) - return list(target.aggregate(*args, **kwargs)) + return (target.aggregate(*args, **kwargs)).to_list() def _databaseOperation_aggregate(self, target, *args, **kwargs): return self.__entityOperation_aggregate(target, *args, **kwargs) @@ -1045,13 +856,13 @@ def _collectionOperation_aggregate(self, target, *args, **kwargs): def _collectionOperation_find(self, target, *args, **kwargs): self.__raise_if_unsupported("find", target, Collection) find_cursor = target.find(*args, **kwargs) - return list(find_cursor) + return find_cursor.to_list() def _collectionOperation_createFindCursor(self, target, *args, **kwargs): self.__raise_if_unsupported("find", target, Collection) if "filter" not in kwargs: self.fail('createFindCursor requires a "filter" argument') - cursor = NonLazyCursor(target.find(*args, **kwargs), target.database.client) + cursor = NonLazyCursor.create(target.find(*args, **kwargs), target.database.client) self.addCleanup(cursor.close) return cursor @@ -1061,20 +872,25 @@ def _collectionOperation_count(self, target, *args, **kwargs): def _collectionOperation_listIndexes(self, target, *args, **kwargs): if "batch_size" in kwargs: self.skipTest("PyMongo does not support batch_size for list_indexes") - return list(target.list_indexes(*args, **kwargs)) + return (target.list_indexes(*args, **kwargs)).to_list() def _collectionOperation_listIndexNames(self, target, *args, **kwargs): self.skipTest("PyMongo does not support list_index_names") + def _collectionOperation_createSearchIndexes(self, target, *args, **kwargs): + models = [SearchIndexModel(**i) for i in kwargs["models"]] + return target.create_search_indexes(models) + + def _collectionOperation_listSearchIndexes(self, target, *args, **kwargs): + name = kwargs.get("name") + agg_kwargs = kwargs.get("aggregation_options", dict()) + return (target.list_search_indexes(name, **agg_kwargs)).to_list() + def _sessionOperation_withTransaction(self, target, *args, **kwargs): - if client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support document-level locking") self.__raise_if_unsupported("withTransaction", target, ClientSession) return target.with_transaction(*args, **kwargs) def _sessionOperation_startTransaction(self, target, *args, **kwargs): - if client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support document-level locking") self.__raise_if_unsupported("startTransaction", target, ClientSession) return target.start_transaction(*args, **kwargs) @@ -1083,27 +899,33 @@ def _changeStreamOperation_iterateUntilDocumentOrError(self, target, *args, **kw return next(target) def _cursor_iterateUntilDocumentOrError(self, target, *args, **kwargs): - self.__raise_if_unsupported("iterateUntilDocumentOrError", target, NonLazyCursor) + self.__raise_if_unsupported( + "iterateUntilDocumentOrError", target, NonLazyCursor, CommandCursor + ) while target.alive: try: return next(target) except StopIteration: pass + return None def _cursor_close(self, target, *args, **kwargs): - self.__raise_if_unsupported("close", target, NonLazyCursor) + self.__raise_if_unsupported("close", target, NonLazyCursor, CommandCursor) return target.close() + def _clientOperation_appendMetadata(self, target, *args, **kwargs): + info_opts = kwargs["driver_info_options"] + driver_info = 
DriverInfo(info_opts["name"], info_opts["version"], info_opts["platform"]) + target.append_metadata(driver_info) + def _clientEncryptionOperation_createDataKey(self, target, *args, **kwargs): if "opts" in kwargs: - opts = kwargs.pop("opts") - kwargs["master_key"] = opts.get("masterKey") - kwargs["key_alt_names"] = opts.get("keyAltNames") - kwargs["key_material"] = opts.get("keyMaterial") + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + return target.create_data_key(*args, **kwargs) def _clientEncryptionOperation_getKeys(self, target, *args, **kwargs): - return list(target.get_keys(*args, **kwargs)) + return target.get_keys(*args, **kwargs).to_list() def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): result = target.delete_key(*args, **kwargs) @@ -1113,13 +935,16 @@ def _clientEncryptionOperation_deleteKey(self, target, *args, **kwargs): def _clientEncryptionOperation_rewrapManyDataKey(self, target, *args, **kwargs): if "opts" in kwargs: - opts = kwargs.pop("opts") - kwargs["provider"] = opts.get("provider") - kwargs["master_key"] = opts.get("masterKey") + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) data = target.rewrap_many_data_key(*args, **kwargs) if data.bulk_write_result: - return dict(bulkWriteResult=parse_bulk_write_result(data.bulk_write_result)) - return dict() + return {"bulkWriteResult": parse_bulk_write_result(data.bulk_write_result)} + return {} + + def _clientEncryptionOperation_encrypt(self, target, *args, **kwargs): + if "opts" in kwargs: + kwargs.update(camel_to_snake_args(kwargs.pop("opts"))) + return target.encrypt(*args, **kwargs) def _bucketOperation_download(self, target: GridFSBucket, *args: Any, **kwargs: Any) -> bytes: with target.open_download_stream(*args, **kwargs) as gout: @@ -1146,7 +971,7 @@ def _bucketOperation_uploadWithId(self, target: GridFSBucket, *args: Any, **kwar def _bucketOperation_find( self, target: GridFSBucket, *args: Any, **kwargs: Any ) -> List[GridOut]: - return list(target.find(*args, **kwargs)) + return target.find(*args, **kwargs).to_list() def run_entity_operation(self, spec): target = self.entity_map[spec["object"]] @@ -1164,36 +989,40 @@ def run_entity_operation(self, spec): if opargs: arguments = parse_spec_options(copy.deepcopy(opargs)) prepare_spec_arguments( - spec, arguments, camel_to_snake(opname), self.entity_map, self.run_operations + spec, + arguments, + camel_to_snake(opname), + self.entity_map, + self.run_operations_and_throw, ) else: arguments = {} if isinstance(target, MongoClient): - method_name = "_clientOperation_%s" % (opname,) + method_name = f"_clientOperation_{opname}" elif isinstance(target, Database): - method_name = "_databaseOperation_%s" % (opname,) + method_name = f"_databaseOperation_{opname}" elif isinstance(target, Collection): - method_name = "_collectionOperation_%s" % (opname,) + method_name = f"_collectionOperation_{opname}" # contentType is always stored in metadata in pymongo. 
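Several operations above (createDataKey, rewrapManyDataKey, encrypt) funnel their spec options through camel_to_snake_args before calling pymongo. The underlying conversion is the usual two-pass CamelCase splitter; an equivalent standalone version (the real helper lives in the shared test utilities):

```python
import re

def camel_to_snake(camel: str) -> str:
    """Convert a camelCase spec key to PyMongo's snake_case convention."""
    snake = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", camel)
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", snake).lower()

assert camel_to_snake("keyAltNames") == "key_alt_names"
assert camel_to_snake("maxTimeMS") == "max_time_ms"
```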
if target.name.endswith(".files") and opname == "find": for doc in spec.get("expectResult", []): if "contentType" in doc: doc.setdefault("metadata", {})["contentType"] = doc.pop("contentType") elif isinstance(target, ChangeStream): - method_name = "_changeStreamOperation_%s" % (opname,) - elif isinstance(target, NonLazyCursor): - method_name = "_cursor_%s" % (opname,) + method_name = f"_changeStreamOperation_{opname}" + elif isinstance(target, (NonLazyCursor, CommandCursor)): + method_name = f"_cursor_{opname}" elif isinstance(target, ClientSession): - method_name = "_sessionOperation_%s" % (opname,) + method_name = f"_sessionOperation_{opname}" elif isinstance(target, GridFSBucket): - method_name = "_bucketOperation_%s" % (opname,) + method_name = f"_bucketOperation_{opname}" if "id" in arguments: arguments["file_id"] = arguments.pop("id") # MD5 is always disabled in pymongo. arguments.pop("disable_md5", None) elif isinstance(target, ClientEncryption): - method_name = "_clientEncryptionOperation_%s" % (opname,) + method_name = f"_clientEncryptionOperation_{opname}" else: method_name = "doesNotExist" @@ -1203,10 +1032,12 @@ def run_entity_operation(self, spec): target_opname = camel_to_snake(opname) if target_opname == "iterate_once": target_opname = "try_next" + if target_opname == "client_bulk_write": + target_opname = "bulk_write" try: cmd = getattr(target, target_opname) except AttributeError: - self.fail("Unsupported operation %s on entity %s" % (opname, target)) + self.fail(f"Unsupported operation {opname} on entity {target}") else: cmd = functools.partial(method, target) @@ -1222,15 +1053,13 @@ def run_entity_operation(self, spec): # Ignore all operation errors but to avoid masking bugs don't # ignore things like TypeError and ValueError. if ignore and isinstance(exc, (PyMongoError,)): - return + return exc if expect_error: return self.process_error(exc, expect_error) raise else: if expect_error: - self.fail( - 'Excepted error %s but "%s" succeeded: %s' % (expect_error, opname, result) - ) + self.fail(f'Expected error {expect_error} but "{opname}" succeeded: {result}') if expect_result: actual = coerce_result(opname, result) @@ -1238,17 +1067,15 @@ def run_entity_operation(self, spec): if save_as_entity: self.entity_map[save_as_entity] = result + return None + return None def __set_fail_point(self, client, command_args): if not client_context.test_commands_enabled: self.skipTest("Test commands must be enabled") - cmd_on = SON([("configureFailPoint", "failCommand")]) - cmd_on.update(command_args) - client.admin.command(cmd_on) - self.addCleanup( - client.admin.command, "configureFailPoint", cmd_on["configureFailPoint"], mode="off" - ) + self.configure_fail_point(client, command_args) + self.addCleanup(self.configure_fail_point, client, command_args, off=True) def _testOperation_failPoint(self, spec): self.__set_fail_point( @@ -1259,16 +1086,17 @@ def _testOperation_targetedFailPoint(self, spec): session = self.entity_map[spec["session"]] if not session._pinned_address: self.fail( - "Cannot use targetedFailPoint operation with unpinned " - "session %s" % (spec["session"],) + "Cannot use targetedFailPoint operation with unpinned " "session {}".format( + spec["session"] + ) ) - client = single_client("%s:%s" % session._pinned_address) - self.addCleanup(client.close) + client = self.single_client("{}:{}".format(*session._pinned_address)) self.__set_fail_point(client=client, command_args=spec["failPoint"]) def _testOperation_createEntities(self, spec): 
self.entity_map.create_entities_from_spec(spec["entities"], uri=self._uri) + self.entity_map.advance_cluster_times(self._cluster_time) def _testOperation_assertSessionTransactionState(self, spec): session = self.entity_map[spec["session"]] @@ -1339,6 +1167,88 @@ def _testOperation_assertNumberConnectionsCheckedOut(self, spec): pool = get_pool(client) self.assertEqual(spec["connections"], pool.active_sockets) + def _event_count(self, client_name, event): + listener = self.entity_map.get_listener_for_client(client_name) + actual_events = listener.get_events("all") + count = 0 + for actual in actual_events: + try: + self.match_evaluator.match_event(event, actual) + except AssertionError: + continue + else: + count += 1 + return count + + def _testOperation_assertEventCount(self, spec): + """Run the assertEventCount test operation. + + Assert the given event was published exactly `count` times. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + self.assertEqual(self._event_count(client, event), count, f"expected {count} not {event!r}") + + def _testOperation_waitForEvent(self, spec): + """Run the waitForEvent test operation. + + Wait for a number of events to be published, or fail. + """ + client, event, count = spec["client"], spec["event"], spec["count"] + wait_until( + lambda: self._event_count(client, event) >= count, + f"find {count} {event} event(s)", + ) + + def _testOperation_wait(self, spec): + """Run the "wait" test operation.""" + time.sleep(spec["ms"] / 1000.0) + + def _testOperation_recordTopologyDescription(self, spec): + """Run the recordTopologyDescription test operation.""" + self.entity_map[spec["id"]] = self.entity_map[spec["client"]].topology_description + + def _testOperation_assertTopologyType(self, spec): + """Run the assertTopologyType test operation.""" + description = self.entity_map[spec["topologyDescription"]] + self.assertIsInstance(description, TopologyDescription) + self.assertEqual(description.topology_type_name, spec["topologyType"]) + + def _testOperation_waitForPrimaryChange(self, spec: dict) -> None: + """Run the waitForPrimaryChange test operation.""" + client = self.entity_map[spec["client"]] + old_description: TopologyDescription = self.entity_map[spec["priorTopologyDescription"]] + timeout = spec["timeoutMS"] / 1000.0 + + def get_primary(td: TopologyDescription) -> Optional[_Address]: + servers = writable_server_selector(Selection.from_topology_description(td)) + if servers and servers[0].server_type == SERVER_TYPE.RSPrimary: + return servers[0].address + return None + + old_primary = get_primary(old_description) + + def primary_changed() -> bool: + primary = client.primary + if primary is None: + return False + return primary != old_primary + + wait_until(primary_changed, "change primary", timeout=timeout) + + def _testOperation_runOnThread(self, spec): + """Run the 'runOnThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.schedule(functools.partial(self.run_entity_operation, spec["operation"])) + + def _testOperation_waitForThread(self, spec): + """Run the 'waitForThread' operation.""" + thread = self.entity_map[spec["thread"]] + thread.stop() + thread.join(10) + if thread.exc: + raise thread.exc + self.assertFalse(thread.is_alive(), "Thread {} is still running".format(spec["thread"])) + def _testOperation_loop(self, spec): failure_key = spec.get("storeFailuresAsEntity") error_key = spec.get("storeErrorsAsEntity") @@ -1379,13 +1289,16 @@ def _testOperation_loop(self, spec): def 
run_special_operation(self, spec): opname = spec["name"] - method_name = "_testOperation_%s" % (opname,) + method_name = f"_testOperation_{opname}" try: method = getattr(self, method_name) except AttributeError: - self.fail("Unsupported special test operation %s" % (opname,)) + self.fail(f"Unsupported special test operation {opname}") else: - method(spec["arguments"]) + if iscoroutinefunction(method): + method(spec["arguments"]) + else: + method(spec["arguments"]) def run_operations(self, spec): for op in spec: @@ -1394,18 +1307,23 @@ def run_operations(self, spec): else: self.run_entity_operation(op) + def run_operations_and_throw(self, spec): + for op in spec: + if op["object"] == "testRunner": + self.run_special_operation(op) + else: + result = self.run_entity_operation(op) + if isinstance(result, Exception): + raise result + def check_events(self, spec): for event_spec in spec: client_name = event_spec["client"] events = event_spec["events"] - # Valid types: 'command', 'cmap' event_type = event_spec.get("eventType", "command") ignore_extra_events = event_spec.get("ignoreExtraEvents", False) server_connection_id = event_spec.get("serverConnectionId") has_server_connection_id = event_spec.get("hasServerConnectionId", False) - - assert event_type in ("command", "cmap") - listener = self.entity_map.get_listener_for_client(client_name) actual_events = listener.get_events(event_type) if ignore_extra_events: @@ -1415,10 +1333,17 @@ def check_events(self, spec): self.assertEqual(actual_events, []) continue - self.assertEqual(len(actual_events), len(events), actual_events) + if len(actual_events) != len(events): + expected = "\n".join(str(e) for e in events) + actual = "\n".join(str(a) for a in actual_events) + self.assertEqual( + len(actual_events), + len(events), + f"expected events:\n{expected}\nactual events:\n{actual}", + ) for idx, expected_event in enumerate(events): - self.match_evaluator.match_event(event_type, expected_event, actual_events[idx]) + self.match_evaluator.match_event(expected_event, actual_events[idx]) if has_server_connection_id: assert server_connection_id is not None @@ -1426,6 +1351,75 @@ def check_events(self, spec): else: assert server_connection_id is None + def process_ignore_messages(self, ignore_logs, actual_logs): + final_logs = [] + for log in actual_logs: + ignored = False + for ignore_log in ignore_logs: + if log["data"]["message"] == ignore_log["data"][ + "message" + ] and self.match_evaluator.match_result(ignore_log, log, test=False): + ignored = True + break + if not ignored: + final_logs.append(log) + return final_logs + + def check_log_messages(self, operations, spec): + def format_logs(log_list): + client_to_log = defaultdict(list) + for log in log_list: + if log.module == "ocsp_support": + continue + data = json_util.loads(log.getMessage()) + client_id = data.get("clientId", data.get("topologyId")) + client_to_log[client_id].append( + { + "level": log.levelname.lower(), + "component": log.name.replace("pymongo.", "", 1), + "data": data, + } + ) + return client_to_log + + with self.assertLogs("pymongo", level="DEBUG") as cm: + self.run_operations(operations) + formatted_logs = format_logs(cm.records) + for client in spec: + components = set() + for message in client["messages"]: + components.add(message["component"]) + + clientid = self.entity_map[client["client"]]._topology_settings._topology_id + actual_logs = formatted_logs[clientid] + actual_logs = [log for log in actual_logs if log["component"] in components] + + ignore_logs = 
client.get("ignoreMessages", []) + if ignore_logs: + actual_logs = self.process_ignore_messages(ignore_logs, actual_logs) + + if client.get("ignoreExtraMessages", False): + actual_logs = actual_logs[: len(client["messages"])] + self.assertEqual( + len(client["messages"]), + len(actual_logs), + f"expected {client['messages']} but got {actual_logs}", + ) + for expected_msg, actual_msg in zip(client["messages"], actual_logs): + expected_data, actual_data = expected_msg.pop("data"), actual_msg.pop("data") + + if "failureIsRedacted" in expected_msg: + self.assertIn("failure", actual_data) + should_redact = expected_msg.pop("failureIsRedacted") + if should_redact: + actual_fields = set(json_util.loads(actual_data["failure"]).keys()) + self.assertTrue( + {"code", "codeName", "errorLabels"}.issuperset(actual_fields) + ) + + self.match_evaluator.match_result(expected_data, actual_data) + self.match_evaluator.match_result(expected_msg, actual_msg) + def verify_outcome(self, spec): for collection_data in spec: coll_name = collection_data["collectionName"] @@ -1440,10 +1434,37 @@ def verify_outcome(self, spec): if expected_documents: sorted_expected_documents = sorted(expected_documents, key=lambda doc: doc["_id"]) - actual_documents = list(coll.find({}, sort=[("_id", ASCENDING)])) + actual_documents = coll.find({}, sort=[("_id", ASCENDING)]).to_list() self.assertListEqual(sorted_expected_documents, actual_documents) def run_scenario(self, spec, uri=None): + # Kill all sessions before and after each test to prevent an open + # transaction (from a test failure) from blocking collection/database + # operations during test set up and tear down. + self.kill_all_sessions() + + # Handle flaky tests. + flaky_tests = [ + ("PYTHON-5170", ".*test_discovery_and_monitoring.*"), + ("PYTHON-5174", ".*Driver_extends_timeout_while_streaming"), + ("PYTHON-5315", ".*TestSrvPolling.test_recover_from_initially_.*"), + ("PYTHON-4987", ".*UnknownTransactionCommitResult_labels_to_connection_errors"), + ("PYTHON-3689", ".*TestProse.test_load_balancing"), + ("PYTHON-3522", ".*csot.*"), + ] + for reason, flaky_test in flaky_tests: + if re.match(flaky_test.lower(), self.id().lower()) is not None: + func_name = self.id() + options = dict(reason=reason, reset_func=self.setUp, func_name=func_name) + if "csot" in func_name.lower(): + options["max_runs"] = 3 + options["affects_cpython_linux"] = True + decorator = flaky(**options) + decorator(self._run_scenario)(spec, uri) + return + self._run_scenario(spec, uri) + + def _run_scenario(self, spec, uri=None): # maybe skip test manually self.maybe_skip_test(spec) @@ -1455,17 +1476,26 @@ def run_scenario(self, spec, uri=None): # process skipReason skip_reason = spec.get("skipReason", None) if skip_reason is not None: - raise unittest.SkipTest("%s" % (skip_reason,)) + raise unittest.SkipTest(f"{skip_reason}") # process createEntities self._uri = uri self.entity_map = EntityMapUtil(self) self.entity_map.create_entities_from_spec(self.TEST_SPEC.get("createEntities", []), uri=uri) + self._cluster_time = None # process initialData - self.insert_initial_data(self.TEST_SPEC.get("initialData", [])) - - # process operations - self.run_operations(spec["operations"]) + if "initialData" in self.TEST_SPEC: + self.insert_initial_data(self.TEST_SPEC["initialData"]) + self._cluster_time = self.client._topology.max_cluster_time() + self.entity_map.advance_cluster_times(self._cluster_time) + + if "expectLogMessages" in spec: + expect_log_messages = spec["expectLogMessages"] + 
            self.assertTrue(expect_log_messages, "expectLogMessages must be non-empty")
+            self.check_log_messages(spec["operations"], expect_log_messages)
+        else:
+            # process operations
+            self.run_operations(spec["operations"])

         # process expectEvents
         if "expectEvents" in spec:
@@ -1484,7 +1514,7 @@ class UnifiedSpecTestMeta(type):
     EXPECTED_FAILURES: Any

     def __init__(cls, *args, **kwargs):
-        super(UnifiedSpecTestMeta, cls).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)

         def create_test(spec):
             def test_case(self):
@@ -1494,7 +1524,9 @@ def test_case(self):
         for test_spec in cls.TEST_SPEC["tests"]:
             description = test_spec["description"]
-            test_name = "test_%s" % (description.strip(". ").replace(" ", "_").replace(".", "_"),)
+            test_name = "test_{}".format(
+                description.strip(". ").replace(" ", "_").replace(".", "_")
+            )

             test_method = create_test(copy.deepcopy(test_spec))
             test_method.__name__ = str(test_name)
@@ -1526,19 +1558,28 @@ def generate_test_classes(
     **kwargs,
 ):
     """Method for generating test classes. Returns a dictionary where keys are
-    the names of test classes and values are the test class objects."""
+    the names of test classes and values are the test class objects.
+    """
     test_klasses = {}

     def test_base_class_factory(test_spec):
         """Utility that creates the base class to use for test generation.
         This is needed to ensure that cls.TEST_SPEC is appropriately set when
-        the metaclass __init__ is invoked."""
+        the metaclass __init__ is invoked.
+        """

         class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)):  # type: ignore
             TEST_SPEC = test_spec
             EXPECTED_FAILURES = expected_failures

-        return SpecTestBase
+        base = SpecTestBase
+
+        # Add "encryption" marker if the "csfle" runOnRequirement is set.
+        for req in test_spec.get("runOnRequirements", []):
+            if "csfle" in req:
+                base = pytest.mark.encryption(base)
+
+        return base

     for dirpath, _, filenames in os.walk(test_path):
         dirname = os.path.split(dirpath)[-1]
@@ -1552,7 +1593,7 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)):  # type: ignore
             scenario_def = json_util.loads(scenario_stream.read(), json_options=opts)

         test_type = os.path.splitext(filename)[0]
-        snake_class_name = "Test%s_%s_%s" % (
+        snake_class_name = "Test{}_{}_{}".format(
             class_name_prefix,
             dirname.replace("-", "_"),
             test_type.replace("-", "_").replace(".", "_"),
@@ -1564,10 +1605,9 @@ class SpecTestBase(with_metaclass(UnifiedSpecTestMeta)):  # type: ignore
         mixin_class = _SCHEMA_VERSION_MAJOR_TO_MIXIN_CLASS.get(schema_version[0])
         if mixin_class is None:
             raise ValueError(
-                "test file '%s' has unsupported schemaVersion '%s'"
-                % (fpath, schema_version)
+                f"test file '{fpath}' has unsupported schemaVersion '{schema_version}'"
             )
-        module_dict = {"__module__": module}
+        module_dict = {"__module__": module, "TEST_PATH": test_path}
         module_dict.update(kwargs)
         test_klasses[class_name] = type(
             class_name,
diff --git a/test/unified_format_shared.py b/test/unified_format_shared.py
new file mode 100644
index 0000000000..5aa989cb24
--- /dev/null
+++ b/test/unified_format_shared.py
@@ -0,0 +1,685 @@
+# Copyright 2024-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shared utility functions and constants for the unified test format runner. + +https://github.com/mongodb/specifications/blob/master/source/unified-test-format/unified-test-format.md +""" +from __future__ import annotations + +import binascii +import collections +import datetime +import os +import time +import types +from collections import abc +from test.helpers_shared import ( + AWS_CREDS, + AWS_CREDS_2, + AWS_TEMP_CREDS, + AZURE_CREDS, + CA_PEM, + CLIENT_PEM, + GCP_CREDS, + KMIP_CREDS, + LOCAL_MASTER_KEY, +) +from test.utils_shared import CMAPListener, camel_to_snake, parse_collection_options +from typing import Any, MutableMapping, Union + +from bson import ( + RE_TYPE, + Binary, + Code, + DBRef, + Decimal128, + Int64, + MaxKey, + MinKey, + ObjectId, + Regex, + json_util, +) +from pymongo.monitoring import ( + _SENSITIVE_COMMANDS, + CommandFailedEvent, + CommandListener, + CommandStartedEvent, + CommandSucceededEvent, + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, + ServerClosedEvent, + ServerDescriptionChangedEvent, + ServerHeartbeatFailedEvent, + ServerHeartbeatListener, + ServerHeartbeatStartedEvent, + ServerHeartbeatSucceededEvent, + ServerListener, + ServerOpeningEvent, + TopologyClosedEvent, + TopologyDescriptionChangedEvent, + TopologyEvent, + TopologyListener, + TopologyOpenedEvent, + _CommandEvent, + _ConnectionEvent, + _PoolEvent, + _ServerEvent, + _ServerHeartbeatEvent, +) +from pymongo.results import BulkWriteResult +from pymongo.server_description import ServerDescription +from pymongo.topology_description import TopologyDescription + +JSON_OPTS = json_util.JSONOptions(tz_aware=False) + +IS_INTERRUPTED = False + +KMS_TLS_OPTS = { + "kmip": { + "tlsCAFile": CA_PEM, + "tlsCertificateKeyFile": CLIENT_PEM, + } +} + + +# Build up a placeholder maps. +PLACEHOLDER_MAP = {} +for provider_name, provider_data in [ + ("local", {"key": LOCAL_MASTER_KEY}), + ("local:name1", {"key": LOCAL_MASTER_KEY}), + ("aws_temp", AWS_TEMP_CREDS), + ("aws", AWS_CREDS), + ("aws:name1", AWS_CREDS), + ("aws:name2", AWS_CREDS_2), + ("azure", AZURE_CREDS), + ("azure:name1", AZURE_CREDS), + ("gcp", GCP_CREDS), + ("gcp:name1", GCP_CREDS), + ("kmip", KMIP_CREDS), + ("kmip:name1", KMIP_CREDS), +]: + for key, value in provider_data.items(): + placeholder = f"/clientEncryptionOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + + placeholder = f"/autoEncryptOpts/kmsProviders/{provider_name}/{key}" + PLACEHOLDER_MAP[placeholder] = value + +OIDC_ENV = os.environ.get("OIDC_ENV", "test") +if OIDC_ENV == "test": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "test"} +elif OIDC_ENV == "azure": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { + "ENVIRONMENT": "azure", + "TOKEN_RESOURCE": os.environ["AZUREOIDC_RESOURCE"], + } +elif OIDC_ENV == "gcp": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = { + "ENVIRONMENT": "gcp", + "TOKEN_RESOURCE": os.environ["GCPOIDC_AUDIENCE"], + } +elif OIDC_ENV == "k8s": + PLACEHOLDER_MAP["/uriOptions/authMechanismProperties"] = {"ENVIRONMENT": "k8s"} + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass. 
+ + Vendored from six: https://github.com/benjaminp/six/blob/master/six.py + """ + + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(type): + def __new__(cls, name, this_bases, d): + # __orig_bases__ is required by PEP 560. + resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d["__orig_bases__"] = bases + return meta(name, resolved_bases, d) + + @classmethod + def __prepare__( + cls, name: str, this_bases: tuple[type, ...], /, **kwds: Any + ) -> MutableMapping[str, object]: + return meta.__prepare__(name, bases) + + return type.__new__(metaclass, "temporary_class", (), {}) + + +def parse_collection_or_database_options(options): + return parse_collection_options(options) + + +def parse_bulk_write_result(result): + upserted_ids = {str(int_idx): result.upserted_ids[int_idx] for int_idx in result.upserted_ids} + return { + "deletedCount": result.deleted_count, + "insertedCount": result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": result.upserted_count, + "upsertedIds": upserted_ids, + } + + +def parse_client_bulk_write_individual(op_type, result): + if op_type == "insert": + return {"insertedId": result.inserted_id} + if op_type == "update": + if result.upserted_id: + return { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedId": result.upserted_id, + } + else: + return { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + } + if op_type == "delete": + return { + "deletedCount": result.deleted_count, + } + + +def parse_client_bulk_write_result(result): + insert_results, update_results, delete_results = {}, {}, {} + if result.has_verbose_results: + for idx, res in result.insert_results.items(): + insert_results[str(idx)] = parse_client_bulk_write_individual("insert", res) + for idx, res in result.update_results.items(): + update_results[str(idx)] = parse_client_bulk_write_individual("update", res) + for idx, res in result.delete_results.items(): + delete_results[str(idx)] = parse_client_bulk_write_individual("delete", res) + + return { + "deletedCount": result.deleted_count, + "insertedCount": result.inserted_count, + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": result.upserted_count, + "insertResults": insert_results, + "updateResults": update_results, + "deleteResults": delete_results, + } + + +def parse_bulk_write_error_result(error): + write_result = BulkWriteResult(error.details, True) + return parse_bulk_write_result(write_result) + + +def parse_client_bulk_write_error_result(error): + write_result = error.partial_result + if not write_result: + return None + return parse_client_bulk_write_result(write_result) + + +class EventListenerUtil( + CMAPListener, CommandListener, ServerListener, ServerHeartbeatListener, TopologyListener +): + def __init__( + self, observe_events, ignore_commands, observe_sensitive_commands, store_events, entity_map + ): + self._event_types = {name.lower() for name in observe_events} + if observe_sensitive_commands: + self._observe_sensitive_commands = True + self._ignore_commands = set(ignore_commands) + else: + self._observe_sensitive_commands = False + self._ignore_commands = _SENSITIVE_COMMANDS | set(ignore_commands) + self._ignore_commands.add("configurefailpoint") + 
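with_metaclass above exists so the generated spec test classes can be driven by UnifiedSpecTestMeta while staying compatible with PEP 560 base resolution. A tiny usage example; Meta and Base are hypothetical names:

```python
class Meta(type):
    """Toy metaclass: tags every class it creates."""

    def __init__(cls, name, bases, namespace):
        super().__init__(name, bases, namespace)
        cls.processed_by_meta = True

class Base(with_metaclass(Meta)):  # assumes with_metaclass from above is in scope
    pass

assert Base.processed_by_meta
assert type(Base) is Meta
```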
self._event_mapping = collections.defaultdict(list) + self.entity_map = entity_map + if store_events: + for i in store_events: + id = i["id"] + events = (i.lower() for i in i["events"]) + for i in events: + self._event_mapping[i].append(id) + self.entity_map[id] = [] + super().__init__() + + def get_events(self, event_type): + assert event_type in ("command", "cmap", "sdam", "all"), event_type + if event_type == "all": + return list(self.events) + if event_type == "command": + return [e for e in self.events if isinstance(e, _CommandEvent)] + if event_type == "cmap": + return [e for e in self.events if isinstance(e, (_ConnectionEvent, _PoolEvent))] + return [ + e + for e in self.events + if isinstance(e, (_ServerEvent, TopologyEvent, _ServerHeartbeatEvent)) + ] + + def add_event(self, event): + event_name = type(event).__name__.lower() + if event_name in self._event_types: + super().add_event(event) + for id in self._event_mapping[event_name]: + self.entity_map[id].append( + { + "name": type(event).__name__, + "observedAt": time.time(), + "description": repr(event), + } + ) + + def _command_event(self, event): + if event.command_name.lower() not in self._ignore_commands: + self.add_event(event) + + def started(self, event): + if isinstance(event, CommandStartedEvent): + if event.command == {}: + # Command is redacted. Observe only if flag is set. + if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) + else: + self.add_event(event) + + def succeeded(self, event): + if isinstance(event, CommandSucceededEvent): + if event.reply == {}: + # Command is redacted. Observe only if flag is set. + if self._observe_sensitive_commands: + self._command_event(event) + else: + self._command_event(event) + else: + self.add_event(event) + + def failed(self, event): + if isinstance(event, CommandFailedEvent): + self._command_event(event) + else: + self.add_event(event) + + def opened(self, event: Union[ServerOpeningEvent, TopologyOpenedEvent]) -> None: + self.add_event(event) + + def description_changed( + self, event: Union[ServerDescriptionChangedEvent, TopologyDescriptionChangedEvent] + ) -> None: + self.add_event(event) + + def topology_changed(self, event: TopologyDescriptionChangedEvent) -> None: + self.add_event(event) + + def closed(self, event: Union[ServerClosedEvent, TopologyClosedEvent]) -> None: + self.add_event(event) + + +binary_types = (Binary, bytes) +long_types = (Int64,) +unicode_type = str + + +BSON_TYPE_ALIAS_MAP = { + # https://mongodb.com/docs/manual/reference/operator/query/type/ + # https://pymongo.readthedocs.io/en/stable/api/bson/index.html + "double": (float,), + "string": (str,), + "object": (abc.Mapping,), + "array": (abc.MutableSequence,), + "binData": binary_types, + "undefined": (type(None),), + "objectId": (ObjectId,), + "bool": (bool,), + "date": (datetime.datetime,), + "null": (type(None),), + "regex": (Regex, RE_TYPE), + "dbPointer": (DBRef,), + "javascript": (unicode_type, Code), + "symbol": (unicode_type,), + "javascriptWithScope": (unicode_type, Code), + "int": (int,), + "long": (Int64,), + "decimal": (Decimal128,), + "maxKey": (MaxKey,), + "minKey": (MinKey,), + "number": (float, int, Int64, Decimal128), +} + + +class MatchEvaluatorUtil: + """Utility class that implements methods for evaluating matches as per + the unified test format specification. 
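+
+    For example, an expected document of ``{"x": {"$$exists": True}}``
+    matches any actual document that contains an ``x`` field, and a spec of
+    ``{"$$type": ["int", "long"]}`` matches any integral BSON value (see
+    ``BSON_TYPE_ALIAS_MAP`` above).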
+ """ + + def __init__(self, test_class): + self.test = test_class + + def _operation_exists(self, spec, actual, key_to_compare): + if spec is True: + if key_to_compare is None: + assert actual is not None + else: + self.test.assertIn(key_to_compare, actual) + elif spec is False: + if key_to_compare is None: + assert actual is None + else: + self.test.assertNotIn(key_to_compare, actual) + else: + self.test.fail(f"Expected boolean value for $$exists operator, got {spec}") + + def __type_alias_to_type(self, alias): + if alias not in BSON_TYPE_ALIAS_MAP: + self.test.fail(f"Unrecognized BSON type alias {alias}") + return BSON_TYPE_ALIAS_MAP[alias] + + def _operation_type(self, spec, actual, key_to_compare): + if isinstance(spec, abc.MutableSequence): + permissible_types = tuple( + [t for alias in spec for t in self.__type_alias_to_type(alias)] + ) + else: + permissible_types = self.__type_alias_to_type(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertIsInstance(value, permissible_types) + + def _operation_matchesEntity(self, spec, actual, key_to_compare): + expected_entity = self.test.entity_map[spec] + self.test.assertEqual(expected_entity, actual[key_to_compare]) + + def _operation_matchesHexBytes(self, spec, actual, key_to_compare): + expected = binascii.unhexlify(spec) + value = actual[key_to_compare] if key_to_compare else actual + self.test.assertEqual(value, expected) + + def _operation_unsetOrMatches(self, spec, actual, key_to_compare): + if key_to_compare is None and not actual: + # top-level document can be None when unset + return + + if key_to_compare not in actual: + # we add a dummy value for the compared key to pass map size check + actual[key_to_compare] = "dummyValue" + return + self.match_result(spec, actual[key_to_compare], in_recursive_call=True) + + def _operation_sessionLsid(self, spec, actual, key_to_compare): + expected_lsid = self.test.entity_map.get_lsid_for_session(spec) + self.test.assertEqual(expected_lsid, actual[key_to_compare]) + + def _operation_lte(self, spec, actual, key_to_compare): + if key_to_compare not in actual: + self.test.fail(f"Actual command is missing the {key_to_compare} field: {spec}") + self.test.assertLessEqual(actual[key_to_compare], spec) + + def _operation_matchAsDocument(self, spec, actual, key_to_compare): + self._match_document(spec, json_util.loads(actual[key_to_compare]), False, test=True) + + def _operation_matchAsRoot(self, spec, actual, key_to_compare): + if key_to_compare: + actual = actual[key_to_compare] + self._match_document(spec, actual, True, test=True) + + def _evaluate_special_operation(self, opname, spec, actual, key_to_compare): + method_name = "_operation_{}".format(opname.strip("$")) + try: + method = getattr(self, method_name) + except AttributeError: + self.test.fail(f"Unsupported special matching operator {opname}") + else: + method(spec, actual, key_to_compare) + + def _evaluate_if_special_operation(self, expectation, actual, key_to_compare=None): + """Returns True if a special operation is evaluated, False + otherwise. If the ``expectation`` map contains a single key, + value pair we check it for a special operation. + If given, ``key_to_compare`` is assumed to be the key in + ``expectation`` whose corresponding value needs to be + evaluated for a possible special operation. ``key_to_compare`` + is ignored when ``expectation`` has only one key. 
+ """ + if not isinstance(expectation, abc.Mapping): + return False + + is_special_op, opname, spec = False, False, False + + if key_to_compare is not None: + if key_to_compare.startswith("$$"): + is_special_op = True + opname = key_to_compare + spec = expectation[key_to_compare] + key_to_compare = None + else: + nested = expectation[key_to_compare] + if isinstance(nested, abc.Mapping) and len(nested) == 1: + opname, spec = next(iter(nested.items())) + if opname.startswith("$$"): + is_special_op = True + elif len(expectation) == 1: + opname, spec = next(iter(expectation.items())) + if opname.startswith("$$"): + is_special_op = True + key_to_compare = None + + if is_special_op: + self._evaluate_special_operation( + opname=opname, spec=spec, actual=actual, key_to_compare=key_to_compare + ) + return True + + return False + + def _match_document(self, expectation, actual, is_root, test=False): + if self._evaluate_if_special_operation(expectation, actual): + return True + + self.test.assertIsInstance(actual, abc.Mapping) + for key, value in expectation.items(): + if self._evaluate_if_special_operation(expectation, actual, key): + continue + + self.test.assertIn(key, actual) + if not self.match_result(value, actual[key], in_recursive_call=True, test=test): + return False + + if not is_root: + expected_keys = set(expectation.keys()) + for key, value in expectation.items(): + if value == {"$$exists": False}: + expected_keys.remove(key) + if test: + self.test.assertEqual(expected_keys, set(actual.keys())) + else: + return set(expected_keys).issubset(set(actual.keys())) + return True + + def match_result(self, expectation, actual, in_recursive_call=False, test=True): + if isinstance(expectation, abc.Mapping): + return self._match_document( + expectation, actual, is_root=not in_recursive_call, test=test + ) + + if isinstance(expectation, abc.MutableSequence): + self.test.assertIsInstance(actual, abc.MutableSequence) + for e, a in zip(expectation, actual): + if isinstance(e, abc.Mapping): + res = self._match_document(e, a, is_root=not in_recursive_call, test=test) + else: + res = self.match_result(e, a, in_recursive_call=True, test=test) + if not res: + return False + return True + + # account for flexible numerics in element-wise comparison + if isinstance(expectation, (int, float)): + if test: + self.test.assertEqual(expectation, actual) + else: + return expectation == actual + else: + if test: + self.test.assertIsInstance(actual, type(expectation)) + self.test.assertEqual(expectation, actual) + else: + return isinstance(actual, type(expectation)) and expectation == actual + return True + + def match_server_description(self, actual: ServerDescription, spec: dict) -> None: + for field, expected in spec.items(): + field = camel_to_snake(field) + if field == "type": + field = "server_type_name" + self.test.assertEqual(getattr(actual, field), expected) + + def match_topology_description(self, actual: TopologyDescription, spec: dict) -> None: + for field, expected in spec.items(): + field = camel_to_snake(field) + if field == "type": + field = "topology_type_name" + self.test.assertEqual(getattr(actual, field), expected) + + def match_event_fields(self, actual: Any, spec: dict) -> None: + for field, expected in spec.items(): + if field == "command" and isinstance(actual, CommandStartedEvent): + command = spec["command"] + if command: + self.match_result(command, actual.command) + continue + if field == "reply" and isinstance(actual, CommandSucceededEvent): + reply = spec["reply"] + if reply: + 
self.match_result(reply, actual.reply) + continue + if field == "hasServiceId": + if spec["hasServiceId"]: + self.test.assertIsNotNone(actual.service_id) + self.test.assertIsInstance(actual.service_id, ObjectId) + else: + self.test.assertIsNone(actual.service_id) + continue + if field == "hasServerConnectionId": + if spec["hasServerConnectionId"]: + self.test.assertIsNotNone(actual.server_connection_id) + self.test.assertIsInstance(actual.server_connection_id, int) + else: + self.test.assertIsNone(actual.server_connection_id) + continue + if field in ("previousDescription", "newDescription"): + if isinstance(actual, ServerDescriptionChangedEvent): + self.match_server_description( + getattr(actual, camel_to_snake(field)), spec[field] + ) + continue + if isinstance(actual, TopologyDescriptionChangedEvent): + self.match_topology_description( + getattr(actual, camel_to_snake(field)), spec[field] + ) + continue + + if field == "interruptInUseConnections": + field = "interrupt_connections" + else: + field = camel_to_snake(field) + self.test.assertEqual(getattr(actual, field), expected) + + def match_event(self, expectation, actual): + name, spec = next(iter(expectation.items())) + if name == "commandStartedEvent": + self.test.assertIsInstance(actual, CommandStartedEvent) + elif name == "commandSucceededEvent": + self.test.assertIsInstance(actual, CommandSucceededEvent) + elif name == "commandFailedEvent": + self.test.assertIsInstance(actual, CommandFailedEvent) + elif name == "poolCreatedEvent": + self.test.assertIsInstance(actual, PoolCreatedEvent) + elif name == "poolReadyEvent": + self.test.assertIsInstance(actual, PoolReadyEvent) + elif name == "poolClearedEvent": + self.test.assertIsInstance(actual, PoolClearedEvent) + self.test.assertIsInstance(actual.interrupt_connections, bool) + elif name == "poolClosedEvent": + self.test.assertIsInstance(actual, PoolClosedEvent) + elif name == "connectionCreatedEvent": + self.test.assertIsInstance(actual, ConnectionCreatedEvent) + elif name == "connectionReadyEvent": + self.test.assertIsInstance(actual, ConnectionReadyEvent) + elif name == "connectionClosedEvent": + self.test.assertIsInstance(actual, ConnectionClosedEvent) + elif name == "connectionCheckOutStartedEvent": + self.test.assertIsInstance(actual, ConnectionCheckOutStartedEvent) + elif name == "connectionCheckOutFailedEvent": + self.test.assertIsInstance(actual, ConnectionCheckOutFailedEvent) + elif name == "connectionCheckedOutEvent": + self.test.assertIsInstance(actual, ConnectionCheckedOutEvent) + elif name == "connectionCheckedInEvent": + self.test.assertIsInstance(actual, ConnectionCheckedInEvent) + elif name == "serverDescriptionChangedEvent": + self.test.assertIsInstance(actual, ServerDescriptionChangedEvent) + elif name == "serverHeartbeatStartedEvent": + self.test.assertIsInstance(actual, ServerHeartbeatStartedEvent) + elif name == "serverHeartbeatSucceededEvent": + self.test.assertIsInstance(actual, ServerHeartbeatSucceededEvent) + elif name == "serverHeartbeatFailedEvent": + self.test.assertIsInstance(actual, ServerHeartbeatFailedEvent) + elif name == "topologyDescriptionChangedEvent": + self.test.assertIsInstance(actual, TopologyDescriptionChangedEvent) + elif name == "topologyOpeningEvent": + self.test.assertIsInstance(actual, TopologyOpenedEvent) + elif name == "topologyClosedEvent": + self.test.assertIsInstance(actual, TopologyClosedEvent) + else: + raise Exception(f"Unsupported event type {name}") + + self.match_event_fields(actual, spec) + + +def coerce_result(opname, 
result): + """Convert a pymongo result into the spec's result format.""" + if hasattr(result, "acknowledged") and not result.acknowledged: + return {"acknowledged": False} + if opname == "bulkWrite": + return parse_bulk_write_result(result) + if opname == "clientBulkWrite": + return parse_client_bulk_write_result(result) + if opname == "insertOne": + return {"insertedId": result.inserted_id} + if opname == "insertMany": + return dict(enumerate(result.inserted_ids)) + if opname in ("deleteOne", "deleteMany"): + return {"deletedCount": result.deleted_count} + if opname in ("updateOne", "updateMany", "replaceOne"): + value = { + "matchedCount": result.matched_count, + "modifiedCount": result.modified_count, + "upsertedCount": 0 if result.upserted_id is None else 1, + } + if result.upserted_id is not None: + value["upsertedId"] = result.upserted_id + return value + return result diff --git a/test/uri_options/auth-options.json b/test/uri_options/auth-options.json index fadbac35d2..d7fa14a134 100644 --- a/test/uri_options/auth-options.json +++ b/test/uri_options/auth-options.json @@ -2,7 +2,7 @@ "tests": [ { "description": "Valid auth options are parsed correctly (GSSAPI)", - "uri": "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:true&authSource=$external", + "uri": "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=SERVICE_NAME:other,CANONICALIZE_HOST_NAME:forward,SERVICE_HOST:example.com&authSource=$external", "valid": true, "warning": false, "hosts": null, @@ -11,7 +11,8 @@ "authMechanism": "GSSAPI", "authMechanismProperties": { "SERVICE_NAME": "other", - "CANONICALIZE_HOST_NAME": true + "SERVICE_HOST": "example.com", + "CANONICALIZE_HOST_NAME": "forward" }, "authSource": "$external" } diff --git a/test/uri_options/compression-options.json b/test/uri_options/compression-options.json index 16bd27b2cc..3c13dee062 100644 --- a/test/uri_options/compression-options.json +++ b/test/uri_options/compression-options.json @@ -35,7 +35,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low zlibCompressionLevel causes a warning", @@ -44,7 +44,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too high zlibCompressionLevel causes a warning", @@ -53,7 +53,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/concern-options.json b/test/uri_options/concern-options.json index 5a8ef6c272..f55f298087 100644 --- a/test/uri_options/concern-options.json +++ b/test/uri_options/concern-options.json @@ -43,7 +43,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low wTimeoutMS causes a warning", @@ -52,7 +52,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Invalid journal causes a warning", @@ -61,7 +61,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/connection-options.json b/test/uri_options/connection-options.json index b2669b6cf1..bbaa295ecb 100644 --- a/test/uri_options/connection-options.json +++ b/test/uri_options/connection-options.json @@ -27,7 +27,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low connectTimeoutMS causes a warning", @@ -36,7 +36,7 @@ "warning": true, "hosts": null, 
"auth": null, - "options": {} + "options": null }, { "description": "Non-numeric heartbeatFrequencyMS causes a warning", @@ -45,7 +45,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low heartbeatFrequencyMS causes a warning", @@ -54,7 +54,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric localThresholdMS causes a warning", @@ -63,7 +63,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low localThresholdMS causes a warning", @@ -72,7 +72,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Invalid retryWrites causes a warning", @@ -81,7 +81,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric serverSelectionTimeoutMS causes a warning", @@ -90,7 +90,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low serverSelectionTimeoutMS causes a warning", @@ -99,7 +99,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric socketTimeoutMS causes a warning", @@ -108,7 +108,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low socketTimeoutMS causes a warning", @@ -117,7 +117,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "directConnection=true", @@ -137,7 +137,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "directConnection=false", @@ -168,7 +168,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "loadBalanced=true", @@ -211,7 +211,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "loadBalanced=true with multiple hosts causes an error", @@ -220,7 +220,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "loadBalanced=true with directConnection=true causes an error", @@ -229,7 +229,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "loadBalanced=true with replicaSet causes an error", @@ -238,7 +238,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "timeoutMS=0", @@ -258,7 +258,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low timeoutMS causes a warning", @@ -267,7 +267,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/connection-pool-options.json b/test/uri_options/connection-pool-options.json index 118b2f6783..a582867d07 100644 --- a/test/uri_options/connection-pool-options.json +++ b/test/uri_options/connection-pool-options.json @@ -21,7 +21,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low maxIdleTimeMS causes a warning", @@ -30,7 +30,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "maxPoolSize=0 does not error", @@ -61,7 +61,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "maxConnecting<0 causes a warning", @@ -70,7 +70,7 @@ 
"warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/read-preference-options.json b/test/uri_options/read-preference-options.json index cdac6a63c3..abbf0d0cc6 100644 --- a/test/uri_options/read-preference-options.json +++ b/test/uri_options/read-preference-options.json @@ -36,6 +36,21 @@ ] } }, + { + "description": "Read preference tags are case sensitive", + "uri": "mongodb://example.com/?readPreference=secondary&readPreferenceTags=dc:NY", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readPreferenceTags": [ + { + "dc": "NY" + } + ] + } + }, { "description": "Invalid readPreferenceTags causes a warning", "uri": "mongodb://example.com/?readPreferenceTags=invalid", @@ -43,7 +58,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-numeric maxStalenessSeconds causes a warning", @@ -52,7 +67,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Too low maxStalenessSeconds causes a warning", @@ -61,7 +76,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/sdam-options.json b/test/uri_options/sdam-options.json new file mode 100644 index 0000000000..ae0aeb2821 --- /dev/null +++ b/test/uri_options/sdam-options.json @@ -0,0 +1,46 @@ +{ + "tests": [ + { + "description": "serverMonitoringMode=auto", + "uri": "mongodb://example.com/?serverMonitoringMode=auto", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "auto" + } + }, + { + "description": "serverMonitoringMode=stream", + "uri": "mongodb://example.com/?serverMonitoringMode=stream", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "stream" + } + }, + { + "description": "serverMonitoringMode=poll", + "uri": "mongodb://example.com/?serverMonitoringMode=poll", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "serverMonitoringMode": "poll" + } + }, + { + "description": "invalid serverMonitoringMode", + "uri": "mongodb://example.com/?serverMonitoringMode=invalid", + "valid": true, + "warning": true, + "hosts": null, + "auth": null, + "options": null + } + ] +} diff --git a/test/uri_options/single-threaded-options.json b/test/uri_options/single-threaded-options.json index fcd24fb880..80ac3fa4ee 100644 --- a/test/uri_options/single-threaded-options.json +++ b/test/uri_options/single-threaded-options.json @@ -18,7 +18,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/uri_options/srv-options.json b/test/uri_options/srv-options.json index ffc356f12f..0670612c0d 100644 --- a/test/uri_options/srv-options.json +++ b/test/uri_options/srv-options.json @@ -18,7 +18,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with srvMaxHosts", @@ -38,7 +38,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with invalid type for srvMaxHosts", @@ -47,7 +47,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "Non-SRV URI with srvMaxHosts", @@ -56,7 +56,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with positive srvMaxHosts and 
replicaSet", @@ -65,7 +65,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with positive srvMaxHosts and loadBalanced=true", @@ -74,7 +74,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "SRV URI with positive srvMaxHosts and loadBalanced=false", diff --git a/test/uri_options/tls-options.json b/test/uri_options/tls-options.json index 8beaaddd86..526cde1cbe 100644 --- a/test/uri_options/tls-options.json +++ b/test/uri_options/tls-options.json @@ -31,7 +31,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates is parsed correctly", @@ -62,7 +62,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure is parsed correctly", @@ -82,7 +82,7 @@ "warning": true, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsAllowInvalidCertificates both present (and true) raises an error", @@ -91,7 +91,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsAllowInvalidCertificates both present (and false) raises an error", @@ -100,7 +100,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsInsecure both present (and true) raises an error", @@ -109,7 +109,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsInsecure both present (and false) raises an error", @@ -118,7 +118,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsAllowInvalidHostnames both present (and true) raises an error", @@ -127,7 +127,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsAllowInvalidHostnames both present (and false) raises an error", @@ -136,7 +136,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidHostnames and tlsInsecure both present (and true) raises an error", @@ -145,7 +145,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidHostnames and tlsInsecure both present (and false) raises an error", @@ -154,7 +154,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tls=true and ssl=true doesn't warn", @@ -199,7 +199,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tls=true and ssl=false raises error", @@ -208,7 +208,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "ssl=false and tls=true raises error", @@ -217,7 +217,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "ssl=true and tls=false raises error", @@ -226,7 +226,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck can be set to true", @@ -259,7 +259,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates=true and 
tlsDisableCertificateRevocationCheck=false raises an error", @@ -268,7 +268,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates=false and tlsDisableCertificateRevocationCheck=true raises an error", @@ -277,7 +277,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsDisableCertificateRevocationCheck both present (and false) raises an error", @@ -286,7 +286,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and true) raises an error", @@ -295,7 +295,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=true and tlsAllowInvalidCertificates=false raises an error", @@ -304,7 +304,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=false and tlsAllowInvalidCertificates=true raises an error", @@ -313,7 +313,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsAllowInvalidCertificates both present (and false) raises an error", @@ -322,7 +322,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and true) raises an error", @@ -331,7 +331,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure=true and tlsDisableCertificateRevocationCheck=false raises an error", @@ -340,7 +340,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure=false and tlsDisableCertificateRevocationCheck=true raises an error", @@ -349,7 +349,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsDisableCertificateRevocationCheck both present (and false) raises an error", @@ -358,7 +358,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and true) raises an error", @@ -367,7 +367,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=true and tlsInsecure=false raises an error", @@ -376,7 +376,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=false and tlsInsecure=true raises an error", @@ -385,7 +385,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsInsecure both present (and false) raises an error", @@ -394,7 +394,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and true) raises an error", @@ -403,7 +403,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=true and tlsDisableOCSPEndpointCheck=false raises an 
error", @@ -412,7 +412,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck=false and tlsDisableOCSPEndpointCheck=true raises an error", @@ -421,7 +421,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableCertificateRevocationCheck and tlsDisableOCSPEndpointCheck both present (and false) raises an error", @@ -430,7 +430,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and true) raises an error", @@ -439,7 +439,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=true and tlsDisableCertificateRevocationCheck=false raises an error", @@ -448,7 +448,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=false and tlsDisableCertificateRevocationCheck=true raises an error", @@ -457,7 +457,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsDisableCertificateRevocationCheck both present (and false) raises an error", @@ -466,7 +466,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck can be set to true", @@ -499,7 +499,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure=true and tlsDisableOCSPEndpointCheck=false raises an error", @@ -508,7 +508,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure=false and tlsDisableOCSPEndpointCheck=true raises an error", @@ -517,7 +517,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsInsecure and tlsDisableOCSPEndpointCheck both present (and false) raises an error", @@ -526,7 +526,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and true) raises an error", @@ -535,7 +535,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=true and tlsInsecure=false raises an error", @@ -544,7 +544,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=false and tlsInsecure=true raises an error", @@ -553,7 +553,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsInsecure both present (and false) raises an error", @@ -562,7 +562,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and true) raises an error", @@ -571,7 +571,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates=true and tlsDisableOCSPEndpointCheck=false raises an error", @@ -580,7 +580,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates=false and 
tlsDisableOCSPEndpointCheck=true raises an error", @@ -589,7 +589,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsAllowInvalidCertificates and tlsDisableOCSPEndpointCheck both present (and false) raises an error", @@ -598,7 +598,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and true) raises an error", @@ -607,7 +607,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=true and tlsAllowInvalidCertificates=false raises an error", @@ -616,7 +616,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck=false and tlsAllowInvalidCertificates=true raises an error", @@ -625,7 +625,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null }, { "description": "tlsDisableOCSPEndpointCheck and tlsAllowInvalidCertificates both present (and false) raises an error", @@ -634,7 +634,7 @@ "warning": false, "hosts": null, "auth": null, - "options": {} + "options": null } ] } diff --git a/test/utils.py b/test/utils.py index 29ee1ca477..bfc606fe83 100644 --- a/test/utils.py +++ b/test/utils.py @@ -12,261 +12,218 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Utilities for testing pymongo -""" +"""Utilities for testing pymongo that require synchronization.""" +from __future__ import annotations +import asyncio import contextlib -import copy -import functools import os -import re -import shutil +import random import sys -import threading +import threading # Used in the synchronized version of this file import time -import unittest -import warnings -from collections import abc, defaultdict -from functools import partial -from test import client_context, db_pwd, db_user +import traceback +from functools import wraps +from inspect import iscoroutinefunction -from bson import json_util -from bson.objectid import ObjectId from bson.son import SON -from pymongo import MongoClient, monitoring, operations, read_preferences -from pymongo.collection import ReturnDocument -from pymongo.cursor import CursorType -from pymongo.errors import ConfigurationError, OperationFailure +from pymongo import MongoClient +from pymongo.errors import ConfigurationError from pymongo.hello import HelloCompat -from pymongo.monitoring import ( - _SENSITIVE_COMMANDS, - ConnectionCheckedInEvent, - ConnectionCheckedOutEvent, - ConnectionCheckOutFailedEvent, - ConnectionCheckOutStartedEvent, - ConnectionClosedEvent, - ConnectionCreatedEvent, - ConnectionReadyEvent, - PoolClearedEvent, - PoolClosedEvent, - PoolCreatedEvent, - PoolReadyEvent, -) -from pymongo.pool import _CancellationContext, _PoolGeneration -from pymongo.read_concern import ReadConcern +from pymongo.lock import _create_lock +from pymongo.operations import _Op from pymongo.read_preferences import ReadPreference from pymongo.server_selectors import any_server_selector, writable_server_selector -from pymongo.server_type import SERVER_TYPE -from pymongo.uri_parser import parse_uri -from pymongo.write_concern import WriteConcern +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration -IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) +_IS_SYNC = True -class BaseListener(object): - def __init__(self): - self.events = [] - - def 
reset(self): - self.events = [] - - def add_event(self, event): - self.events.append(event) - - def event_count(self, event_type): - return len(self.events_by_type(event_type)) - - def events_by_type(self, event_type): - """Return the matching events by event class. - - event_type can be a single class or a tuple of classes. - """ - return self.matching(lambda e: isinstance(e, event_type)) - - def matching(self, matcher): - """Return the matching events.""" - return [event for event in self.events[:] if matcher(event)] - - def wait_for_event(self, event, count): - """Wait for a number of events to be published, or fail.""" - wait_until(lambda: self.event_count(event) >= count, "find %s %s event(s)" % (count, event)) - - -class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): - def connection_created(self, event): - assert isinstance(event, ConnectionCreatedEvent) - self.add_event(event) - - def connection_ready(self, event): - assert isinstance(event, ConnectionReadyEvent) - self.add_event(event) - - def connection_closed(self, event): - assert isinstance(event, ConnectionClosedEvent) - self.add_event(event) - - def connection_check_out_started(self, event): - assert isinstance(event, ConnectionCheckOutStartedEvent) - self.add_event(event) - - def connection_check_out_failed(self, event): - assert isinstance(event, ConnectionCheckOutFailedEvent) - self.add_event(event) - - def connection_checked_out(self, event): - assert isinstance(event, ConnectionCheckedOutEvent) - self.add_event(event) - - def connection_checked_in(self, event): - assert isinstance(event, ConnectionCheckedInEvent) - self.add_event(event) - - def pool_created(self, event): - assert isinstance(event, PoolCreatedEvent) - self.add_event(event) - - def pool_ready(self, event): - assert isinstance(event, PoolReadyEvent) - self.add_event(event) - - def pool_cleared(self, event): - assert isinstance(event, PoolClearedEvent) - self.add_event(event) - - def pool_closed(self, event): - assert isinstance(event, PoolClosedEvent) - self.add_event(event) - - -class EventListener(monitoring.CommandListener): - def __init__(self): - self.results = defaultdict(list) - - def started(self, event): - self.results["started"].append(event) - - def succeeded(self, event): - self.results["succeeded"].append(event) - - def failed(self, event): - self.results["failed"].append(event) - - def started_command_names(self): - """Return list of command names started.""" - return [event.command_name for event in self.results["started"]] - - def reset(self): - """Reset the state of this listener.""" - self.results.clear() - - -class TopologyEventListener(monitoring.TopologyListener): - def __init__(self): - self.results = defaultdict(list) - - def closed(self, event): - self.results["closed"].append(event) +def get_pool(client): + """Get the standalone, primary, or mongos pool.""" + topology = client._get_topology() + server = topology._select_server(writable_server_selector, _Op.TEST) + return server.pool - def description_changed(self, event): - self.results["description_changed"].append(event) - def opened(self, event): - self.results["opened"].append(event) +def get_pools(client): + """Get all pools.""" + return [ + server.pool + for server in (client._get_topology()).select_servers(any_server_selector, _Op.TEST) + ] - def reset(self): - """Reset the state of this listener.""" - self.results.clear() +def wait_until(predicate, success_description, timeout=10): + """Wait up to 10 seconds (by default) for predicate to be true. 
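+
+    The predicate is polled roughly every ``min(timeout / 100, 0.1)``
+    seconds until it returns a truthy value or ``timeout`` elapses.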
-class AllowListEventListener(EventListener): - def __init__(self, *commands): - self.commands = set(commands) - super(AllowListEventListener, self).__init__() + E.g.: - def started(self, event): - if event.command_name in self.commands: - super(AllowListEventListener, self).started(event) + wait_until(lambda: client.primary == ('a', 1), + 'connect to the primary') - def succeeded(self, event): - if event.command_name in self.commands: - super(AllowListEventListener, self).succeeded(event) + If the lambda-expression isn't true after 10 seconds, we raise + AssertionError("Didn't ever connect to the primary"). - def failed(self, event): - if event.command_name in self.commands: - super(AllowListEventListener, self).failed(event) + Returns the predicate's first true value. + """ + start = time.time() + interval = min(float(timeout) / 100, 0.1) + while True: + if iscoroutinefunction(predicate): + retval = predicate() + else: + retval = predicate() + if retval: + return retval + if time.time() - start > timeout: + raise AssertionError("Didn't ever %s" % success_description) -class OvertCommandListener(EventListener): - """A CommandListener that ignores sensitive commands.""" + time.sleep(interval) - ignore_list_collections = False - def started(self, event): - if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).started(event) +def is_mongos(client): + res = client.admin.command(HelloCompat.LEGACY_CMD) + return res.get("msg", "") == "isdbgrid" - def succeeded(self, event): - if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).succeeded(event) - def failed(self, event): - if event.command_name.lower() not in _SENSITIVE_COMMANDS: - super(OvertCommandListener, self).failed(event) +def ensure_all_connected(client: MongoClient) -> None: + """Ensure that the client's connection pool has socket connections to all + members of a replica set. Raises ConfigurationError when called with a + non-replica set client. + Depending on the use-case, the caller may need to clear any event listeners + that are configured on the client. + """ + hello: dict = client.admin.command(HelloCompat.LEGACY_CMD) + if "setName" not in hello: + raise ConfigurationError("cluster is not a replica set") -class _ServerEventListener(object): - """Listens to all events.""" + target_host_list = set(hello["hosts"] + hello.get("passives", [])) + connected_host_list = {hello["me"]} - def __init__(self): - self.results = [] + # Run hello until we have connected to each host at least once. 
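+    # The SECONDARY read preference below routes each hello to a secondary,
+    # so repeated attempts eventually open a connection to every member.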
+ def discover(): + i = 0 + while i < 100 and connected_host_list != target_host_list: + hello: dict = client.admin.command( + HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY + ) + connected_host_list.update([hello["me"]]) + i += 1 + return connected_host_list - def opened(self, event): - self.results.append(event) + try: - def description_changed(self, event): - self.results.append(event) + def predicate(): + return target_host_list == discover() - def closed(self, event): - self.results.append(event) + wait_until(predicate, "connected to all hosts") + except AssertionError as exc: + raise AssertionError( + f"{exc}, {connected_host_list} != {target_host_list}, {client.topology_description}" + ) - def matching(self, matcher): - """Return the matching events.""" - results = self.results[:] - return [event for event in results if matcher(event)] - def reset(self): - self.results = [] +def assertRaisesExactly(cls, fn, *args, **kwargs): + """ + Unlike the standard assertRaises, this checks that a function raises a + specific class of exception, and not a subclass. E.g., check that + MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. + """ + try: + fn(*args, **kwargs) + except Exception as e: + assert e.__class__ == cls, f"got {e.__class__.__name__}, expected {cls.__name__}" + else: + raise AssertionError("%s not raised" % cls) -class ServerEventListener(_ServerEventListener, monitoring.ServerListener): - """Listens to Server events.""" +def set_fail_point(client, command_args): + cmd = SON([("configureFailPoint", "failCommand")]) + cmd.update(command_args) + client.admin.command(cmd) -class ServerAndTopologyEventListener( # type: ignore[misc] - ServerEventListener, monitoring.TopologyListener +def joinall(tasks): + """Join threads with a 5-minute timeout, assert joins succeeded""" + if _IS_SYNC: + for t in tasks: + t.join(300) + assert not t.is_alive(), "Thread %s hung" % t + else: + asyncio.wait([t.task for t in tasks if t is not None], timeout=300) + + +def flaky( + *, + reason=None, + max_runs=2, + min_passes=1, + delay=1, + affects_cpython_linux=False, + func_name=None, + reset_func=None, ): - """Listens to Server and Topology events.""" + """Decorate a test as flaky. 
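+
+    A hypothetical usage sketch (the test name and reason are illustrative)::
+
+        @flaky(reason="intermittent failover during connection churn")
+        def test_reconnect(self):
+            ...
+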
+    :param reason: the reason why the test is flaky
+    :param max_runs: the maximum number of runs before raising an error
+    :param min_passes: the minimum number of passing runs
+    :param delay: the delay in seconds between retries
+    :param affects_cpython_linux: whether the test is flaky on CPython on Linux
+    :param func_name: the name of the function, used for the retry message
+    :param reset_func: a function to call before retrying
+
-class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener):
-    """Listens to only server heartbeat events."""
-
-    def started(self, event):
-        self.add_event(event)
+    """
+    if reason is None:
+        raise ValueError("flaky requires a reason input")
+    is_cpython_linux = sys.platform == "linux" and sys.implementation.name == "cpython"
+    disable_flaky = "DISABLE_FLAKY" in os.environ
+    if "CI" not in os.environ and "ENABLE_FLAKY" not in os.environ:
+        disable_flaky = True
+
+    if disable_flaky or (is_cpython_linux and not affects_cpython_linux):
+        max_runs = 1
+        min_passes = 1
+
+    def decorator(target_func):
+        @wraps(target_func)
+        def wrapper(*args, **kwargs):
+            passes = 0
+            for i in range(max_runs):
+                try:
+                    result = target_func(*args, **kwargs)
+                    passes += 1
+                    if passes == min_passes:
+                        return result
+                except Exception as e:
+                    if i == max_runs - 1:
+                        raise e
+                    print(
+                        f"Retrying after attempt {i+1} of {func_name or target_func.__name__} failed with ({reason}):\n"
+                        f"{traceback.format_exc()}",
+                        file=sys.stderr,
+                    )
+                    time.sleep(delay)
+                    if reset_func:
+                        reset_func()
-    def succeeded(self, event):
+        return wrapper
-        self.add_event(event)
+    return decorator
-    def failed(self, event):
-        self.add_event(event)
-class MockSocketInfo(object):
+class MockConnection:
     def __init__(self):
         self.cancel_context = _CancellationContext()
         self.more_to_come = False
+        self.id = random.randint(0, 100)
+        self.is_sdam = False
+        self.server_connection_id = random.randint(0, 100)
-    def close_socket(self, reason):
+    def close_conn(self, reason):
         pass
     def __enter__(self):
@@ -276,20 +233,22 @@ def __exit__(self, exc_type, exc_val, exc_tb):
         pass
-class MockPool(object):
-    def __init__(self, address, options, handshake=True):
+class MockPool:
+    def __init__(self, address, options, is_sdam=False, client_id=None):
         self.gen = _PoolGeneration()
-        self._lock = threading.Lock()
+        self._lock = _create_lock()
         self.opts = options
         self.operation_count = 0
+        self.conns = []
     def stale_generation(self, gen, service_id):
         return self.gen.stale(gen, service_id)
-    def get_socket(self, handler=None):
-        return MockSocketInfo()
+    @contextlib.contextmanager
+    def checkout(self, handler=None):
+        yield MockConnection()
-    def return_socket(self, *args, **kwargs):
+    def checkin(self, *args, **kwargs):
         pass
     def _reset(self, service_id=None):
@@ -299,7 +258,7 @@ def _reset(self, service_id=None):
     def ready(self):
         pass
-    def reset(self, service_id=None):
+    def reset(self, service_id=None, interrupt_connections=False):
         self._reset()
     def reset_without_pause(self):
@@ -313,814 +272,3 @@ def update_is_writable(self, is_writable):
         pass
     def remove_stale_sockets(self, *args, **kwargs):
         pass
-
-
-class ScenarioDict(dict):
-    """Dict that returns {} for any unknown key, recursively."""
-
-    def __init__(self, data):
-        def convert(v):
-            if isinstance(v, abc.Mapping):
-                return ScenarioDict(v)
-            if isinstance(v, (str, bytes)):
-                return v
-            if isinstance(v, abc.Sequence):
-                return [convert(item) for item in v]
-            return v
-
-        dict.__init__(self, [(k, convert(v)) for k, v in data.items()])
-
-    def __getitem__(self, item):
-
try: - return dict.__getitem__(self, item) - except KeyError: - # Unlike a defaultdict, don't set the key, just return a dict. - return ScenarioDict({}) - - -class CompareType(object): - """Class that compares equal to any object of the given type.""" - - def __init__(self, type): - self.type = type - - def __eq__(self, other): - return isinstance(other, self.type) - - def __ne__(self, other): - """Needed for Python 2.""" - return not self.__eq__(other) - - -class FunctionCallRecorder(object): - """Utility class to wrap a callable and record its invocations.""" - - def __init__(self, function): - self._function = function - self._call_list = [] - - def __call__(self, *args, **kwargs): - self._call_list.append((args, kwargs)) - return self._function(*args, **kwargs) - - def reset(self): - """Wipes the call list.""" - self._call_list = [] - - def call_list(self): - """Returns a copy of the call list.""" - return self._call_list[:] - - @property - def call_count(self): - """Returns the number of times the function has been called.""" - return len(self._call_list) - - -class TestCreator(object): - """Class to create test cases from specifications.""" - - def __init__(self, create_test, test_class, test_path): - """Create a TestCreator object. - - :Parameters: - - `create_test`: callback that returns a test case. The callback - must accept the following arguments - a dictionary containing the - entire test specification (the `scenario_def`), a dictionary - containing the specification for which the test case will be - generated (the `test_def`). - - `test_class`: the unittest.TestCase class in which to create the - test case. - - `test_path`: path to the directory containing the JSON files with - the test specifications. - """ - self._create_test = create_test - self._test_class = test_class - self.test_path = test_path - - def _ensure_min_max_server_version(self, scenario_def, method): - """Test modifier that enforces a version range for the server on a - test case.""" - if "minServerVersion" in scenario_def: - min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) - if min_ver is not None: - method = client_context.require_version_min(*min_ver)(method) - - if "maxServerVersion" in scenario_def: - max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) - if max_ver is not None: - method = client_context.require_version_max(*max_ver)(method) - - if "serverless" in scenario_def: - serverless = scenario_def["serverless"] - if serverless == "require": - serverless_satisfied = client_context.serverless - elif serverless == "forbid": - serverless_satisfied = not client_context.serverless - else: # unset or "allow" - serverless_satisfied = True - method = unittest.skipUnless( - serverless_satisfied, "Serverless requirement not satisfied" - )(method) - - return method - - @staticmethod - def valid_topology(run_on_req): - return client_context.is_topology_type( - run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) - ) - - @staticmethod - def min_server_version(run_on_req): - version = run_on_req.get("minServerVersion") - if version: - min_ver = tuple(int(elt) for elt in version.split(".")) - return client_context.version >= min_ver - return True - - @staticmethod - def max_server_version(run_on_req): - version = run_on_req.get("maxServerVersion") - if version: - max_ver = tuple(int(elt) for elt in version.split(".")) - return client_context.version <= max_ver - return True - - @staticmethod - def 
valid_auth_enabled(run_on_req): - if "authEnabled" in run_on_req: - if run_on_req["authEnabled"]: - return client_context.auth_enabled - return not client_context.auth_enabled - return True - - @staticmethod - def serverless_ok(run_on_req): - serverless = run_on_req["serverless"] - if serverless == "require": - return client_context.serverless - elif serverless == "forbid": - return not client_context.serverless - else: # unset or "allow" - return True - - def should_run_on(self, scenario_def): - run_on = scenario_def.get("runOn", []) - if not run_on: - # Always run these tests. - return True - - for req in run_on: - if ( - self.valid_topology(req) - and self.min_server_version(req) - and self.max_server_version(req) - and self.valid_auth_enabled(req) - and self.serverless_ok(req) - ): - return True - return False - - def ensure_run_on(self, scenario_def, method): - """Test modifier that enforces a 'runOn' on a test case.""" - return client_context._require( - lambda: self.should_run_on(scenario_def), "runOn not satisfied", method - ) - - def tests(self, scenario_def): - """Allow CMAP spec test to override the location of test.""" - return scenario_def["tests"] - - def create_tests(self): - for dirpath, _, filenames in os.walk(self.test_path): - dirname = os.path.split(dirpath)[-1] - - for filename in filenames: - with open(os.path.join(dirpath, filename)) as scenario_stream: - # Use tz_aware=False to match how CodecOptions decodes - # dates. - opts = json_util.JSONOptions(tz_aware=False) - scenario_def = ScenarioDict( - json_util.loads(scenario_stream.read(), json_options=opts) - ) - - test_type = os.path.splitext(filename)[0] - - # Construct test from scenario. - for test_def in self.tests(scenario_def): - test_name = "test_%s_%s_%s" % ( - dirname, - test_type.replace("-", "_").replace(".", "_"), - str(test_def["description"].replace(" ", "_").replace(".", "_")), - ) - - new_test = self._create_test(scenario_def, test_def, test_name) - new_test = self._ensure_min_max_server_version(scenario_def, new_test) - new_test = self.ensure_run_on(scenario_def, new_test) - - new_test.__name__ = test_name - setattr(self._test_class, new_test.__name__, new_test) - - -def _connection_string(h): - if h.startswith("mongodb://") or h.startswith("mongodb+srv://"): - return h - return "mongodb://%s" % (str(h),) - - -def _mongo_client(host, port, authenticate=True, directConnection=None, **kwargs): - """Create a new client over SSL/TLS if necessary.""" - host = host or client_context.host - port = port or client_context.port - client_options: dict = client_context.default_client_options.copy() - if client_context.replica_set_name and not directConnection: - client_options["replicaSet"] = client_context.replica_set_name - if directConnection is not None: - client_options["directConnection"] = directConnection - client_options.update(kwargs) - - uri = _connection_string(host) - if client_context.auth_enabled and authenticate: - # Only add the default username or password if one is not provided. - res = parse_uri(uri) - if ( - not res["username"] - and not res["password"] - and "username" not in client_options - and "password" not in client_options - ): - client_options["username"] = db_user - client_options["password"] = db_pwd - - return MongoClient(uri, port, **client_options) - - -def single_client_noauth(h=None, p=None, **kwargs): - """Make a direct connection. 
Don't authenticate.""" - return _mongo_client(h, p, authenticate=False, directConnection=True, **kwargs) - - -def single_client(h=None, p=None, **kwargs): - """Make a direct connection, and authenticate if necessary.""" - return _mongo_client(h, p, directConnection=True, **kwargs) - - -def rs_client_noauth(h=None, p=None, **kwargs): - """Connect to the replica set. Don't authenticate.""" - return _mongo_client(h, p, authenticate=False, **kwargs) - - -def rs_client(h=None, p=None, **kwargs): - """Connect to the replica set and authenticate if necessary.""" - return _mongo_client(h, p, **kwargs) - - -def rs_or_single_client_noauth(h=None, p=None, **kwargs): - """Connect to the replica set if there is one, otherwise the standalone. - - Like rs_or_single_client, but does not authenticate. - """ - return _mongo_client(h, p, authenticate=False, **kwargs) - - -def rs_or_single_client(h=None, p=None, **kwargs): - """Connect to the replica set if there is one, otherwise the standalone. - - Authenticates if necessary. - """ - return _mongo_client(h, p, **kwargs) - - -def ensure_all_connected(client): - """Ensure that the client's connection pool has socket connections to all - members of a replica set. Raises ConfigurationError when called with a - non-replica set client. - - Depending on the use-case, the caller may need to clear any event listeners - that are configured on the client. - """ - hello = client.admin.command(HelloCompat.LEGACY_CMD) - if "setName" not in hello: - raise ConfigurationError("cluster is not a replica set") - - target_host_list = set(hello["hosts"]) - connected_host_list = set([hello["me"]]) - admindb = client.get_database("admin") - - # Run hello until we have connected to each host at least once. - while connected_host_list != target_host_list: - hello = admindb.command(HelloCompat.LEGACY_CMD, read_preference=ReadPreference.SECONDARY) - connected_host_list.update([hello["me"]]) - - -def one(s): - """Get one element of a set""" - return next(iter(s)) - - -def oid_generated_on_process(oid): - """Makes a determination as to whether the given ObjectId was generated - by the current process, based on the 5-byte random number in the ObjectId. - """ - return ObjectId._random() == oid.binary[4:9] - - -def delay(sec): - return """function() { sleep(%f * 1000); return true; }""" % sec - - -def get_command_line(client): - command_line = client.admin.command("getCmdLineOpts") - assert command_line["ok"] == 1, "getCmdLineOpts() failed" - return command_line - - -def camel_to_snake(camel): - # Regex to convert CamelCase to snake_case. - snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) - return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() - - -def camel_to_upper_camel(camel): - return camel[0].upper() + camel[1:] - - -def camel_to_snake_args(arguments): - for arg_name in list(arguments): - c2s = camel_to_snake(arg_name) - arguments[c2s] = arguments.pop(arg_name) - return arguments - - -def snake_to_camel(snake): - # Regex to convert snake_case to lowerCamelCase. 
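# For illustration, with hypothetical inputs, the two case helpers above map
# spec-test camelCase names onto PyMongo's snake_case API and back:
#   camel_to_snake("maxTimeMS")        -> "max_time_ms"
#   snake_to_camel("read_preference")  -> "readPreference"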
- return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake) - - -def parse_collection_options(opts): - if "readPreference" in opts: - opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) - - if "writeConcern" in opts: - opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) - - if "readConcern" in opts: - opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) - - if "timeoutMS" in opts: - opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 - return opts - - -def server_started_with_option(client, cmdline_opt, config_opt): - """Check if the server was started with a particular option. - - :Parameters: - - `cmdline_opt`: The command line option (i.e. --nojournal) - - `config_opt`: The config file option (i.e. nojournal) - """ - command_line = get_command_line(client) - if "parsed" in command_line: - parsed = command_line["parsed"] - if config_opt in parsed: - return parsed[config_opt] - argv = command_line["argv"] - return cmdline_opt in argv - - -def server_started_with_auth(client): - try: - command_line = get_command_line(client) - except OperationFailure as e: - msg = e.details.get("errmsg", "") # type: ignore - if e.code == 13 or "unauthorized" in msg or "login" in msg: - # Unauthorized. - return True - raise - - # MongoDB >= 2.0 - if "parsed" in command_line: - parsed = command_line["parsed"] - # MongoDB >= 2.6 - if "security" in parsed: - security = parsed["security"] - # >= rc3 - if "authorization" in security: - return security["authorization"] == "enabled" - # < rc3 - return security.get("auth", False) or bool(security.get("keyFile")) - return parsed.get("auth", False) or bool(parsed.get("keyFile")) - # Legacy - argv = command_line["argv"] - return "--auth" in argv or "--keyFile" in argv - - -def drop_collections(db): - # Drop all non-system collections in this database. - for coll in db.list_collection_names(filter={"name": {"$regex": r"^(?!system\.)"}}): - db.drop_collection(coll) - - -def remove_all_users(db): - db.command("dropAllUsersFromDatabase", 1, writeConcern={"w": client_context.w}) - - -def joinall(threads): - """Join threads with a 5-minute timeout, assert joins succeeded""" - for t in threads: - t.join(300) - assert not t.is_alive(), "Thread %s hung" % t - - -def connected(client): - """Convenience to wait for a newly-constructed client to connect.""" - with warnings.catch_warnings(): - # Ignore warning that ping is always routed to primary even - # if client's read preference isn't PRIMARY. - warnings.simplefilter("ignore", UserWarning) - client.admin.command("ping") # Force connection. - - return client - - -def wait_until(predicate, success_description, timeout=10): - """Wait up to 10 seconds (by default) for predicate to be true. - - E.g.: - - wait_until(lambda: client.primary == ('a', 1), - 'connect to the primary') - - If the lambda-expression isn't true after 10 seconds, we raise - AssertionError("Didn't ever connect to the primary"). - - Returns the predicate's first true value. - """ - start = time.time() - interval = min(float(timeout) / 100, 0.1) - while True: - retval = predicate() - if retval: - return retval - - if time.time() - start > timeout: - raise AssertionError("Didn't ever %s" % success_description) - - time.sleep(interval) - - -def repl_set_step_down(client, **kwargs): - """Run replSetStepDown, first unfreezing a secondary with replSetFreeze.""" - cmd = SON([("replSetStepDown", 1)]) - cmd.update(kwargs) - - # Unfreeze a secondary to ensure a speedy election. 
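# For illustration: replSetFreeze with a duration of 0 cancels any existing
# freeze on the secondary, making it immediately eligible to stand for
# election once the primary steps down.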
- client.admin.command("replSetFreeze", 0, read_preference=ReadPreference.SECONDARY) - client.admin.command(cmd) - - -def is_mongos(client): - res = client.admin.command(HelloCompat.LEGACY_CMD) - return res.get("msg", "") == "isdbgrid" - - -def assertRaisesExactly(cls, fn, *args, **kwargs): - """ - Unlike the standard assertRaises, this checks that a function raises a - specific class of exception, and not a subclass. E.g., check that - MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect. - """ - try: - fn(*args, **kwargs) - except Exception as e: - assert e.__class__ == cls, "got %s, expected %s" % (e.__class__.__name__, cls.__name__) - else: - raise AssertionError("%s not raised" % cls) - - -@contextlib.contextmanager -def _ignore_deprecations(): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - yield - - -def ignore_deprecations(wrapped=None): - """A context manager or a decorator.""" - if wrapped: - - @functools.wraps(wrapped) - def wrapper(*args, **kwargs): - with _ignore_deprecations(): - return wrapped(*args, **kwargs) - - return wrapper - - else: - return _ignore_deprecations() - - -class DeprecationFilter(object): - def __init__(self, action="ignore"): - """Start filtering deprecations.""" - self.warn_context = warnings.catch_warnings() - self.warn_context.__enter__() - warnings.simplefilter(action, DeprecationWarning) - - def stop(self): - """Stop filtering deprecations.""" - self.warn_context.__exit__() # type: ignore - self.warn_context = None # type: ignore - - -def get_pool(client): - """Get the standalone, primary, or mongos pool.""" - topology = client._get_topology() - server = topology.select_server(writable_server_selector) - return server.pool - - -def get_pools(client): - """Get all pools.""" - return [server.pool for server in client._get_topology().select_servers(any_server_selector)] - - -# Constants for run_threads and lazy_client_trial. -NTRIALS = 5 -NTHREADS = 10 - - -def run_threads(collection, target): - """Run a target function in many threads. - - target is a function taking a Collection and an integer. - """ - threads = [] - for i in range(NTHREADS): - bound_target = partial(target, collection, i) - threads.append(threading.Thread(target=bound_target)) - - for t in threads: - t.start() - - for t in threads: - t.join(60) - assert not t.is_alive() - - -@contextlib.contextmanager -def frequent_thread_switches(): - """Make concurrency bugs more likely to manifest.""" - interval = sys.getswitchinterval() - sys.setswitchinterval(1e-6) - - try: - yield - finally: - sys.setswitchinterval(interval) - - -def lazy_client_trial(reset, target, test, get_client): - """Test concurrent operations on a lazily-connecting client. - - `reset` takes a collection and resets it for the next trial. - - `target` takes a lazily-connecting collection and an index from - 0 to NTHREADS, and performs some operation, e.g. an insert. - - `test` takes the lazily-connecting collection and asserts a - post-condition to prove `target` succeeded. 
- """ - collection = client_context.client.pymongo_test.test - - with frequent_thread_switches(): - for i in range(NTRIALS): - reset(collection) - lazy_client = get_client() - lazy_collection = lazy_client.pymongo_test.test - run_threads(lazy_collection, target) - test(lazy_collection) - - -def gevent_monkey_patched(): - """Check if gevent's monkey patching is active.""" - try: - import socket - - import gevent.socket - - return socket.socket is gevent.socket.socket - except ImportError: - return False - - -def eventlet_monkey_patched(): - """Check if eventlet's monkey patching is active.""" - import threading - - return threading.current_thread.__module__ == "eventlet.green.threading" - - -def is_greenthread_patched(): - return gevent_monkey_patched() or eventlet_monkey_patched() - - -def disable_replication(client): - """Disable replication on all secondaries.""" - for host, port in client.secondaries: - secondary = single_client(host, port) - secondary.admin.command("configureFailPoint", "stopReplProducer", mode="alwaysOn") - - -def enable_replication(client): - """Enable replication on all secondaries.""" - for host, port in client.secondaries: - secondary = single_client(host, port) - secondary.admin.command("configureFailPoint", "stopReplProducer", mode="off") - - -class ExceptionCatchingThread(threading.Thread): - """A thread that stores any exception encountered from run().""" - - def __init__(self, *args, **kwargs): - self.exc = None - super(ExceptionCatchingThread, self).__init__(*args, **kwargs) - - def run(self): - try: - super(ExceptionCatchingThread, self).run() - except BaseException as exc: - self.exc = exc - raise - - -def parse_read_preference(pref): - # Make first letter lowercase to match read_pref's modes. - mode_string = pref.get("mode", "primary") - mode_string = mode_string[:1].lower() + mode_string[1:] - mode = read_preferences.read_pref_mode_from_name(mode_string) - max_staleness = pref.get("maxStalenessSeconds", -1) - tag_sets = pref.get("tag_sets") - return read_preferences.make_read_preference( - mode, tag_sets=tag_sets, max_staleness=max_staleness - ) - - -def server_name_to_type(name): - """Convert a ServerType name to the corresponding value. For SDAM tests.""" - # Special case, some tests in the spec include the PossiblePrimary - # type, but only single-threaded drivers need that type. We call - # possible primaries Unknown. 
- if name == "PossiblePrimary": - return SERVER_TYPE.Unknown - return getattr(SERVER_TYPE, name) - - -def cat_files(dest, *sources): - """Cat multiple files into dest.""" - with open(dest, "wb") as fdst: - for src in sources: - with open(src, "rb") as fsrc: - shutil.copyfileobj(fsrc, fdst) - - -@contextlib.contextmanager -def assertion_context(msg): - """A context manager that adds info to an assertion failure.""" - try: - yield - except AssertionError as exc: - msg = "%s (%s)" % (exc, msg) - exc_type, exc_val, exc_tb = sys.exc_info() - assert exc_type is not None - raise exc_type(exc_val).with_traceback(exc_tb) - - -def parse_spec_options(opts): - if "readPreference" in opts: - opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) - - if "writeConcern" in opts: - opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) - - if "readConcern" in opts: - opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) - - if "timeoutMS" in opts: - assert isinstance(opts["timeoutMS"], int) - opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 - - if "maxTimeMS" in opts: - opts["max_time_ms"] = opts.pop("maxTimeMS") - - if "maxCommitTimeMS" in opts: - opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") - - if "hint" in opts: - hint = opts.pop("hint") - if not isinstance(hint, str): - hint = list(hint.items()) - opts["hint"] = hint - - # Properly format 'hint' arguments for the Bulk API tests. - if "requests" in opts: - reqs = opts.pop("requests") - for req in reqs: - if "name" in req: - # CRUD v2 format - args = req.pop("arguments", {}) - if "hint" in args: - hint = args.pop("hint") - if not isinstance(hint, str): - hint = list(hint.items()) - args["hint"] = hint - req["arguments"] = args - else: - # Unified test format - bulk_model, spec = next(iter(req.items())) - if "hint" in spec: - hint = spec.pop("hint") - if not isinstance(hint, str): - hint = list(hint.items()) - spec["hint"] = hint - opts["requests"] = reqs - - return dict(opts) - - -def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): - for arg_name in list(arguments): - c2s = camel_to_snake(arg_name) - # PyMongo accepts sort as list of tuples. - if arg_name == "sort": - sort_dict = arguments[arg_name] - arguments[arg_name] = list(sort_dict.items()) - # Named "key" instead not fieldName. - if arg_name == "fieldName": - arguments["key"] = arguments.pop(arg_name) - # Aggregate uses "batchSize", while find uses batch_size. - elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": - continue - elif arg_name == "timeoutMode": - raise unittest.SkipTest("PyMongo does not support timeoutMode") - # Requires boolean returnDocument. - elif arg_name == "returnDocument": - arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) - elif c2s == "requests": - # Parse each request into a bulk write model. 
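# For illustration, a hypothetical CRUD v2 request such as
#   {"name": "insertOne", "arguments": {"document": {"_id": 1}}}
# becomes operations.InsertOne(document={"_id": 1}) via the lookups below.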
- requests = [] - for request in arguments["requests"]: - if "name" in request: - # CRUD v2 format - bulk_model = camel_to_upper_camel(request["name"]) - bulk_class = getattr(operations, bulk_model) - bulk_arguments = camel_to_snake_args(request["arguments"]) - else: - # Unified test format - bulk_model, spec = next(iter(request.items())) - bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) - bulk_arguments = camel_to_snake_args(spec) - requests.append(bulk_class(**dict(bulk_arguments))) - arguments["requests"] = requests - elif arg_name == "session": - arguments["session"] = entity_map[arguments["session"]] - elif opname == "open_download_stream" and arg_name == "id": - arguments["file_id"] = arguments.pop(arg_name) - elif opname not in ("find", "find_one") and c2s == "max_time_ms": - # find is the only method that accepts snake_case max_time_ms. - # All other methods take kwargs which must use the server's - # camelCase maxTimeMS. See PYTHON-1855. - arguments["maxTimeMS"] = arguments.pop("max_time_ms") - elif opname == "with_transaction" and arg_name == "callback": - if "operations" in arguments[arg_name]: - # CRUD v2 format - callback_ops = arguments[arg_name]["operations"] - else: - # Unified test format - callback_ops = arguments[arg_name] - arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops)) - elif opname == "drop_collection" and arg_name == "collection": - arguments["name_or_collection"] = arguments.pop(arg_name) - elif opname == "create_collection": - if arg_name == "collection": - arguments["name"] = arguments.pop(arg_name) - arguments["check_exists"] = False - # Any other arguments to create_collection are passed through - # **kwargs. - elif opname == "create_index" and arg_name == "keys": - arguments["keys"] = list(arguments.pop(arg_name).items()) - elif opname == "drop_index" and arg_name == "name": - arguments["index_or_name"] = arguments.pop(arg_name) - elif opname == "rename" and arg_name == "to": - arguments["new_name"] = arguments.pop(arg_name) - elif arg_name == "cursorType": - cursor_type = arguments.pop(arg_name) - if cursor_type == "tailable": - arguments["cursor_type"] = CursorType.TAILABLE - elif cursor_type == "tailableAwait": - arguments["cursor_type"] = CursorType.TAILABLE - else: - assert False, f"Unsupported cursorType: {cursor_type}" - else: - arguments[c2s] = arguments.pop(arg_name) diff --git a/test/utils_selection_tests.py b/test/utils_selection_tests.py index e693fc25f0..2772f06070 100644 --- a/test/utils_selection_tests.py +++ b/test/utils_selection_tests.py @@ -13,105 +13,42 @@ # limitations under the License. 
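A minimal usage sketch of the TestCreator pattern deleted above (and re-added later in this patch as SpecTestCreator): the callback body and MyTestCase are hypothetical, only the call shape is taken from the class itself.

import unittest

def create_test(scenario_def, test_def, test_name):
    def run_scenario(self):
        pass  # execute test_def against scenario_def here
    return run_scenario

class MyTestCase(unittest.TestCase):
    pass

TestCreator(create_test, MyTestCase, "/path/to/spec/tests").create_tests()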
"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations import datetime import os import sys +from test import PyMongoTestCase +from test.utils import MockPool sys.path[0:0] = [""] from test import unittest from test.pymongo_mocks import DummyMonitor -from test.utils import MockPool, parse_read_preference +from test.utils_selection_tests_shared import ( + get_addresses, + get_topology_type_name, + make_server_description, +) +from test.utils_shared import parse_read_preference from bson import json_util -from pymongo.common import HEARTBEAT_FREQUENCY, clean_node +from pymongo.common import HEARTBEAT_FREQUENCY from pymongo.errors import AutoReconnect, ConfigurationError -from pymongo.hello import Hello, HelloCompat -from pymongo.server_description import ServerDescription +from pymongo.operations import _Op from pymongo.server_selectors import writable_server_selector -from pymongo.settings import TopologySettings -from pymongo.topology import Topology - - -def get_addresses(server_list): - seeds = [] - hosts = [] - for server in server_list: - seeds.append(clean_node(server["address"])) - hosts.append(server["address"]) - return seeds, hosts - - -def make_last_write_date(server): - epoch = datetime.datetime.utcfromtimestamp(0) - millis = server.get("lastWrite", {}).get("lastWriteDate") - if millis: - diff = ((millis % 1000) + 1000) % 1000 - seconds = (millis - diff) / 1000 - micros = diff * 1000 - return epoch + datetime.timedelta(seconds=seconds, microseconds=micros) - else: - # "Unknown" server. - return epoch - - -def make_server_description(server, hosts): - """Make a ServerDescription from server info in a JSON test.""" - server_type = server["type"] - if server_type in ("Unknown", "PossiblePrimary"): - return ServerDescription(clean_node(server["address"]), Hello({})) - - hello_response = {"ok": True, "hosts": hosts} - if server_type not in ("Standalone", "Mongos", "RSGhost"): - hello_response["setName"] = "rs" - - if server_type == "RSPrimary": - hello_response[HelloCompat.LEGACY_CMD] = True - elif server_type == "RSSecondary": - hello_response["secondary"] = True - elif server_type == "Mongos": - hello_response["msg"] = "isdbgrid" - elif server_type == "RSGhost": - hello_response["isreplicaset"] = True - elif server_type == "RSArbiter": - hello_response["arbiterOnly"] = True - - hello_response["lastWrite"] = {"lastWriteDate": make_last_write_date(server)} - - for field in "maxWireVersion", "tags", "idleWritePeriodMillis": - if field in server: - hello_response[field] = server[field] - - hello_response.setdefault("maxWireVersion", 6) - - # Sets _last_update_time to now. - sd = ServerDescription( - clean_node(server["address"]), - Hello(hello_response), - round_trip_time=server["avg_rtt_ms"] / 1000.0, - ) - - if "lastUpdateTime" in server: - sd._last_update_time = server["lastUpdateTime"] / 1000.0 # ms to sec. - - return sd - - -def get_topology_type_name(scenario_def): - td = scenario_def["topology_description"] - name = td["type"] - if name == "Unknown": - # PyMongo never starts a topology in type Unknown. 
- return "Sharded" if len(td["servers"]) > 1 else "Single" - else: - return name +from pymongo.synchronous.settings import TopologySettings +from pymongo.synchronous.topology import Topology + +_IS_SYNC = True def get_topology_settings_dict(**kwargs): - settings = dict( - monitor_class=DummyMonitor, heartbeat_frequency=HEARTBEAT_FREQUENCY, pool_class=MockPool - ) + settings = { + "monitor_class": DummyMonitor, + "heartbeat_frequency": HEARTBEAT_FREQUENCY, + "pool_class": MockPool, + } settings.update(kwargs) return settings @@ -175,7 +112,7 @@ def run_scenario(self): with self.assertRaises((ConfigurationError, ValueError)): # Error can be raised when making Read Pref or selecting. pref = parse_read_preference(pref_def) - top_latency.select_server(pref) + top_latency.select_server(pref, _Op.TEST) return pref = parse_read_preference(pref_def) @@ -183,18 +120,18 @@ def run_scenario(self): # Select servers. if not scenario_def.get("suitable_servers"): with self.assertRaises(AutoReconnect): - top_suitable.select_server(pref, server_selection_timeout=0) + top_suitable.select_server(pref, _Op.TEST, server_selection_timeout=0) return if not scenario_def["in_latency_window"]: with self.assertRaises(AutoReconnect): - top_latency.select_server(pref, server_selection_timeout=0) + top_latency.select_server(pref, _Op.TEST, server_selection_timeout=0) return - actual_suitable_s = top_suitable.select_servers(pref, server_selection_timeout=0) - actual_latency_s = top_latency.select_servers(pref, server_selection_timeout=0) + actual_suitable_s = top_suitable.select_servers(pref, _Op.TEST, server_selection_timeout=0) + actual_latency_s = top_latency.select_servers(pref, _Op.TEST, server_selection_timeout=0) expected_suitable_servers = {} for server in scenario_def["suitable_servers"]: @@ -240,7 +177,7 @@ def run_scenario(self): def create_selection_tests(test_dir): - class TestAllScenarios(unittest.TestCase): + class TestAllScenarios(PyMongoTestCase): pass for dirpath, _, filenames in os.walk(test_dir): @@ -255,7 +192,7 @@ class TestAllScenarios(unittest.TestCase): # Construct test from scenario. new_test = create_test(scenario_def) - test_name = "test_%s_%s" % (dirname, os.path.splitext(filename)[0]) + test_name = f"test_{dirname}_{os.path.splitext(filename)[0]}" new_test.__name__ = test_name setattr(TestAllScenarios, new_test.__name__, new_test) diff --git a/test/utils_selection_tests_shared.py b/test/utils_selection_tests_shared.py new file mode 100644 index 0000000000..dbaed1034f --- /dev/null +++ b/test/utils_selection_tests_shared.py @@ -0,0 +1,100 @@ +# Copyright 2015-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Utilities for testing Server Selection and Max Staleness.""" +from __future__ import annotations + +import datetime +import os +import sys + +sys.path[0:0] = [""] + +from pymongo.common import MIN_SUPPORTED_WIRE_VERSION, clean_node +from pymongo.hello import Hello, HelloCompat +from pymongo.server_description import ServerDescription + + +def get_addresses(server_list): + seeds = [] + hosts = [] + for server in server_list: + seeds.append(clean_node(server["address"])) + hosts.append(server["address"]) + return seeds, hosts + + +def make_last_write_date(server): + epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc).replace(tzinfo=None) + millis = server.get("lastWrite", {}).get("lastWriteDate") + if millis: + diff = ((millis % 1000) + 1000) % 1000 + seconds = (millis - diff) / 1000 + micros = diff * 1000 + return epoch + datetime.timedelta(seconds=seconds, microseconds=micros) + else: + # "Unknown" server. + return epoch + + +def make_server_description(server, hosts): + """Make a ServerDescription from server info in a JSON test.""" + server_type = server["type"] + if server_type in ("Unknown", "PossiblePrimary"): + return ServerDescription(clean_node(server["address"]), Hello({})) + + hello_response = {"ok": True, "hosts": hosts} + if server_type not in ("Standalone", "Mongos", "RSGhost"): + hello_response["setName"] = "rs" + + if server_type == "RSPrimary": + hello_response[HelloCompat.LEGACY_CMD] = True + elif server_type == "RSSecondary": + hello_response["secondary"] = True + elif server_type == "Mongos": + hello_response["msg"] = "isdbgrid" + elif server_type == "RSGhost": + hello_response["isreplicaset"] = True + elif server_type == "RSArbiter": + hello_response["arbiterOnly"] = True + + hello_response["lastWrite"] = {"lastWriteDate": make_last_write_date(server)} + + for field in "maxWireVersion", "tags", "idleWritePeriodMillis": + if field in server: + hello_response[field] = server[field] + + hello_response.setdefault("maxWireVersion", MIN_SUPPORTED_WIRE_VERSION) + + # Sets _last_update_time to now. + sd = ServerDescription( + clean_node(server["address"]), + Hello(hello_response), + round_trip_time=server["avg_rtt_ms"] / 1000.0, + ) + + if "lastUpdateTime" in server: + sd._last_update_time = server["lastUpdateTime"] / 1000.0 # ms to sec. + + return sd + + +def get_topology_type_name(scenario_def): + td = scenario_def["topology_description"] + name = td["type"] + if name == "Unknown": + # PyMongo never starts a topology in type Unknown. + return "Sharded" if len(td["servers"]) > 1 else "Single" + else: + return name diff --git a/test/utils_shared.py b/test/utils_shared.py new file mode 100644 index 0000000000..72fb943fc1 --- /dev/null +++ b/test/utils_shared.py @@ -0,0 +1,702 @@ +# Copyright 2012-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
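A minimal sketch of the shared helpers above, using a hypothetical spec-test server document:

server = {"address": "a:27017", "type": "RSPrimary", "avg_rtt_ms": 5}
sd = make_server_description(server, hosts=["a:27017", "b:27017"])
assert sd.server_type_name == "RSPrimary"

seeds, hosts = get_addresses([server])
assert seeds == [("a", 27017)]
assert hosts == ["a:27017"]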
+ +"""Shared utilities for testing pymongo""" +from __future__ import annotations + +import asyncio +import contextlib +import copy +import functools +import random +import re +import shutil +import sys +import threading +import unittest +import warnings +from collections import abc, defaultdict +from functools import partial +from inspect import iscoroutinefunction +from test import client_context +from test.asynchronous.utils import async_wait_until +from test.utils import wait_until +from typing import List + +from bson.objectid import ObjectId +from pymongo import monitoring, operations, read_preferences +from pymongo.cursor_shared import CursorType +from pymongo.errors import OperationFailure +from pymongo.helpers_shared import _SENSITIVE_COMMANDS +from pymongo.lock import _async_create_lock, _create_lock +from pymongo.monitoring import ( + ConnectionCheckedInEvent, + ConnectionCheckedOutEvent, + ConnectionCheckOutFailedEvent, + ConnectionCheckOutStartedEvent, + ConnectionClosedEvent, + ConnectionCreatedEvent, + ConnectionReadyEvent, + PoolClearedEvent, + PoolClosedEvent, + PoolCreatedEvent, + PoolReadyEvent, +) +from pymongo.read_concern import ReadConcern +from pymongo.server_type import SERVER_TYPE +from pymongo.synchronous.collection import ReturnDocument +from pymongo.synchronous.pool import _CancellationContext, _PoolGeneration +from pymongo.write_concern import WriteConcern + +IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=50) + + +class BaseListener: + def __init__(self): + self.events = [] + + def reset(self): + self.events = [] + + def add_event(self, event): + self.events.append(event) + + def event_count(self, event_type): + return len(self.events_by_type(event_type)) + + def events_by_type(self, event_type): + """Return the matching events by event class. + + event_type can be a single class or a tuple of classes. 
+ """ + return self.matching(lambda e: isinstance(e, event_type)) + + def matching(self, matcher): + """Return the matching events.""" + return [event for event in self.events[:] if matcher(event)] + + def wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + wait_until(lambda: self.event_count(event) >= count, f"find {count} {event} event(s)") + + async def async_wait_for_event(self, event, count): + """Wait for a number of events to be published, or fail.""" + await async_wait_until( + lambda: self.event_count(event) >= count, f"find {count} {event} event(s)" + ) + + +class CMAPListener(BaseListener, monitoring.ConnectionPoolListener): + def connection_created(self, event): + assert isinstance(event, ConnectionCreatedEvent) + self.add_event(event) + + def connection_ready(self, event): + assert isinstance(event, ConnectionReadyEvent) + self.add_event(event) + + def connection_closed(self, event): + assert isinstance(event, ConnectionClosedEvent) + self.add_event(event) + + def connection_check_out_started(self, event): + assert isinstance(event, ConnectionCheckOutStartedEvent) + self.add_event(event) + + def connection_check_out_failed(self, event): + assert isinstance(event, ConnectionCheckOutFailedEvent) + self.add_event(event) + + def connection_checked_out(self, event): + assert isinstance(event, ConnectionCheckedOutEvent) + self.add_event(event) + + def connection_checked_in(self, event): + assert isinstance(event, ConnectionCheckedInEvent) + self.add_event(event) + + def pool_created(self, event): + assert isinstance(event, PoolCreatedEvent) + self.add_event(event) + + def pool_ready(self, event): + assert isinstance(event, PoolReadyEvent) + self.add_event(event) + + def pool_cleared(self, event): + assert isinstance(event, PoolClearedEvent) + self.add_event(event) + + def pool_closed(self, event): + assert isinstance(event, PoolClosedEvent) + self.add_event(event) + + +class EventListener(BaseListener, monitoring.CommandListener): + def __init__(self): + super().__init__() + self.results = defaultdict(list) + + @property + def started_events(self) -> List[monitoring.CommandStartedEvent]: + return self.results["started"] + + @property + def succeeded_events(self) -> List[monitoring.CommandSucceededEvent]: + return self.results["succeeded"] + + @property + def failed_events(self) -> List[monitoring.CommandFailedEvent]: + return self.results["failed"] + + def started(self, event: monitoring.CommandStartedEvent) -> None: + self.started_events.append(event) + self.add_event(event) + + def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: + self.succeeded_events.append(event) + self.add_event(event) + + def failed(self, event: monitoring.CommandFailedEvent) -> None: + self.failed_events.append(event) + self.add_event(event) + + def started_command_names(self) -> List[str]: + """Return list of command names started.""" + return [event.command_name for event in self.started_events] + + def reset(self) -> None: + """Reset the state of this listener.""" + self.results.clear() + super().reset() + + +class TopologyEventListener(monitoring.TopologyListener): + def __init__(self): + self.results = defaultdict(list) + + def closed(self, event): + self.results["closed"].append(event) + + def description_changed(self, event): + self.results["description_changed"].append(event) + + def opened(self, event): + self.results["opened"].append(event) + + def reset(self): + """Reset the state of this listener.""" + self.results.clear() + + 
+class AllowListEventListener(EventListener): + def __init__(self, *commands): + self.commands = set(commands) + super().__init__() + + def started(self, event): + if event.command_name in self.commands: + super().started(event) + + def succeeded(self, event): + if event.command_name in self.commands: + super().succeeded(event) + + def failed(self, event): + if event.command_name in self.commands: + super().failed(event) + + +class OvertCommandListener(EventListener): + """A CommandListener that ignores sensitive commands.""" + + ignore_list_collections = False + + def started(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().started(event) + + def succeeded(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().succeeded(event) + + def failed(self, event): + if event.command_name.lower() not in _SENSITIVE_COMMANDS: + super().failed(event) + + +class _ServerEventListener: + """Listens to all events.""" + + def __init__(self): + self.results = [] + + def opened(self, event): + self.results.append(event) + + def description_changed(self, event): + self.results.append(event) + + def closed(self, event): + self.results.append(event) + + def matching(self, matcher): + """Return the matching events.""" + results = self.results[:] + return [event for event in results if matcher(event)] + + def reset(self): + self.results = [] + + +class ServerEventListener(_ServerEventListener, monitoring.ServerListener): + """Listens to Server events.""" + + +class ServerAndTopologyEventListener( # type: ignore[misc] + ServerEventListener, monitoring.TopologyListener +): + """Listens to Server and Topology events.""" + + +class HeartbeatEventListener(BaseListener, monitoring.ServerHeartbeatListener): + """Listens to only server heartbeat events.""" + + def started(self, event): + self.add_event(event) + + def succeeded(self, event): + self.add_event(event) + + def failed(self, event): + self.add_event(event) + + +class HeartbeatEventsListListener(HeartbeatEventListener): + """Listens to only server heartbeat events and publishes them to a provided list.""" + + def __init__(self, events): + super().__init__() + self.event_list = events + + def started(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatStartedEvent") + + def succeeded(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatSucceededEvent") + + def failed(self, event): + self.add_event(event) + self.event_list.append("serverHeartbeatFailedEvent") + + +class ScenarioDict(dict): + """Dict that returns {} for any unknown key, recursively.""" + + def __init__(self, data): + def convert(v): + if isinstance(v, abc.Mapping): + return ScenarioDict(v) + if isinstance(v, (str, bytes)): + return v + if isinstance(v, abc.Sequence): + return [convert(item) for item in v] + return v + + dict.__init__(self, [(k, convert(v)) for k, v in data.items()]) + + def __getitem__(self, item): + try: + return dict.__getitem__(self, item) + except KeyError: + # Unlike a defaultdict, don't set the key, just return a dict. 
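# For illustration: ScenarioDict({})["a"]["b"] == {} holds, and neither
# "a" nor "b" is inserted into the dict by the lookup.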
+ return ScenarioDict({}) + + +class CompareType: + """Class that compares equal to any object of the given type(s).""" + + def __init__(self, types): + self.types = types + + def __eq__(self, other): + return isinstance(other, self.types) + + +class FunctionCallRecorder: + """Utility class to wrap a callable and record its invocations.""" + + def __init__(self, function): + self._function = function + self._call_list = [] + + def __call__(self, *args, **kwargs): + self._call_list.append((args, kwargs)) + if iscoroutinefunction(self._function): + return self._function(*args, **kwargs) + else: + return self._function(*args, **kwargs) + + def reset(self): + """Wipes the call list.""" + self._call_list = [] + + def call_list(self): + """Returns a copy of the call list.""" + return self._call_list[:] + + @property + def call_count(self): + """Returns the number of times the function has been called.""" + return len(self._call_list) + + +def one(s): + """Get one element of a set""" + return next(iter(s)) + + +def oid_generated_on_process(oid): + """Makes a determination as to whether the given ObjectId was generated + by the current process, based on the 5-byte random number in the ObjectId. + """ + return ObjectId._random() == oid.binary[4:9] + + +def delay(sec): + return """function() { sleep(%f * 1000); return true; }""" % sec + + +def camel_to_snake(camel): + # Regex to convert CamelCase to snake_case. + snake = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel) + return re.sub("([a-z0-9])([A-Z])", r"\1_\2", snake).lower() + + +def camel_to_upper_camel(camel): + return camel[0].upper() + camel[1:] + + +def camel_to_snake_args(arguments): + for arg_name in list(arguments): + c2s = camel_to_snake(arg_name) + arguments[c2s] = arguments.pop(arg_name) + return arguments + + +def snake_to_camel(snake): + # Regex to convert snake_case to lowerCamelCase. + return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), snake) + + +def parse_collection_options(opts): + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) + + if "writeConcern" in opts: + opts["write_concern"] = WriteConcern(**dict(opts.pop("writeConcern"))) + + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + + if "timeoutMS" in opts: + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 + return opts + + +@contextlib.contextmanager +def _ignore_deprecations(): + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + yield + + +def ignore_deprecations(wrapped=None): + """A context manager or a decorator.""" + if wrapped: + if iscoroutinefunction(wrapped): + + @functools.wraps(wrapped) + async def wrapper(*args, **kwargs): + with _ignore_deprecations(): + return await wrapped(*args, **kwargs) + else: + + @functools.wraps(wrapped) + def wrapper(*args, **kwargs): + with _ignore_deprecations(): + return wrapped(*args, **kwargs) + + return wrapper + + else: + return _ignore_deprecations() + + +class DeprecationFilter: + def __init__(self, action="ignore"): + """Start filtering deprecations.""" + self.warn_context = warnings.catch_warnings() + self.warn_context.__enter__() + warnings.simplefilter(action, DeprecationWarning) + + def stop(self): + """Stop filtering deprecations.""" + self.warn_context.__exit__() # type: ignore + self.warn_context = None # type: ignore + + +# Constants for run_threads and lazy_client_trial. 
+NTRIALS = 5 +NTHREADS = 10 + + +def run_threads(collection, target): + """Run a target function in many threads. + + target is a function taking a Collection and an integer. + """ + threads = [] + for i in range(NTHREADS): + bound_target = partial(target, collection, i) + threads.append(threading.Thread(target=bound_target)) + + for t in threads: + t.start() + + for t in threads: + t.join(60) + assert not t.is_alive() + + +@contextlib.contextmanager +def frequent_thread_switches(): + """Make concurrency bugs more likely to manifest.""" + interval = sys.getswitchinterval() + sys.setswitchinterval(1e-6) + + try: + yield + finally: + sys.setswitchinterval(interval) + + +def lazy_client_trial(reset, target, test, get_client): + """Test concurrent operations on a lazily-connecting client. + + `reset` takes a collection and resets it for the next trial. + + `target` takes a lazily-connecting collection and an index from + 0 to NTHREADS, and performs some operation, e.g. an insert. + + `test` takes the lazily-connecting collection and asserts a + post-condition to prove `target` succeeded. + """ + collection = client_context.client.pymongo_test.test + + with frequent_thread_switches(): + for _i in range(NTRIALS): + reset(collection) + lazy_client = get_client() + lazy_collection = lazy_client.pymongo_test.test + run_threads(lazy_collection, target) + test(lazy_collection) + + +def gevent_monkey_patched(): + """Check if gevent's monkey patching is active.""" + try: + import socket + + import gevent.socket # type:ignore[import] + + return socket.socket is gevent.socket.socket + except ImportError: + return False + + +def is_greenthread_patched(): + return gevent_monkey_patched() + + +def parse_read_preference(pref): + # Make first letter lowercase to match read_pref's modes. + mode_string = pref.get("mode", "primary") + mode_string = mode_string[:1].lower() + mode_string[1:] + mode = read_preferences.read_pref_mode_from_name(mode_string) + max_staleness = pref.get("maxStalenessSeconds", -1) + tag_sets = pref.get("tagSets") or pref.get("tag_sets") + return read_preferences.make_read_preference( + mode, tag_sets=tag_sets, max_staleness=max_staleness + ) + + +def server_name_to_type(name): + """Convert a ServerType name to the corresponding value. For SDAM tests.""" + # Special case, some tests in the spec include the PossiblePrimary + # type, but only single-threaded drivers need that type. We call + # possible primaries Unknown. 
+ if name == "PossiblePrimary": + return SERVER_TYPE.Unknown + return getattr(SERVER_TYPE, name) + + +def cat_files(dest, *sources): + """Cat multiple files into dest.""" + with open(dest, "wb") as fdst: + for src in sources: + with open(src, "rb") as fsrc: + shutil.copyfileobj(fsrc, fdst) + + +@contextlib.contextmanager +def assertion_context(msg): + """A context manager that adds info to an assertion failure.""" + try: + yield + except AssertionError as exc: + raise AssertionError(f"{msg}: {exc}") + + +def parse_spec_options(opts): + if "readPreference" in opts: + opts["read_preference"] = parse_read_preference(opts.pop("readPreference")) + + if "writeConcern" in opts: + w_opts = opts.pop("writeConcern") + if "journal" in w_opts: + w_opts["j"] = w_opts.pop("journal") + if "wtimeoutMS" in w_opts: + w_opts["wtimeout"] = w_opts.pop("wtimeoutMS") + opts["write_concern"] = WriteConcern(**dict(w_opts)) + + if "readConcern" in opts: + opts["read_concern"] = ReadConcern(**dict(opts.pop("readConcern"))) + + if "timeoutMS" in opts: + assert isinstance(opts["timeoutMS"], int) + opts["timeout"] = int(opts.pop("timeoutMS")) / 1000.0 + + if "maxTimeMS" in opts: + opts["max_time_ms"] = opts.pop("maxTimeMS") + + if "maxCommitTimeMS" in opts: + opts["max_commit_time_ms"] = opts.pop("maxCommitTimeMS") + + return dict(opts) + + +def prepare_spec_arguments(spec, arguments, opname, entity_map, with_txn_callback): + for arg_name in list(arguments): + c2s = camel_to_snake(arg_name) + # Named "key" instead not fieldName. + if arg_name == "fieldName": + arguments["key"] = arguments.pop(arg_name) + # Aggregate uses "batchSize", while find uses batch_size. + elif (arg_name == "batchSize" or arg_name == "allowDiskUse") and opname == "aggregate": + continue + elif arg_name == "bypassDocumentValidation" and ( + opname == "aggregate" or "find_one_and" in opname + ): + continue + elif arg_name == "timeoutMode": + raise unittest.SkipTest("PyMongo does not support timeoutMode") + # Requires boolean returnDocument. + elif arg_name == "returnDocument": + arguments[c2s] = getattr(ReturnDocument, arguments.pop(arg_name).upper()) + elif "bulk_write" in opname and (c2s == "requests" or c2s == "models"): + # Parse each request into a bulk write model. + requests = [] + for request in arguments[c2s]: + if "name" in request: + # CRUD v2 format + bulk_model = camel_to_upper_camel(request["name"]) + bulk_class = getattr(operations, bulk_model) + bulk_arguments = camel_to_snake_args(request["arguments"]) + else: + # Unified test format + bulk_model, spec = next(iter(request.items())) + bulk_class = getattr(operations, camel_to_upper_camel(bulk_model)) + bulk_arguments = camel_to_snake_args(spec) + requests.append(bulk_class(**dict(bulk_arguments))) + arguments[c2s] = requests + elif arg_name == "session": + arguments["session"] = entity_map[arguments["session"]] + elif opname == "open_download_stream" and arg_name == "id": + arguments["file_id"] = arguments.pop(arg_name) + elif opname not in ("find", "find_one") and c2s == "max_time_ms": + # find is the only method that accepts snake_case max_time_ms. + # All other methods take kwargs which must use the server's + # camelCase maxTimeMS. See PYTHON-1855. 
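# For illustration: find(..., max_time_ms=500) is accepted, but for the
# other helpers the option must be spelled maxTimeMS so it passes through
# **kwargs to the server unchanged.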
+ arguments["maxTimeMS"] = arguments.pop("max_time_ms") + elif opname == "with_transaction" and arg_name == "callback": + if "operations" in arguments[arg_name]: + # CRUD v2 format + callback_ops = arguments[arg_name]["operations"] + else: + # Unified test format + callback_ops = arguments[arg_name] + arguments["callback"] = lambda _: with_txn_callback(copy.deepcopy(callback_ops)) + elif opname == "drop_collection" and arg_name == "collection": + arguments["name_or_collection"] = arguments.pop(arg_name) + elif opname == "create_collection": + if arg_name == "collection": + arguments["name"] = arguments.pop(arg_name) + arguments["check_exists"] = False + # Any other arguments to create_collection are passed through + # **kwargs. + elif opname == "create_index" and arg_name == "keys": + arguments["keys"] = list(arguments.pop(arg_name).items()) + elif opname == "drop_index" and arg_name == "name": + arguments["index_or_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "to": + arguments["new_name"] = arguments.pop(arg_name) + elif opname == "rename" and arg_name == "dropTarget": + arguments["dropTarget"] = arguments.pop(arg_name) + elif arg_name == "cursorType": + cursor_type = arguments.pop(arg_name) + if cursor_type == "tailable": + arguments["cursor_type"] = CursorType.TAILABLE + elif cursor_type == "tailableAwait": + arguments["cursor_type"] = CursorType.TAILABLE + else: + raise AssertionError(f"Unsupported cursorType: {cursor_type}") + else: + arguments[c2s] = arguments.pop(arg_name) + + +def create_async_event(): + return asyncio.Event() + + +def create_event(): + return threading.Event() + + +def async_create_barrier(n_tasks: int): + return asyncio.Barrier(n_tasks) + + +def create_barrier(n_tasks: int, timeout: float | None = None): + return threading.Barrier(n_tasks, timeout=timeout) + + +async def async_barrier_wait(barrier, timeout: float | None = None): + await asyncio.wait_for(barrier.wait(), timeout=timeout) + + +def barrier_wait(barrier, timeout: float | None = None): + barrier.wait(timeout=timeout) diff --git a/test/utils_spec_runner.py b/test/utils_spec_runner.py index f8ad26efe7..46adeaefb5 100644 --- a/test/utils_spec_runner.py +++ b/test/utils_spec_runner.py @@ -13,49 +13,57 @@ # limitations under the License. 
"""Utilities for testing driver specs.""" +from __future__ import annotations +import asyncio import functools -import threading +import os +import time +import unittest from collections import abc +from inspect import iscoroutinefunction from test import IntegrationTest, client_context, client_knobs -from test.utils import ( +from test.helpers import ConcurrentRunner +from test.utils_shared import ( CMAPListener, CompareType, EventListener, OvertCommandListener, + ScenarioDict, ServerAndTopologyEventListener, camel_to_snake, camel_to_snake_args, parse_spec_options, prepare_spec_arguments, - rs_client, ) from typing import List -from bson import decode, encode +from bson import ObjectId, decode, encode, json_util from bson.binary import Binary from bson.int64 import Int64 from bson.son import SON from gridfs import GridFSBucket -from pymongo import client_session -from pymongo.command_cursor import CommandCursor -from pymongo.cursor import Cursor -from pymongo.errors import BulkWriteError, OperationFailure, PyMongoError +from gridfs.synchronous.grid_file import GridFSBucket +from pymongo.errors import AutoReconnect, BulkWriteError, OperationFailure, PyMongoError +from pymongo.lock import _cond_wait, _create_condition, _create_lock from pymongo.read_concern import ReadConcern from pymongo.read_preferences import ReadPreference from pymongo.results import BulkWriteResult, _WriteResult +from pymongo.synchronous import client_session +from pymongo.synchronous.command_cursor import CommandCursor +from pymongo.synchronous.cursor import Cursor from pymongo.write_concern import WriteConcern +_IS_SYNC = True -class SpecRunnerThread(threading.Thread): + +class SpecRunnerThread(ConcurrentRunner): def __init__(self, name): - super(SpecRunnerThread, self).__init__() - self.name = name + super().__init__(name=name) self.exc = None - self.setDaemon(True) - self.cond = threading.Condition() + self.daemon = True + self.cond = _create_condition(_create_lock()) self.ops = [] - self.stopped = False def schedule(self, work): self.ops.append(work) @@ -71,7 +79,7 @@ def run(self): while not self.stopped or self.ops: if not self.ops: with self.cond: - self.cond.wait(10) + _cond_wait(self.cond, 10) if self.ops: try: work = self.ops.pop(0) @@ -81,42 +89,163 @@ def run(self): self.stop() +class SpecTestCreator: + """Class to create test cases from specifications.""" + + def __init__(self, create_test, test_class, test_path): + """Create a TestCreator object. + + :Parameters: + - `create_test`: callback that returns a test case. The callback + must accept the following arguments - a dictionary containing the + entire test specification (the `scenario_def`), a dictionary + containing the specification for which the test case will be + generated (the `test_def`). + - `test_class`: the unittest.TestCase class in which to create the + test case. + - `test_path`: path to the directory containing the JSON files with + the test specifications. + """ + self._create_test = create_test + self._test_class = test_class + self.test_path = test_path + + def _ensure_min_max_server_version(self, scenario_def, method): + """Test modifier that enforces a version range for the server on a + test case. 
+ """ + if "minServerVersion" in scenario_def: + min_ver = tuple(int(elt) for elt in scenario_def["minServerVersion"].split(".")) + if min_ver is not None: + method = client_context.require_version_min(*min_ver)(method) + + if "maxServerVersion" in scenario_def: + max_ver = tuple(int(elt) for elt in scenario_def["maxServerVersion"].split(".")) + if max_ver is not None: + method = client_context.require_version_max(*max_ver)(method) + + return method + + @staticmethod + def valid_topology(run_on_req): + return client_context.is_topology_type( + run_on_req.get("topology", ["single", "replicaset", "sharded", "load-balanced"]) + ) + + @staticmethod + def min_server_version(run_on_req): + version = run_on_req.get("minServerVersion") + if version: + min_ver = tuple(int(elt) for elt in version.split(".")) + return client_context.version >= min_ver + return True + + @staticmethod + def max_server_version(run_on_req): + version = run_on_req.get("maxServerVersion") + if version: + max_ver = tuple(int(elt) for elt in version.split(".")) + return client_context.version <= max_ver + return True + + @staticmethod + def valid_auth_enabled(run_on_req): + if "authEnabled" in run_on_req: + if run_on_req["authEnabled"]: + return client_context.auth_enabled + return not client_context.auth_enabled + return True + + def should_run_on(self, scenario_def): + run_on = scenario_def.get("runOn", []) + if not run_on: + # Always run these tests. + return True + + for req in run_on: + if ( + self.valid_topology(req) + and self.min_server_version(req) + and self.max_server_version(req) + and self.valid_auth_enabled(req) + ): + return True + return False + + def ensure_run_on(self, scenario_def, method): + """Test modifier that enforces a 'runOn' on a test case.""" + + def predicate(): + return self.should_run_on(scenario_def) + + return client_context._require(predicate, "runOn not satisfied", method) + + def tests(self, scenario_def): + """Allow CMAP spec test to override the location of test.""" + return scenario_def["tests"] + + def _create_tests(self): + for dirpath, _, filenames in os.walk(self.test_path): + dirname = os.path.split(dirpath)[-1] + + for filename in filenames: + with open(os.path.join(dirpath, filename)) as scenario_stream: # noqa: ASYNC101, RUF100 + # Use tz_aware=False to match how CodecOptions decodes + # dates. + opts = json_util.JSONOptions(tz_aware=False) + scenario_def = ScenarioDict( + json_util.loads(scenario_stream.read(), json_options=opts) + ) + + test_type = os.path.splitext(filename)[0] + + # Construct test from scenario. + for test_def in self.tests(scenario_def): + test_name = "test_{}_{}_{}".format( + dirname, + test_type.replace("-", "_").replace(".", "_"), + str(test_def["description"].replace(" ", "_").replace(".", "_")), + ) + + new_test = self._create_test(scenario_def, test_def, test_name) + new_test = self._ensure_min_max_server_version(scenario_def, new_test) + new_test = self.ensure_run_on(scenario_def, new_test) + + new_test.__name__ = test_name + setattr(self._test_class, new_test.__name__, new_test) + + def create_tests(self): + if _IS_SYNC: + self._create_tests() + else: + asyncio.run(self._create_tests()) + + class SpecRunner(IntegrationTest): mongos_clients: List knobs: client_knobs listener: EventListener - @classmethod - def setUpClass(cls): - super(SpecRunner, cls).setUpClass() - cls.mongos_clients = [] + def setUp(self) -> None: + super().setUp() + self.mongos_clients = [] # Speed up the tests by decreasing the heartbeat frequency. 
- cls.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) - cls.knobs.enable() - - @classmethod - def tearDownClass(cls): - cls.knobs.disable() - super(SpecRunner, cls).tearDownClass() - - def setUp(self): - super(SpecRunner, self).setUp() + self.knobs = client_knobs(heartbeat_frequency=0.1, min_heartbeat_interval=0.1) + self.knobs.enable() self.targets = {} self.listener = None # type: ignore self.pool_listener = None self.server_listener = None self.maxDiff = None - def _set_fail_point(self, client, command_args): - cmd = SON([("configureFailPoint", "failCommand")]) - cmd.update(command_args) - client.admin.command(cmd) + def tearDown(self) -> None: + self.knobs.disable() def set_fail_point(self, command_args): clients = self.mongos_clients if self.mongos_clients else [self.client] for client in clients: - self._set_fail_point(client, command_args) + self.configure_fail_point(client, command_args) def targeted_fail_point(self, session, fail_point): """Run the targetedFailPoint test operation. @@ -125,7 +254,7 @@ def targeted_fail_point(self, session, fail_point): """ clients = {c.address: c for c in self.mongos_clients} client = clients[session._pinned_address] - self._set_fail_point(client, fail_point) + self.configure_fail_point(client, fail_point) self.addCleanup(self.set_fail_point, {"mode": "off"}) def assert_session_pinned(self, session): @@ -163,6 +292,10 @@ def assert_index_not_exists(self, database, collection, index): coll = self.client[database][collection] self.assertNotIn(index, [doc["name"] for doc in coll.list_indexes()]) + def wait(self, ms): + """Run the "wait" test operation.""" + time.sleep(ms / 1000.0) + def assertErrorLabelsContain(self, exc, expected_labels): labels = [l for l in expected_labels if exc.has_error_label(l)] self.assertEqual(labels, expected_labels) @@ -170,7 +303,7 @@ def assertErrorLabelsContain(self, exc, expected_labels): def assertErrorLabelsOmit(self, exc, omit_labels): for label in omit_labels: self.assertFalse( - exc.has_error_label(label), msg="error labels should not contain %s" % (label,) + exc.has_error_label(label), msg=f"error labels should not contain {label}" ) def kill_all_sessions(self): @@ -178,9 +311,10 @@ def kill_all_sessions(self): for client in clients: try: client.admin.command("killAllSessions", []) - except OperationFailure: + except (OperationFailure, AutoReconnect): # "operation was interrupted" by killing the command's # own session. + # On 8.0+ killAllSessions sometimes returns a network error. pass def check_command_result(self, expected_result, result): @@ -242,6 +376,7 @@ def _helper(expected_result, result): self.assertEqual(expected_result, result) _helper(expected_result, result) + return None def get_object_name(self, op): """Allow subclasses to override handling of 'object' @@ -307,7 +442,10 @@ def run_operation(self, sessions, collection, operation): args.update(arguments) arguments = args - result = cmd(**dict(arguments)) + if not _IS_SYNC and iscoroutinefunction(cmd): + result = cmd(**dict(arguments)) + else: + result = cmd(**dict(arguments)) # Cleanup open change stream cursors. 
if name == "watch": self.addCleanup(result.close) @@ -323,7 +461,7 @@ def run_operation(self, sessions, collection, operation): result = Binary(result.read()) if isinstance(result, Cursor) or isinstance(result, CommandCursor): - return list(result) + return result.to_list() return result @@ -335,23 +473,25 @@ def _run_op(self, sessions, collection, op, in_with_transaction): expected_result = op.get("result") if expect_error(op): with self.assertRaises(self.allowable_errors(op), msg=op["name"]) as context: - out = self.run_operation(sessions, collection, op.copy()) + self.run_operation(sessions, collection, op.copy()) + exc = context.exception if expect_error_message(expected_result): - if isinstance(context.exception, BulkWriteError): - errmsg = str(context.exception.details).lower() + if isinstance(exc, BulkWriteError): + errmsg = str(exc.details).lower() else: - errmsg = str(context.exception).lower() + errmsg = str(exc).lower() self.assertIn(expected_result["errorContains"].lower(), errmsg) if expect_error_code(expected_result): - self.assertEqual( - expected_result["errorCodeName"], context.exception.details.get("codeName") - ) + self.assertEqual(expected_result["errorCodeName"], exc.details.get("codeName")) if expect_error_labels_contain(expected_result): - self.assertErrorLabelsContain( - context.exception, expected_result["errorLabelsContain"] - ) + self.assertErrorLabelsContain(exc, expected_result["errorLabelsContain"]) if expect_error_labels_omit(expected_result): - self.assertErrorLabelsOmit(context.exception, expected_result["errorLabelsOmit"]) + self.assertErrorLabelsOmit(exc, expected_result["errorLabelsOmit"]) + if expect_timeout_error(expected_result): + self.assertIsInstance(exc, PyMongoError) + if not exc.timeout: + # Re-raise the exception for better diagnostics. + raise exc # Reraise the exception if we're in the with_transaction # callback. @@ -371,16 +511,16 @@ def run_operations(self, sessions, collection, ops, in_with_transaction=False): # TODO: factor with test_command_monitoring.py def check_events(self, test, listener, session_ids): - res = listener.results + events = listener.started_events if not len(test["expectations"]): return # Give a nicer message when there are missing or extra events - cmds = decode_raw([event.command for event in res["started"]]) - self.assertEqual(len(res["started"]), len(test["expectations"]), cmds) + cmds = decode_raw([event.command for event in events]) + self.assertEqual(len(events), len(test["expectations"]), cmds) for i, expectation in enumerate(test["expectations"]): event_type = next(iter(expectation)) - event = res["started"][i] + event = events[i] # The tests substitute 42 for any number other than 0. 
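        # (A spec file that expects {"getMore": 42} therefore means "any
        # non-zero cursor id"; the actual command is normalized to 42 below
        # before the comparison.)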
if event.command_name == "getMore" and event.command["getMore"]: @@ -423,12 +563,12 @@ def check_events(self, test, listener, session_ids): for key, val in expected.items(): if val is None: if key in actual: - self.fail("Unexpected key [%s] in %r" % (key, actual)) + self.fail(f"Unexpected key [{key}] in {actual!r}") elif key not in actual: - self.fail("Expected key [%s] in %r" % (key, actual)) + self.fail(f"Expected key [{key}] in {actual!r}") else: self.assertEqual( - val, decode_raw(actual[key]), "Key [%s] in %s" % (key, actual) + val, decode_raw(actual[key]), f"Key [{key}] in {actual}" ) else: self.assertEqual(actual, expected) @@ -451,7 +591,8 @@ def get_outcome_coll_name(self, outcome, collection): def run_test_ops(self, sessions, collection, test): """Added to allow retryable writes spec to override a test's - operation.""" + operation. + """ self.run_operations(sessions, collection, test["operations"]) def parse_client_options(self, opts): @@ -507,25 +648,20 @@ def run_scenario(self, scenario_def, test): server_listener = ServerAndTopologyEventListener() # Create a new client, to avoid interference from pooled sessions. client_options = self.parse_client_options(test["clientOptions"]) - # MMAPv1 does not support retryable writes. - if client_options.get("retryWrites") is True and client_context.storage_engine == "mmapv1": - self.skipTest("MMAPv1 does not support retryWrites=True") use_multi_mongos = test["useMultipleMongoses"] host = None if use_multi_mongos: - if client_context.load_balancer or client_context.serverless: + if client_context.load_balancer: host = client_context.MULTI_MONGOS_LB_URI elif client_context.is_mongos: host = client_context.mongos_seeds() - client = rs_client( + client = self.rs_client( h=host, event_listeners=[listener, pool_listener, server_listener], **client_options ) self.scenario_client = client self.listener = listener self.pool_listener = pool_listener self.server_listener = server_listener - # Close the client explicitly to avoid having too many threads open. - self.addCleanup(client.close) # Create session0 and session1. sessions = {} @@ -575,7 +711,7 @@ def run_scenario(self, scenario_def, test): read_preference=ReadPreference.PRIMARY, read_concern=ReadConcern("local"), ) - actual_data = list(outcome_coll.find(sort=[("_id", 1)])) + actual_data = outcome_coll.find(sort=[("_id", 1)]).to_list() # The expected data needs to be the left hand side here otherwise # CompareType(Binary) doesn't work. 
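For reference, the outcome verification above reduces to the hedged sketch below (the helper name is invented). The spec runners read the outcome collection with primary read preference and read concern "local" so the assertions observe exactly the writes the test just performed:

    from pymongo import MongoClient, ReadPreference
    from pymongo.read_concern import ReadConcern

    def check_outcome(client: MongoClient, db_name: str, coll_name: str, expected: list) -> None:
        # PRIMARY + "local" sees the test's own writes rather than stale
        # secondary data or only the majority-committed state.
        coll = client[db_name].get_collection(
            coll_name,
            read_preference=ReadPreference.PRIMARY,
            read_concern=ReadConcern("local"),
        )
        actual = list(coll.find(sort=[("_id", 1)]))
        # Keep the expected documents on the left-hand side: the CompareType
        # wrappers used above only implement __eq__ on the expected side.
        assert expected == actual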
@@ -617,6 +753,13 @@ def expect_error_labels_omit(expected_result): return False +def expect_timeout_error(expected_result): + if isinstance(expected_result, dict): + return expected_result["isTimeoutError"] + + return False + + def expect_error(op): expected_result = op.get("result") return ( @@ -625,6 +768,7 @@ def expect_error(op): or expect_error_code(expected_result) or expect_error_labels_contain(expected_result) or expect_error_labels_omit(expected_result) + or expect_timeout_error(expected_result) ) @@ -644,6 +788,11 @@ def decode_raw(val): TYPES = { "binData": Binary, "long": Int64, + "int": int, + "string": str, + "objectId": ObjectId, + "object": dict, + "array": list, } @@ -654,7 +803,11 @@ def wrap_types(val): if isinstance(val, abc.Mapping): typ = val.get("$$type") if typ: - return CompareType(TYPES[typ]) + if isinstance(typ, str): + types = TYPES[typ] + else: + types = tuple(TYPES[t] for t in typ) + return CompareType(types) d = {} for key in val: d[key] = wrap_types(val[key]) diff --git a/test/version.py b/test/version.py index e102db7111..ae6ecb331f 100644 --- a/test/version.py +++ b/test/version.py @@ -13,65 +13,12 @@ # limitations under the License. """Some tools for running tests based on MongoDB server version.""" +from __future__ import annotations +from pymongo.common import Version as BaseVersion -class Version(tuple): - def __new__(cls, *version): - padded_version = cls._padded(version, 4) - return super(Version, cls).__new__(cls, tuple(padded_version)) - - @classmethod - def _padded(cls, iter, length, padding=0): - l = list(iter) - if len(l) < length: - for _ in range(length - len(l)): - l.append(padding) - return l - - @classmethod - def from_string(cls, version_string): - mod = 0 - bump_patch_level = False - if version_string.endswith("+"): - version_string = version_string[0:-1] - mod = 1 - elif version_string.endswith("-pre-"): - version_string = version_string[0:-5] - mod = -1 - elif version_string.endswith("-"): - version_string = version_string[0:-1] - mod = -1 - # Deal with '-rcX' substrings - if "-rc" in version_string: - version_string = version_string[0 : version_string.find("-rc")] - mod = -1 - # Deal with git describe generated substrings - elif "-" in version_string: - version_string = version_string[0 : version_string.find("-")] - mod = -1 - bump_patch_level = True - - version = [int(part) for part in version_string.split(".")] - version = cls._padded(version, 3) - # Make from_string and from_version_array agree. 
For example: - # MongoDB Enterprise > db.runCommand('buildInfo').versionArray - # [ 3, 2, 1, -100 ] - # MongoDB Enterprise > db.runCommand('buildInfo').version - # 3.2.0-97-g1ef94fe - if bump_patch_level: - version[-1] += 1 - version.append(mod) - - return Version(*version) - - @classmethod - def from_version_array(cls, version_array): - version = list(version_array) - if version[-1] < 0: - version[-1] = -1 - version = cls._padded(version, 3) - return Version(*version) +class Version(BaseVersion): @classmethod def from_client(cls, client): info = client.server_info() @@ -79,8 +26,9 @@ def from_client(cls, client): return cls.from_version_array(info["versionArray"]) return cls.from_string(info["version"]) - def at_least(self, *other_version): - return self >= Version(*other_version) - - def __str__(self): - return ".".join(map(str, self)) + @classmethod + async def async_from_client(cls, client): + info = await client.server_info() + if "versionArray" in info: + return cls.from_version_array(info["versionArray"]) + return cls.from_string(info["version"]) diff --git a/test/versioned-api/crud-api-version-1.json b/test/versioned-api/crud-api-version-1.json index a387d0587e..23ef59a6d9 100644 --- a/test/versioned-api/crud-api-version-1.json +++ b/test/versioned-api/crud-api-version-1.json @@ -50,7 +50,8 @@ }, "apiDeprecationErrors": true } - ] + ], + "namespace": "versioned-api-tests.test" }, "initialData": [ { @@ -426,6 +427,86 @@ } ] }, + { + "description": "client bulkWrite appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "versioned-api-tests.test", + "document": { + "_id": 6, + "x": 6 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 6 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 6, + "x": 6 + } + } + ], + "nsInfo": [ + { + "ns": "versioned-api-tests.test" + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, { "description": "countDocuments appends declared API version", "operations": [ diff --git a/test/versioned-api/transaction-handling.json b/test/versioned-api/transaction-handling.json index c00c5240ae..32031296af 100644 --- a/test/versioned-api/transaction-handling.json +++ b/test/versioned-api/transaction-handling.json @@ -6,7 +6,7 @@ "minServerVersion": "4.9", "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } @@ -92,7 +92,7 @@ { "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } @@ -221,7 +221,7 @@ { "topologies": [ "replicaset", - "sharded-replicaset", + "sharded", "load-balanced" ] } diff --git a/tools/clean.py b/tools/clean.py index 0ea31fc3d9..b6e1867a0a 100644 --- a/tools/clean.py +++ b/tools/clean.py @@ -16,20 +16,21 @@ Only really intended to be used by internal build scripts. 
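+Deletes any compiled _cmessage/_cbson artifacts and then verifies that the C
+extensions can no longer be imported.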
""" +from __future__ import annotations -import os import sys +from pathlib import Path try: - os.remove("pymongo/_cmessage.so") - os.remove("bson/_cbson.so") -except BaseException: + Path("pymongo/_cmessage.so").unlink() + Path("bson/_cbson.so").unlink() +except BaseException: # noqa: S110 pass try: - os.remove("pymongo/_cmessage.pyd") - os.remove("bson/_cbson.pyd") -except BaseException: + Path("pymongo/_cmessage.pyd").unlink() + Path("bson/_cbson.pyd").unlink() +except BaseException: # noqa: S110 pass try: @@ -40,7 +41,7 @@ pass try: - from bson import _cbson # noqa: F401 + from bson import _cbson # type: ignore[attr-defined] # noqa: F401 sys.exit("could still import _cbson") except ImportError: diff --git a/tools/compare_import_time.py b/tools/compare_import_time.py new file mode 100644 index 0000000000..fdc344f2e9 --- /dev/null +++ b/tools/compare_import_time.py @@ -0,0 +1,37 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +import sys + +base_sha = sys.argv[-1] +head_sha = sys.argv[-2] + + +def get_total_time(sha: str) -> int: + with open(f"pymongo-{sha}.log") as fid: + last_line = fid.readlines()[-1] + return int(last_line.split()[4]) + + +base_time = get_total_time(base_sha) +curr_time = get_total_time(head_sha) + +# Check if we got 20% or more slower. 
+change = int((curr_time - base_time) / base_time * 100) +if change > 20: + print(f"PyMongo import got {change} percent worse") + sys.exit(1) + +print(f"Import time changed by {change} percent") diff --git a/tools/convert_test_to_async.py b/tools/convert_test_to_async.py new file mode 100644 index 0000000000..6c68c34bf3 --- /dev/null +++ b/tools/convert_test_to_async.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +import inspect +import sys + +from pymongo import AsyncMongoClient +from pymongo.asynchronous.collection import AsyncCollection +from pymongo.asynchronous.command_cursor import AsyncCommandCursor +from pymongo.asynchronous.cursor import AsyncCursor +from pymongo.asynchronous.database import AsyncDatabase + +replacements = { + "Collection": "AsyncCollection", + "Database": "AsyncDatabase", + "Cursor": "AsyncCursor", + "MongoClient": "AsyncMongoClient", + "CommandCursor": "AsyncCommandCursor", + "RawBatchCursor": "AsyncRawBatchCursor", + "RawBatchCommandCursor": "AsyncRawBatchCommandCursor", + "ClientSession": "AsyncClientSession", + "ChangeStream": "AsyncChangeStream", + "CollectionChangeStream": "AsyncCollectionChangeStream", + "DatabaseChangeStream": "AsyncDatabaseChangeStream", + "ClusterChangeStream": "AsyncClusterChangeStream", + "_Bulk": "_AsyncBulk", + "_ClientBulk": "_AsyncClientBulk", + "Connection": "AsyncConnection", + "synchronous": "asynchronous", + "Synchronous": "Asynchronous", + "next": "await anext", + "_Lock": "_ALock", + "_Condition": "_ACondition", + "GridFS": "AsyncGridFS", + "GridFSBucket": "AsyncGridFSBucket", + "GridIn": "AsyncGridIn", + "GridOut": "AsyncGridOut", + "GridOutCursor": "AsyncGridOutCursor", + "GridOutIterator": "AsyncGridOutIterator", + "GridOutChunkIterator": "_AsyncGridOutChunkIterator", + "_grid_in_property": "_a_grid_in_property", + "_grid_out_property": "_a_grid_out_property", + "ClientEncryption": "AsyncClientEncryption", + "MongoCryptCallback": "AsyncMongoCryptCallback", + "ExplicitEncrypter": "AsyncExplicitEncrypter", + "AutoEncrypter": "AsyncAutoEncrypter", + "ContextManager": "AsyncContextManager", + "ClientContext": "AsyncClientContext", + "TestCollection": "AsyncTestCollection", + "IntegrationTest": "AsyncIntegrationTest", + "PyMongoTestCase": "AsyncPyMongoTestCase", + "MockClientTest": "AsyncMockClientTest", + "client_context": "async_client_context", + "setUp": "asyncSetUp", + "tearDown": "asyncTearDown", + "wait_until": "await async_wait_until", + "addCleanup": "addAsyncCleanup", + "TestCase": "IsolatedAsyncioTestCase", + "UnitTest": "AsyncUnitTest", + "MockClient": "AsyncMockClient", + "SpecRunner": "AsyncSpecRunner", + "TransactionsBase": "AsyncTransactionsBase", + "get_pool": "await async_get_pool", + "is_mongos": "await async_is_mongos", + "rs_or_single_client": "await async_rs_or_single_client", + "rs_or_single_client_noauth": "await async_rs_or_single_client_noauth", + "rs_client": "await async_rs_client", + "single_client": "await async_single_client", + "from_client": "await async_from_client", + "closing": "aclosing", + "assertRaisesExactly": "asyncAssertRaisesExactly", + "get_mock_client": "await get_async_mock_client", + "close": "await aclose", +} + +async_classes = [AsyncMongoClient, AsyncDatabase, AsyncCollection, AsyncCursor, AsyncCommandCursor] + + +def get_async_methods() -> set[str]: + result: set[str] = set() + for x in async_classes: + methods = { + k + for k, v in vars(x).items() + if callable(v) + and not isinstance(v, classmethod) + and inspect.iscoroutinefunction(v) + and v.__name__[0] != "_" 
+ } + result = result | methods + return result + + +async_methods = get_async_methods() + + +def apply_replacements(lines: list[str]) -> list[str]: + for i in range(len(lines)): + if "_IS_SYNC = True" in lines[i]: + lines[i] = "_IS_SYNC = False" + if "def test" in lines[i]: + lines[i] = lines[i].replace("def test", "async def test") + for k in replacements: + if k in lines[i]: + lines[i] = lines[i].replace(k, replacements[k]) + for k in async_methods: + if k + "(" in lines[i]: + tokens = lines[i].split(" ") + for j in range(len(tokens)): + if k + "(" in tokens[j]: + if j < 2: + tokens.insert(0, "await") + else: + tokens.insert(j, "await") + break + new_line = " ".join(tokens) + + lines[i] = new_line + + return lines + + +def process_file(input_file: str, output_file: str) -> None: + with open(input_file, "r+") as f: + lines = f.readlines() + lines = apply_replacements(lines) + + with open(output_file, "w+") as f2: + f2.seek(0) + f2.writelines(lines) + f2.truncate() + + +def main() -> None: + args = sys.argv[1:] + sync_file = "./test/" + args[0] + async_file = "./" + args[0] + + process_file(sync_file, async_file) + + +main() diff --git a/tools/ensure_future_annotations_import.py b/tools/ensure_future_annotations_import.py new file mode 100644 index 0000000000..55080148e4 --- /dev/null +++ b/tools/ensure_future_annotations_import.py @@ -0,0 +1,41 @@ +# Copyright 2023-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ensure that 'from __future__ import annotations' is used in all package files +""" +from __future__ import annotations + +import sys +from pathlib import Path + +pattern = "from __future__ import annotations" +missing = [] +for dirname in ["pymongo", "bson", "gridfs"]: + for path in Path(dirname).glob("*.py"): + if Path(path).name in ["_version.py", "errors.py"]: + continue + found = False + with open(path) as fid: + for line in fid.readlines(): + if line.strip() == pattern: + found = True + break + if not found: + missing.append(path) + +if missing: + print(f"Missing '{pattern}' import in:") + for item in missing: + print(item) + sys.exit(1) diff --git a/tools/fail_if_no_c.py b/tools/fail_if_no_c.py index 6cb82eed57..64280a81d2 100644 --- a/tools/fail_if_no_c.py +++ b/tools/fail_if_no_c.py @@ -16,13 +16,32 @@ Only really intended to be used by internal build scripts. 
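+If the extensions are missing, each import is retried individually below so
+the underlying build error is logged before exiting.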
""" +from __future__ import annotations +import logging import sys +LOGGER = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO, format="%(levelname)-8s %(message)s") + sys.path[0:0] = [""] import bson # noqa: E402 import pymongo # noqa: E402 -if not pymongo.has_c() or not bson.has_c(): - sys.exit("could not load C extensions") + +def main() -> None: + if not pymongo.has_c() or not bson.has_c(): + try: + from pymongo import _cmessage # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + LOGGER.exception(e) + try: + from bson import _cbson # type:ignore[attr-defined] # noqa: F401 + except Exception as e: + LOGGER.exception(e) + sys.exit("could not load C extensions") + + +if __name__ == "__main__": + main() diff --git a/tools/ocsptest.py b/tools/ocsptest.py index 14df8a8fe3..8596db226d 100644 --- a/tools/ocsptest.py +++ b/tools/ocsptest.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. See the License for the specific language governing # permissions and limitations under the License. +from __future__ import annotations import argparse import logging @@ -25,7 +26,7 @@ logging.basicConfig(format=FORMAT, level=logging.DEBUG) -def check_ocsp(host, port, capath): +def check_ocsp(host: str, port: int, capath: str) -> None: ctx = get_ssl_context( None, # certfile None, # passphrase @@ -34,6 +35,7 @@ def check_ocsp(host, port, capath): False, # allow_invalid_certificates False, # allow_invalid_hostnames False, + True, # is sync ) # disable_ocsp_endpoint_check # Ensure we're using pyOpenSSL. @@ -42,12 +44,12 @@ def check_ocsp(host, port, capath): s = socket.socket() s.connect((host, port)) try: - s = ctx.wrap_socket(s, server_hostname=host) + s = ctx.wrap_socket(s, server_hostname=host) # type: ignore[assignment] finally: s.close() -def main(): +def main() -> None: parser = argparse.ArgumentParser(description="Debug OCSP") parser.add_argument("--host", type=str, required=True, help="Host to connect to") parser.add_argument("-p", "--port", type=int, default=443, help="Port to connect to") diff --git a/tools/synchro.py b/tools/synchro.py new file mode 100644 index 0000000000..e3d4835502 --- /dev/null +++ b/tools/synchro.py @@ -0,0 +1,465 @@ +# Copyright 2024-Present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Synchronization of asynchronous modules. + +Used as part of our build system to generate synchronous code. 
+""" + +from __future__ import annotations + +import os +import re +import sys +from os import listdir +from pathlib import Path + +from unasync import Rule, unasync_files # type: ignore[import-not-found] + +replacements = { + "AsyncCollection": "Collection", + "AsyncDatabase": "Database", + "AsyncCursor": "Cursor", + "AsyncMongoClient": "MongoClient", + "AsyncCommandCursor": "CommandCursor", + "AsyncRawBatchCursor": "RawBatchCursor", + "AsyncRawBatchCommandCursor": "RawBatchCommandCursor", + "AsyncClientSession": "ClientSession", + "AsyncChangeStream": "ChangeStream", + "AsyncCollectionChangeStream": "CollectionChangeStream", + "AsyncDatabaseChangeStream": "DatabaseChangeStream", + "AsyncClusterChangeStream": "ClusterChangeStream", + "_AsyncBulk": "_Bulk", + "_AsyncClientBulk": "_ClientBulk", + "AsyncConnection": "Connection", + "async_command": "command", + "async_receive_message": "receive_message", + "async_receive_data": "receive_data", + "async_sendall": "sendall", + "async_socket_sendall": "sendall", + "asynchronous": "synchronous", + "Asynchronous": "Synchronous", + "AsyncBulkTestBase": "BulkTestBase", + "AsyncBulkAuthorizationTestBase": "BulkAuthorizationTestBase", + "anext": "next", + "aiter": "iter", + "_ALock": "_Lock", + "_ACondition": "_Condition", + "AsyncGridFS": "GridFS", + "AsyncGridFSBucket": "GridFSBucket", + "AsyncGridIn": "GridIn", + "AsyncGridOut": "GridOut", + "AsyncGridOutCursor": "GridOutCursor", + "AsyncGridOutIterator": "GridOutIterator", + "_AsyncGridOutChunkIterator": "GridOutChunkIterator", + "_a_grid_in_property": "_grid_in_property", + "_a_grid_out_property": "_grid_out_property", + "AsyncClientEncryption": "ClientEncryption", + "AsyncMongoCryptCallback": "MongoCryptCallback", + "AsyncExplicitEncrypter": "ExplicitEncrypter", + "AsyncAutoEncrypter": "AutoEncrypter", + "AsyncContextManager": "ContextManager", + "AsyncClientContext": "ClientContext", + "AsyncTestCollection": "TestCollection", + "AsyncIntegrationTest": "IntegrationTest", + "AsyncPyMongoTestCase": "PyMongoTestCase", + "AsyncMockClientTest": "MockClientTest", + "async_client_context": "client_context", + "async_setup": "setup", + "asyncSetUp": "setUp", + "asyncTearDown": "tearDown", + "async_teardown": "teardown", + "pytest_asyncio": "pytest", + "async_wait_until": "wait_until", + "addAsyncCleanup": "addCleanup", + "async_setup_class": "setup_class", + "IsolatedAsyncioTestCase": "TestCase", + "AsyncUnitTest": "UnitTest", + "AsyncMockClient": "MockClient", + "AsyncSpecRunner": "SpecRunner", + "AsyncTransactionsBase": "TransactionsBase", + "async_get_pool": "get_pool", + "async_is_mongos": "is_mongos", + "async_rs_or_single_client": "rs_or_single_client", + "async_rs_or_single_client_noauth": "rs_or_single_client_noauth", + "async_rs_client": "rs_client", + "async_single_client": "single_client", + "async_from_client": "from_client", + "aclosing": "closing", + "asyncAssertRaisesExactly": "assertRaisesExactly", + "get_async_mock_client": "get_mock_client", + "aconnect": "_connect", + "async-transactions-ref": "transactions-ref", + "async-snapshot-reads-ref": "snapshot-reads-ref", + "default_async": "default", + "aclose": "close", + "PyMongo|async": "PyMongo", + "PyMongo|c|async": "PyMongo|c", + "AsyncTestGridFile": "TestGridFile", + "AsyncTestGridFileNoConnect": "TestGridFileNoConnect", + "AsyncTestSpec": "TestSpec", + "AsyncSpecTestCreator": "SpecTestCreator", + "async_set_fail_point": "set_fail_point", + "async_ensure_all_connected": "ensure_all_connected", + "async_repl_set_step_down": 
"repl_set_step_down", + "AsyncPeriodicExecutor": "PeriodicExecutor", + "async_wait_for_event": "wait_for_event", + "pymongo_server_monitor_task": "pymongo_server_monitor_thread", + "pymongo_server_rtt_task": "pymongo_server_rtt_thread", + "_async_create_lock": "_create_lock", + "_async_create_condition": "_create_condition", + "_async_cond_wait": "_cond_wait", + "AsyncNetworkingInterface": "NetworkingInterface", + "_configured_protocol_interface": "_configured_socket_interface", + "_async_configured_socket": "_configured_socket", + "SpecRunnerTask": "SpecRunnerThread", + "AsyncMockConnection": "MockConnection", + "AsyncMockPool": "MockPool", + "StopAsyncIteration": "StopIteration", + "create_async_event": "create_event", + "async_create_barrier": "create_barrier", + "async_barrier_wait": "barrier_wait", + "async_joinall": "joinall", + "async_simple_test_client": "simple_test_client", + "_async_create_connection": "_create_connection", + "pymongo.asynchronous.srv_resolver._SrvResolver.get_hosts": "pymongo.synchronous.srv_resolver._SrvResolver.get_hosts", + "dns.asyncresolver.resolve": "dns.resolver.resolve", +} + +docstring_replacements: dict[tuple[str, str], str] = { + ("MongoClient", "connect"): """If ``True`` (the default), immediately + begin connecting to MongoDB in the background. Otherwise connect + on the first operation. The default value is ``False`` when + running in a Function-as-a-service environment.""", + ("Collection", "create"): """If ``True``, force collection + creation even without options being set.""", + ("Collection", "session"): """A + :class:`~pymongo.client_session.ClientSession` that is used with + the create collection command.""", + ("Collection", "kwargs"): """Additional keyword arguments will + be passed as options for the create collection command.""", +} + +docstring_removals: set[str] = { + ".. warning:: This API is currently in beta, meaning the classes, methods, and behaviors described within may change before the full release." 
+} + +import_replacements = {"test.synchronous": "test"} + +_pymongo_base = "./pymongo/asynchronous/" +_gridfs_base = "./gridfs/asynchronous/" +_test_base = "./test/asynchronous/" + +_pymongo_dest_base = "./pymongo/synchronous/" +_gridfs_dest_base = "./gridfs/synchronous/" +_test_dest_base = "./test/" + +if not Path.exists(Path(_pymongo_dest_base)): + Path.mkdir(Path(_pymongo_dest_base)) +if not Path.exists(Path(_gridfs_dest_base)): + Path.mkdir(Path(_gridfs_dest_base)) + +async_files = [ + _pymongo_base + f for f in listdir(_pymongo_base) if (Path(_pymongo_base) / f).is_file() +] + +gridfs_files = [ + _gridfs_base + f for f in listdir(_gridfs_base) if (Path(_gridfs_base) / f).is_file() +] + + +def async_only_test(f: str) -> bool: + """Return True for async tests that should not be converted to sync.""" + return f in [ + "test_locks.py", + "test_concurrency.py", + "test_async_cancellation.py", + "test_async_loop_safety.py", + "test_async_contextvars_reset.py", + "test_async_loop_unblocked.py", + ] + + +test_files = [ + _test_base + f + for f in listdir(_test_base) + if (Path(_test_base) / f).is_file() and not async_only_test(f) +] + +# Add each asynchronized test here as part of the converting PR +converted_tests = [ + "__init__.py", + "conftest.py", + "helpers.py", + "pymongo_mocks.py", + "utils_spec_runner.py", + "qcheck.py", + "test_auth.py", + "test_auth_oidc.py", + "test_auth_spec.py", + "test_bulk.py", + "test_change_stream.py", + "test_client.py", + "test_client_bulk_write.py", + "test_client_context.py", + "test_client_metadata.py", + "test_collation.py", + "test_collection.py", + "test_collection_management.py", + "test_command_logging.py", + "test_command_logging.py", + "test_command_monitoring.py", + "test_comment.py", + "test_common.py", + "test_connection_logging.py", + "test_connection_monitoring.py", + "test_connections_survive_primary_stepdown_spec.py", + "test_create_entities.py", + "test_crud_unified.py", + "test_csot.py", + "test_cursor.py", + "test_custom_types.py", + "test_database.py", + "test_discovery_and_monitoring.py", + "test_dns.py", + "test_encryption.py", + "test_examples.py", + "test_grid_file.py", + "test_gridfs.py", + "test_gridfs_bucket.py", + "test_gridfs_spec.py", + "test_heartbeat_monitoring.py", + "test_index_management.py", + "test_json_util_integration.py", + "test_load_balancer.py", + "test_logger.py", + "test_max_staleness.py", + "test_monitor.py", + "test_monitoring.py", + "test_mongos_load_balancing.py", + "test_on_demand_csfle.py", + "test_pooling.py", + "test_raw_bson.py", + "test_read_concern.py", + "test_read_preferences.py", + "test_read_write_concern_spec.py", + "test_retryable_reads.py", + "test_retryable_reads_unified.py", + "test_retryable_writes.py", + "test_retryable_writes_unified.py", + "test_run_command.py", + "test_sdam_monitoring_spec.py", + "test_server_selection.py", + "test_server_selection_in_window.py", + "test_server_selection_logging.py", + "test_server_selection_rtt.py", + "test_session.py", + "test_sessions_unified.py", + "test_srv_polling.py", + "test_ssl.py", + "test_streaming_protocol.py", + "test_transactions.py", + "test_transactions_unified.py", + "test_unified_format.py", + "test_versioned_api_integration.py", + "unified_format.py", + "utils_selection_tests.py", + "utils.py", +] + + +def process_files( + files: list[str], docstring_translate_files: list[str], sync_test_files: list[str] +) -> None: + for file in files: + if "__init__" not in file or "__init__" and "test" in file: + with open(file, "r+") as f: + 
lines = f.readlines() + lines = apply_is_sync(lines, file) + lines = translate_coroutine_types(lines) + lines = translate_async_sleeps(lines) + if file in docstring_translate_files: + lines = translate_docstrings(lines) + if file in sync_test_files: + lines = translate_imports(lines) + lines = process_ignores(lines) + f.seek(0) + f.writelines(lines) + f.truncate() + + +def apply_is_sync(lines: list[str], file: str) -> list[str]: + try: + is_sync = next(iter([line for line in lines if line.startswith("_IS_SYNC = ")])) + index = lines.index(is_sync) + is_sync = is_sync.replace("False", "True") + lines[index] = is_sync + except StopIteration as e: + print( + f"Missing _IS_SYNC at top of async file {file.replace('synchronous', 'asynchronous')}" + ) + raise e + return lines + + +def translate_coroutine_types(lines: list[str]) -> list[str]: + coroutine_types = [line for line in lines if "Coroutine[" in line] + for type in coroutine_types: + res = re.search(r"Coroutine\[([A-z]+), ([A-z]+), ([A-z]+)\]", type) + if res: + old = res[0] + index = lines.index(type) + new = type.replace(old, res.group(3)) + lines[index] = new + return lines + + +def translate_imports(lines: list[str]) -> list[str]: + for k, v in import_replacements.items(): + matches = [line for line in lines if k in line and "import" in line] + for line in matches: + index = lines.index(line) + lines[index] = line.replace(k, v) + return lines + + +def translate_async_sleeps(lines: list[str]) -> list[str]: + blocking_sleeps = [line for line in lines if "asyncio.sleep(0)" in line] + lines = [line for line in lines if line not in blocking_sleeps] + sleeps = [line for line in lines if "asyncio.sleep" in line] + + for line in sleeps: + res = re.search(r"asyncio.sleep\(([^()]*)\)", line) + if res: + old = res[0] + index = lines.index(line) + new = f"time.sleep({res[1]})" + lines[index] = line.replace(old, new) + + return lines + + +def translate_docstrings(lines: list[str]) -> list[str]: + for i in range(len(lines)): + for k in replacements: + if k in lines[i]: + # This sequence of replacements fixes the grammar issues caused by translating async -> sync + if "an Async" in lines[i]: + lines[i] = lines[i].replace("an Async", "a Async") + if "an 'Async" in lines[i]: + lines[i] = lines[i].replace("an 'Async", "a 'Async") + if "An Async" in lines[i]: + lines[i] = lines[i].replace("An Async", "A Async") + if "An 'Async" in lines[i]: + lines[i] = lines[i].replace("An 'Async", "A 'Async") + if "an asynchronous" in lines[i]: + lines[i] = lines[i].replace("an asynchronous", "a") + if "An asynchronous" in lines[i]: + lines[i] = lines[i].replace("An asynchronous", "A") + # This ensures docstring links are for `pymongo.X` instead of `pymongo.synchronous.X` + if "pymongo.asynchronous" in lines[i] and "import" not in lines[i]: + lines[i] = lines[i].replace("pymongo.asynchronous", "pymongo") + lines[i] = lines[i].replace(k, replacements[k]) + if "Sync" in lines[i] and "Synchronous" not in lines[i] and replacements[k] in lines[i]: + lines[i] = lines[i].replace("Sync", "") + if "rsApplyStop" in lines[i]: + lines[i] = lines[i].replace("rsApplyStop", "rsSyncApplyStop") + if "async for" in lines[i] or "async with" in lines[i] or "async def" in lines[i]: + lines[i] = lines[i].replace("async ", "") + if "await " in lines[i] and "tailable" not in lines[i]: + lines[i] = lines[i].replace("await ", "") + for i in range(len(lines)): + for k in docstring_replacements: # type: ignore[assignment] + if f":param {k[1]}: **Not supported by {k[0]}**." 
in lines[i]: + lines[i] = lines[i].replace( + f"**Not supported by {k[0]}**.", + docstring_replacements[k], # type: ignore[index] + ) + + for line in docstring_removals: + if line in lines[i]: + lines[i] = "DOCSTRING_REMOVED" + lines[i + 1] = "DOCSTRING_REMOVED" + + return [line for line in lines if line != "DOCSTRING_REMOVED"] + + +def process_ignores(lines: list[str]) -> list[str]: + for i in range(len(lines)): + for k, v in replacements.items(): + if "unasync: off" in lines[i] and v in lines[i]: + lines[i] = lines[i].replace(v, k) + return lines + + +def unasync_directory(files: list[str], src: str, dest: str, replacements: dict[str, str]) -> None: + unasync_files( + files, + [ + Rule( + fromdir=src, + todir=dest, + additional_replacements=replacements, + ) + ], + ) + + +def main() -> None: + modified_files = [f"./{f}" for f in sys.argv[1:]] + errored = False + for fname in async_files + gridfs_files + test_files: + # If the async file was modified, we don't need to check if the sync file was also modified. + if str(fname) in modified_files: + continue + sync_name = str(fname).replace("asynchronous", "synchronous") + test_sync_name = str(fname).replace("/asynchronous", "") + if ( + sync_name in modified_files + or test_sync_name in modified_files + and "OVERRIDE_SYNCHRO_CHECK" not in os.environ + ): + print(f"Refusing to overwrite {test_sync_name}") + errored = True + if errored: + raise ValueError("Aborting synchro due to errors") + + unasync_directory(async_files, _pymongo_base, _pymongo_dest_base, replacements) + unasync_directory(gridfs_files, _gridfs_base, _gridfs_dest_base, replacements) + unasync_directory(test_files, _test_base, _test_dest_base, replacements) + + sync_files = [ + _pymongo_dest_base + f + for f in listdir(_pymongo_dest_base) + if (Path(_pymongo_dest_base) / f).is_file() + ] + + sync_gridfs_files = [ + _gridfs_dest_base + f + for f in listdir(_gridfs_dest_base) + if (Path(_gridfs_dest_base) / f).is_file() + ] + sync_test_files = [ + _test_dest_base + f for f in converted_tests if (Path(_test_dest_base) / f).is_file() + ] + + docstring_translate_files = sync_files + sync_gridfs_files + sync_test_files + + process_files( + sync_files + sync_gridfs_files + sync_test_files, docstring_translate_files, sync_test_files + ) + + +if __name__ == "__main__": + main() diff --git a/tools/synchro.sh b/tools/synchro.sh new file mode 100755 index 0000000000..28b9c6d6c4 --- /dev/null +++ b/tools/synchro.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Keep the synchronous folders in sync with their async counterparts.
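+# Pass the list of modified files through so synchro.py can refuse to
+# overwrite a sync file that was edited by hand (see main() in synchro.py).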
+set -eu + +python ./tools/synchro.py "$@" +python -m ruff check pymongo/synchronous/ gridfs/synchronous/ test/ --fix --silent +python -m ruff format pymongo/synchronous/ gridfs/synchronous/ test/ --silent diff --git a/uv.lock b/uv.lock new file mode 100644 index 0000000000..f9a389c896 --- /dev/null +++ b/uv.lock @@ -0,0 +1,2208 @@ +version = 1 +revision = 3 +requires-python = ">=3.9" +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", + "python_full_version < '3.10'", +] + +[[package]] +name = "accessible-pygments" +version = "0.0.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bc/c1/bbac6a50d02774f91572938964c582fff4270eee73ab822a4aeea4d8b11b/accessible_pygments-0.0.5.tar.gz", hash = "sha256:40918d3e6a2b619ad424cb91e556bd3bd8865443d9f22f1dcdf79e33c8046872", size = 1377899, upload-time = "2024-05-10T11:23:10.216Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/3f/95338030883d8c8b91223b4e21744b04d11b161a3ef117295d8241f50ab4/accessible_pygments-0.0.5-py3-none-any.whl", hash = "sha256:88ae3211e68a1d0b011504b2ffc1691feafce124b845bd072ab6f9f66f34d4b7", size = 1395903, upload-time = "2024-05-10T11:23:08.421Z" }, +] + +[[package]] +name = "alabaster" +version = "0.7.16" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/3e/13dd8e5ed9094e734ac430b5d0eb4f2bb001708a8b7856cbf8e084e001ba/alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65", size = 23776, upload-time = "2024-01-10T00:56:10.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/34/d4e1c02d3bee589efb5dfa17f88ea08bdb3e3eac12bc475462aec52ed223/alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92", size = 13511, upload-time = "2024-01-10T00:56:08.388Z" }, +] + +[[package]] +name = "alabaster" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/f8/d9c74d0daf3f742840fd818d69cfae176fa332022fd44e3469487d5a9420/alabaster-1.0.0.tar.gz", hash = "sha256:c00dca57bca26fa62a6d7d0a9fcce65f3e026e9bfe33e9c538fd3fbb2144fd9e", size = 24210, upload-time = "2024-07-26T18:15:03.762Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929, upload-time = "2024-07-26T18:15:02.05Z" }, +] + +[[package]] +name = "anyio" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = 
"sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949, upload-time = "2025-03-17T00:02:54.77Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916, upload-time = "2025-03-17T00:02:52.713Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "babel" +version = "2.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" }, +] + +[[package]] +name = "backports-asyncio-runner" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.13.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/e4/0c4c39e18fd76d6a628d4dd8da40543d136ce2d1752bd6eeeab0791f4d6b/beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195", size = 621067, upload-time = "2025-04-15T17:05:13.836Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b", size = 187285, upload-time = "2025-04-15T17:05:12.221Z" }, +] + +[[package]] +name = "boto3" +version = "1.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { 
name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7b/34/298ef2023d7d88069776c9cc26b42ba6f05d143a1c9b44a0f65cd795c65b/boto3-1.40.0.tar.gz", hash = "sha256:fc1b3ca3baf3d8820c6faddf47cbba8ad3cd16f8e8d7e2f76d304bf995932eb7", size = 111847, upload-time = "2025-07-31T19:21:06.735Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/44/158581021038c5fc886ffa27fa4731fb4939258da7a23e0bc70b2d5757c9/boto3-1.40.0-py3-none-any.whl", hash = "sha256:959443055d2af676c336cc6033b3f870a8a924384b70d0b2905081d649378179", size = 139882, upload-time = "2025-07-31T19:21:04.65Z" }, +] + +[[package]] +name = "botocore" +version = "1.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "urllib3", version = "2.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8f/e7/770ce910457ac6c68ea79b83892ab7a7cb08528f5d1dd77e51bf02a8529e/botocore-1.40.0.tar.gz", hash = "sha256:850242560dc8e74d542045a81eb6cc15f1b730b4ba55ba5b30e6d686548dfcaf", size = 14262316, upload-time = "2025-07-31T19:20:56.662Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/5a/bebc53f022514412613615b09aef20fbe804abb3ea26ec27e504a2d21c8f/botocore-1.40.0-py3-none-any.whl", hash = "sha256:2063e6d035a6a382b2ae37e40f5144044e55d4e091910d0c9f1be3121ad3e4e6", size = 13921768, upload-time = "2025-07-31T19:20:51.487Z" }, +] + +[[package]] +name = "certifi" +version = "2025.7.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/76/52c535bcebe74590f296d6c77c86dabf761c41980e1347a2422e4aa2ae41/certifi-2025.7.14.tar.gz", hash = "sha256:8ea99dbdfaaf2ba2f9bac77b9249ef62ec5218e7c2b2e903378ed5fccf765995", size = 163981, upload-time = "2025-07-14T03:29:28.449Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4f/52/34c6cf5bb9285074dc3531c437b3919e825d976fde097a7a73f79e726d03/certifi-2025.7.14-py3-none-any.whl", hash = "sha256:6b31f564a415d79ee77df69d757bb49a5bb53bd9f756cbbe24394ffd6fc1f4b2", size = 162722, upload-time = "2025-07-14T03:29:26.863Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" }, + { url = "https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" }, + { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" }, + { url = "https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" }, + { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" }, + { url = "https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" }, + { url = "https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 218829, upload-time = "2025-09-08T23:22:19.069Z" }, + { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" }, + { url = "https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" }, + { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, + { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, + { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, + { url = "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, + { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = "https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = "https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = 
"https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = 
"https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = "https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, + { url = "https://files.pythonhosted.org/packages/c0/cc/08ed5a43f2996a16b462f64a7055c6e962803534924b9b2f1371d8c00b7b/cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf", size = 184288, upload-time = "2025-09-08T23:23:48.404Z" }, + { url = "https://files.pythonhosted.org/packages/3d/de/38d9726324e127f727b4ecc376bc85e505bfe61ef130eaf3f290c6847dd4/cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7", size = 180509, upload-time = "2025-09-08T23:23:49.73Z" }, + { url = "https://files.pythonhosted.org/packages/9b/13/c92e36358fbcc39cf0962e83223c9522154ee8630e1df7c0b3a39a8124e2/cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c", size = 208813, upload-time = "2025-09-08T23:23:51.263Z" }, + { url = "https://files.pythonhosted.org/packages/15/12/a7a79bd0df4c3bff744b2d7e52cc1b68d5e7e427b384252c42366dc1ecbc/cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165", size = 216498, upload-time = "2025-09-08T23:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/a3/ad/5c51c1c7600bdd7ed9a24a203ec255dccdd0ebf4527f7b922a0bde2fb6ed/cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534", size = 203243, upload-time = "2025-09-08T23:23:53.836Z" }, + { url = "https://files.pythonhosted.org/packages/32/f2/81b63e288295928739d715d00952c8c6034cb6c6a516b17d37e0c8be5600/cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f", size = 203158, upload-time = "2025-09-08T23:23:55.169Z" }, + { url = "https://files.pythonhosted.org/packages/1f/74/cc4096ce66f5939042ae094e2e96f53426a979864aa1f96a621ad128be27/cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63", size = 216548, upload-time = "2025-09-08T23:23:56.506Z" }, + { url = "https://files.pythonhosted.org/packages/e8/be/f6424d1dc46b1091ffcc8964fa7c0ab0cd36839dd2761b49c90481a6ba1b/cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2", size = 218897, upload-time = "2025-09-08T23:23:57.825Z" }, + { url = 
"https://files.pythonhosted.org/packages/f7/e0/dda537c2309817edf60109e39265f24f24aa7f050767e22c98c53fe7f48b/cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65", size = 211249, upload-time = "2025-09-08T23:23:59.139Z" }, + { url = "https://files.pythonhosted.org/packages/2b/e7/7c769804eb75e4c4b35e658dba01de1640a351a9653c3d49ca89d16ccc91/cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322", size = 218041, upload-time = "2025-09-08T23:24:00.496Z" }, + { url = "https://files.pythonhosted.org/packages/aa/d9/6218d78f920dcd7507fc16a766b5ef8f3b913cc7aa938e7fc80b9978d089/cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a", size = 172138, upload-time = "2025-09-08T23:24:01.7Z" }, + { url = "https://files.pythonhosted.org/packages/54/8f/a1e836f82d8e32a97e6b29cc8f641779181ac7363734f12df27db803ebda/cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9", size = 182794, upload-time = "2025-09-08T23:24:02.943Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/95/28/9901804da60055b406e1a1c5ba7aac1276fb77f1dde635aabfc7fd84b8ab/charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941", size = 201818, upload-time = "2025-05-02T08:31:46.725Z" }, + { url = "https://files.pythonhosted.org/packages/d9/9b/892a8c8af9110935e5adcbb06d9c6fe741b6bb02608c6513983048ba1a18/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd", size = 144649, upload-time = "2025-05-02T08:31:48.889Z" }, + { url = "https://files.pythonhosted.org/packages/7b/a5/4179abd063ff6414223575e008593861d62abfc22455b5d1a44995b7c101/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6", size = 155045, upload-time = "2025-05-02T08:31:50.757Z" }, + { url = "https://files.pythonhosted.org/packages/3b/95/bc08c7dfeddd26b4be8c8287b9bb055716f31077c8b0ea1cd09553794665/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d", size = 147356, upload-time = "2025-05-02T08:31:52.634Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/7a5b635aa65284bf3eab7653e8b4151ab420ecbae918d3e359d1947b4d61/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86", size = 149471, upload-time = "2025-05-02T08:31:56.207Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/38/51fc6ac74251fd331a8cfdb7ec57beba8c23fd5493f1050f71c87ef77ed0/charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c", size = 151317, upload-time = "2025-05-02T08:31:57.613Z" }, + { url = "https://files.pythonhosted.org/packages/b7/17/edee1e32215ee6e9e46c3e482645b46575a44a2d72c7dfd49e49f60ce6bf/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0", size = 146368, upload-time = "2025-05-02T08:31:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/26/2c/ea3e66f2b5f21fd00b2825c94cafb8c326ea6240cd80a91eb09e4a285830/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef", size = 154491, upload-time = "2025-05-02T08:32:01.219Z" }, + { url = "https://files.pythonhosted.org/packages/52/47/7be7fa972422ad062e909fd62460d45c3ef4c141805b7078dbab15904ff7/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6", size = 157695, upload-time = "2025-05-02T08:32:03.045Z" }, + { url = "https://files.pythonhosted.org/packages/2f/42/9f02c194da282b2b340f28e5fb60762de1151387a36842a92b533685c61e/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366", size = 154849, upload-time = "2025-05-02T08:32:04.651Z" }, + { url = "https://files.pythonhosted.org/packages/67/44/89cacd6628f31fb0b63201a618049be4be2a7435a31b55b5eb1c3674547a/charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db", size = 150091, upload-time = "2025-05-02T08:32:06.719Z" }, + { url = "https://files.pythonhosted.org/packages/1f/79/4b8da9f712bc079c0f16b6d67b099b0b8d808c2292c937f267d816ec5ecc/charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a", size = 98445, upload-time = "2025-05-02T08:32:08.66Z" }, + { url = "https://files.pythonhosted.org/packages/7d/d7/96970afb4fb66497a40761cdf7bd4f6fca0fc7bafde3a84f836c1f57a926/charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509", size = 105782, upload-time = "2025-05-02T08:32:10.46Z" }, + { url = "https://files.pythonhosted.org/packages/05/85/4c40d00dcc6284a1c1ad5de5e0996b06f39d8232f1031cd23c2f5c07ee86/charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2", size = 198794, upload-time = "2025-05-02T08:32:11.945Z" }, + { url = "https://files.pythonhosted.org/packages/41/d9/7a6c0b9db952598e97e93cbdfcb91bacd89b9b88c7c983250a77c008703c/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645", size = 142846, upload-time = "2025-05-02T08:32:13.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/82/a37989cda2ace7e37f36c1a8ed16c58cf48965a79c2142713244bf945c89/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd", size = 153350, upload-time = "2025-05-02T08:32:15.873Z" }, + { url = "https://files.pythonhosted.org/packages/df/68/a576b31b694d07b53807269d05ec3f6f1093e9545e8607121995ba7a8313/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8", size = 145657, upload-time = "2025-05-02T08:32:17.283Z" }, + { url = "https://files.pythonhosted.org/packages/92/9b/ad67f03d74554bed3aefd56fe836e1623a50780f7c998d00ca128924a499/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f", size = 147260, upload-time = "2025-05-02T08:32:18.807Z" }, + { url = "https://files.pythonhosted.org/packages/a6/e6/8aebae25e328160b20e31a7e9929b1578bbdc7f42e66f46595a432f8539e/charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7", size = 149164, upload-time = "2025-05-02T08:32:20.333Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f2/b3c2f07dbcc248805f10e67a0262c93308cfa149a4cd3d1fe01f593e5fd2/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9", size = 144571, upload-time = "2025-05-02T08:32:21.86Z" }, + { url = "https://files.pythonhosted.org/packages/60/5b/c3f3a94bc345bc211622ea59b4bed9ae63c00920e2e8f11824aa5708e8b7/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544", size = 151952, upload-time = "2025-05-02T08:32:23.434Z" }, + { url = "https://files.pythonhosted.org/packages/e2/4d/ff460c8b474122334c2fa394a3f99a04cf11c646da895f81402ae54f5c42/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82", size = 155959, upload-time = "2025-05-02T08:32:24.993Z" }, + { url = "https://files.pythonhosted.org/packages/a2/2b/b964c6a2fda88611a1fe3d4c400d39c66a42d6c169c924818c848f922415/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0", size = 153030, upload-time = "2025-05-02T08:32:26.435Z" }, + { url = "https://files.pythonhosted.org/packages/59/2e/d3b9811db26a5ebf444bc0fa4f4be5aa6d76fc6e1c0fd537b16c14e849b6/charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5", size = 148015, upload-time = "2025-05-02T08:32:28.376Z" }, + { url = "https://files.pythonhosted.org/packages/90/07/c5fd7c11eafd561bb51220d600a788f1c8d77c5eef37ee49454cc5c35575/charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a", size = 98106, upload-time = "2025-05-02T08:32:30.281Z" }, + { url = "https://files.pythonhosted.org/packages/a8/05/5e33dbef7e2f773d672b6d79f10ec633d4a71cd96db6673625838a4fd532/charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28", size = 105402, upload-time = "2025-05-02T08:32:32.191Z" }, + { url = 
"https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, + { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, + { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/28/f8/dfb01ff6cc9af38552c69c9027501ff5a5117c4cc18dcd27cb5259fa1888/charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4", size = 201671, upload-time = "2025-05-02T08:34:12.696Z" }, + { url = "https://files.pythonhosted.org/packages/32/fb/74e26ee556a9dbfe3bd264289b67be1e6d616329403036f6507bb9f3f29c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7", size = 144744, upload-time = "2025-05-02T08:34:14.665Z" }, + { url = "https://files.pythonhosted.org/packages/ad/06/8499ee5aa7addc6f6d72e068691826ff093329fe59891e83b092ae4c851c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836", size = 154993, upload-time = "2025-05-02T08:34:17.134Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a2/5e4c187680728219254ef107a6949c60ee0e9a916a5dadb148c7ae82459c/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597", size = 147382, upload-time = "2025-05-02T08:34:19.081Z" }, + { url = "https://files.pythonhosted.org/packages/4c/fe/56aca740dda674f0cc1ba1418c4d84534be51f639b5f98f538b332dc9a95/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7", size = 149536, upload-time = "2025-05-02T08:34:21.073Z" }, + { url = "https://files.pythonhosted.org/packages/53/13/db2e7779f892386b589173dd689c1b1e304621c5792046edd8a978cbf9e0/charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f", size = 151349, upload-time = "2025-05-02T08:34:23.193Z" }, + { url = "https://files.pythonhosted.org/packages/69/35/e52ab9a276186f729bce7a0638585d2982f50402046e4b0faa5d2c3ef2da/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba", size = 146365, upload-time = "2025-05-02T08:34:25.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/d8/af7333f732fc2e7635867d56cb7c349c28c7094910c72267586947561b4b/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12", size = 154499, upload-time = "2025-05-02T08:34:27.359Z" }, + { url = "https://files.pythonhosted.org/packages/7a/3d/a5b2e48acef264d71e036ff30bcc49e51bde80219bb628ba3e00cf59baac/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518", size = 157735, upload-time = "2025-05-02T08:34:29.798Z" }, + { url = "https://files.pythonhosted.org/packages/85/d8/23e2c112532a29f3eef374375a8684a4f3b8e784f62b01da931186f43494/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5", size = 154786, upload-time = "2025-05-02T08:34:31.858Z" }, + { url = "https://files.pythonhosted.org/packages/c7/57/93e0169f08ecc20fe82d12254a200dfaceddc1c12a4077bf454ecc597e33/charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3", size = 150203, upload-time = "2025-05-02T08:34:33.88Z" }, + { url = "https://files.pythonhosted.org/packages/2c/9d/9bf2b005138e7e060d7ebdec7503d0ef3240141587651f4b445bdf7286c2/charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471", size = 98436, upload-time = "2025-05-02T08:34:35.907Z" }, + { url = "https://files.pythonhosted.org/packages/6d/24/5849d46cf4311bbf21b424c443b09b459f5b436b1558c04e45dbb7cc478b/charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e", size = 105772, upload-time = "2025-05-02T08:34:37.935Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + +[[package]] +name = "click" +version = "8.1.8" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, +] + +[[package]] +name = "click" 
+version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "coverage" +version = "7.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/d3/3ec80acdd57a0d6a1111b978ade388824f37126446fd6750d38bfaca949c/coverage-7.5.0.tar.gz", hash = "sha256:cf62d17310f34084c59c01e027259076479128d11e4661bb6c9acb38c5e19bb8", size = 798314, upload-time = "2024-04-23T17:42:35.508Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/db/08d54dbc12fdfe5857b06105fd1235bdebb7da7c11cd1a0fae936556162a/coverage-7.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:432949a32c3e3f820af808db1833d6d1631664d53dd3ce487aa25d574e18ad1c", size = 210025, upload-time = "2024-04-23T17:40:22.328Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ff/02c4bcff1025b4a788aa3933e1cd1474d79de43e0d859273b3319ef43cd3/coverage-7.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2bd7065249703cbeb6d4ce679c734bef0ee69baa7bff9724361ada04a15b7e3b", size = 210499, upload-time = "2024-04-23T17:40:25.747Z" }, + { url = "https://files.pythonhosted.org/packages/ab/b1/7820a8ef62adeebd37612af9d2369f4467a3bc2641dea1243450def5489e/coverage-7.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbfe6389c5522b99768a93d89aca52ef92310a96b99782973b9d11e80511f932", size = 238399, upload-time = "2024-04-23T17:40:27.591Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0e/23a388f3ce16c5ea01a454fef6a9039115abd40b748027d4fef18b3628a7/coverage-7.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39793731182c4be939b4be0cdecde074b833f6171313cf53481f869937129ed3", size = 236676, upload-time = "2024-04-23T17:40:30.455Z" }, + { url = 
"https://files.pythonhosted.org/packages/f8/81/e871b0d58ca5d6cc27d00b2f668ce09c4643ef00512341f3a592a81fb6cd/coverage-7.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85a5dbe1ba1bf38d6c63b6d2c42132d45cbee6d9f0c51b52c59aa4afba057517", size = 237467, upload-time = "2024-04-23T17:40:32.704Z" }, + { url = "https://files.pythonhosted.org/packages/95/cb/42a6d34d5840635394f1e172aaa0e7cbd9346155e5004a8ee75d8e434c6b/coverage-7.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:357754dcdfd811462a725e7501a9b4556388e8ecf66e79df6f4b988fa3d0b39a", size = 243539, upload-time = "2024-04-23T17:40:35.068Z" }, + { url = "https://files.pythonhosted.org/packages/6a/6a/18b3819919fdfd3e2062a75219b363f895f24ae5b80e72ffe5dfb1a7e9c8/coverage-7.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a81eb64feded34f40c8986869a2f764f0fe2db58c0530d3a4afbcde50f314880", size = 241725, upload-time = "2024-04-23T17:40:37.251Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3d/a0650978e8b8f78d269358421b7401acaf7cb89e957b2e1be5205ea5940e/coverage-7.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51431d0abbed3a868e967f8257c5faf283d41ec882f58413cf295a389bb22e58", size = 242913, upload-time = "2024-04-23T17:40:39.992Z" }, + { url = "https://files.pythonhosted.org/packages/8a/fe/95a74158fa0eda56d39783e918edc6fbb3dd3336be390557fc0a2815ecd4/coverage-7.5.0-cp310-cp310-win32.whl", hash = "sha256:f609ebcb0242d84b7adeee2b06c11a2ddaec5464d21888b2c8255f5fd6a98ae4", size = 212381, upload-time = "2024-04-23T17:40:42.632Z" }, + { url = "https://files.pythonhosted.org/packages/4c/26/b276e0c70cba5059becce2594a268a2731d5b4f2386e9a6afdf37ffa3d44/coverage-7.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:6782cd6216fab5a83216cc39f13ebe30adfac2fa72688c5a4d8d180cd52e8f6a", size = 213225, upload-time = "2024-04-23T17:40:45.175Z" }, + { url = "https://files.pythonhosted.org/packages/71/cf/964bb667ea37d64b25f04d4cfaf6232cdb7a6472e1f4a4faf0459ddcec40/coverage-7.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e768d870801f68c74c2b669fc909839660180c366501d4cc4b87efd6b0eee375", size = 210130, upload-time = "2024-04-23T17:40:47.325Z" }, + { url = "https://files.pythonhosted.org/packages/aa/56/31edd4baa132fe2b991437e0acf3e36c50418370044a89b65518e5581f4c/coverage-7.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:84921b10aeb2dd453247fd10de22907984eaf80901b578a5cf0bb1e279a587cb", size = 210617, upload-time = "2024-04-23T17:40:49.82Z" }, + { url = "https://files.pythonhosted.org/packages/26/6d/4cd14bd0221180c307fae4f8ef00dbd86a13507c25081858c620aa6fafd8/coverage-7.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:710c62b6e35a9a766b99b15cdc56d5aeda0914edae8bb467e9c355f75d14ee95", size = 242048, upload-time = "2024-04-23T17:40:52.779Z" }, + { url = "https://files.pythonhosted.org/packages/84/60/7eb84255bd9947b140e0382721b0a1b25fd670b4f0f176f11f90b5632d02/coverage-7.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c379cdd3efc0658e652a14112d51a7668f6bfca7445c5a10dee7eabecabba19d", size = 239619, upload-time = "2024-04-23T17:40:54.847Z" }, + { url = "https://files.pythonhosted.org/packages/76/6b/e8f4696194fdf3c19422f2a80ac10e03a9322f93e6c9ef57a89e03a8c8f7/coverage-7.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fea9d3ca80bcf17edb2c08a4704259dadac196fe5e9274067e7a20511fad1743", size = 241321, 
upload-time = "2024-04-23T17:40:57.092Z" }, + { url = "https://files.pythonhosted.org/packages/3f/1c/6a6990fd2e6890807775852882b1ed0a8e50519a525252490b0c219aa8a5/coverage-7.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:41327143c5b1d715f5f98a397608f90ab9ebba606ae4e6f3389c2145410c52b1", size = 250419, upload-time = "2024-04-23T17:40:59.051Z" }, + { url = "https://files.pythonhosted.org/packages/1a/be/b6422a1422381704dd015cc23e503acd1a44a6bdc4e59c75f8c6a2b24151/coverage-7.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:565b2e82d0968c977e0b0f7cbf25fd06d78d4856289abc79694c8edcce6eb2de", size = 248794, upload-time = "2024-04-23T17:41:01.803Z" }, + { url = "https://files.pythonhosted.org/packages/9b/93/e8231000754d4a31fe9a6c550f6a436eacd2e50763ba2b418f10b2308e45/coverage-7.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cf3539007202ebfe03923128fedfdd245db5860a36810136ad95a564a2fdffff", size = 249873, upload-time = "2024-04-23T17:41:04.719Z" }, + { url = "https://files.pythonhosted.org/packages/d3/6f/eb5aae80bf9d01d0f293121d4caa660ac968da2cb967f82547a7b5e8d65b/coverage-7.5.0-cp311-cp311-win32.whl", hash = "sha256:bf0b4b8d9caa8d64df838e0f8dcf68fb570c5733b726d1494b87f3da85db3a2d", size = 212380, upload-time = "2024-04-23T17:41:06.879Z" }, + { url = "https://files.pythonhosted.org/packages/30/73/b70ab57f11b62f5ca9a83f43cae752fbbb4417bea651875235c32eb2fc2e/coverage-7.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:9c6384cc90e37cfb60435bbbe0488444e54b98700f727f16f64d8bfda0b84656", size = 213316, upload-time = "2024-04-23T17:41:09.233Z" }, + { url = "https://files.pythonhosted.org/packages/36/db/f4e17ffb5ac2d125c72ee3b235c2e04f85a4296a6a9e17730e218af113d8/coverage-7.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fed7a72d54bd52f4aeb6c6e951f363903bd7d70bc1cad64dd1f087980d309ab9", size = 210340, upload-time = "2024-04-23T17:41:11.811Z" }, + { url = "https://files.pythonhosted.org/packages/c3/bc/d7e832280f269be9e8d46cff5c4031b4840f1844674dc53ad93c5a9c1da6/coverage-7.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cbe6581fcff7c8e262eb574244f81f5faaea539e712a058e6707a9d272fe5b64", size = 210612, upload-time = "2024-04-23T17:41:14.256Z" }, + { url = "https://files.pythonhosted.org/packages/54/84/543e2cd6c1de30c7522a0afcb040677957bac756dd8677bade8bdd9274ba/coverage-7.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad97ec0da94b378e593ef532b980c15e377df9b9608c7c6da3506953182398af", size = 242926, upload-time = "2024-04-23T17:41:16.284Z" }, + { url = "https://files.pythonhosted.org/packages/ad/06/570533f747141b4fd727a193317e16c6e677ed7945e23a195b8f64e685a2/coverage-7.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd4bacd62aa2f1a1627352fe68885d6ee694bdaebb16038b6e680f2924a9b2cc", size = 240294, upload-time = "2024-04-23T17:41:19.099Z" }, + { url = "https://files.pythonhosted.org/packages/fa/d9/ec4ba0913195d240d026670d41b91f3e5b9a8a143a385f93a09e97c90f5c/coverage-7.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:adf032b6c105881f9d77fa17d9eebe0ad1f9bfb2ad25777811f97c5362aa07f2", size = 242232, upload-time = "2024-04-23T17:41:21.05Z" }, + { url = "https://files.pythonhosted.org/packages/d9/3f/1a613c32aa1980d20d6ca2f54faf800df04aafad6016d7132b3276d8715d/coverage-7.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ba01d9ba112b55bfa4b24808ec431197bb34f09f66f7cb4fd0258ff9d3711b1", size = 249171, 
upload-time = "2024-04-23T17:41:23.723Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3b/e16b12693572fd69148453abc6ddcd20cbeae6f0a040b5ed6af2f75b646f/coverage-7.5.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:f0bfe42523893c188e9616d853c47685e1c575fe25f737adf473d0405dcfa7eb", size = 247073, upload-time = "2024-04-23T17:41:25.719Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3e/04a05d40bb09f90a312296a32fb2c5ade2dfcf803edf777ad18b97547503/coverage-7.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a9a7ef30a1b02547c1b23fa9a5564f03c9982fc71eb2ecb7f98c96d7a0db5cf2", size = 248812, upload-time = "2024-04-23T17:41:27.951Z" }, + { url = "https://files.pythonhosted.org/packages/ba/f7/3a8b7b0affe548227f3d45e248c0f22c5b55bff0ee062b49afc165b3ff25/coverage-7.5.0-cp312-cp312-win32.whl", hash = "sha256:3c2b77f295edb9fcdb6a250f83e6481c679335ca7e6e4a955e4290350f2d22a4", size = 212634, upload-time = "2024-04-23T17:41:30.114Z" }, + { url = "https://files.pythonhosted.org/packages/7c/31/5f5286d2a5e21e1fe5670629bb24c79bf46383a092e74e00077e7a178e5c/coverage-7.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:427e1e627b0963ac02d7c8730ca6d935df10280d230508c0ba059505e9233475", size = 213460, upload-time = "2024-04-23T17:41:32.683Z" }, + { url = "https://files.pythonhosted.org/packages/62/18/5573216d5b8db7d9f29189350dcd81830a03a624966c35f8201ae10df09c/coverage-7.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0194d654e360b3e6cc9b774e83235bae6b9b2cac3be09040880bb0e8a88f4a1", size = 210014, upload-time = "2024-04-23T17:41:56.535Z" }, + { url = "https://files.pythonhosted.org/packages/7c/0e/e98d6c6d569d65ff3195f095e6b006b3d7780fd6182322a25e7dfe0d53d3/coverage-7.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33c020d3322662e74bc507fb11488773a96894aa82a622c35a5a28673c0c26f5", size = 210494, upload-time = "2024-04-23T17:41:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/d3/63/98e5a6b7ed1bfca874729ee309cc49a6d6658ab9e479a2b6d223ccc96e03/coverage-7.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbdf2cae14a06827bec50bd58e49249452d211d9caddd8bd80e35b53cb04631", size = 237996, upload-time = "2024-04-23T17:42:01.514Z" }, + { url = "https://files.pythonhosted.org/packages/76/e4/d3c67a0a092127b8a3dffa2f75334a8cdb2cefc99e3d75a7f42cf1ff98a9/coverage-7.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3235d7c781232e525b0761730e052388a01548bd7f67d0067a253887c6e8df46", size = 236287, upload-time = "2024-04-23T17:42:03.838Z" }, + { url = "https://files.pythonhosted.org/packages/12/7f/9b787ffc31bc39aa9e98c7005b698e7c6539bd222043e4a9c83b83c782a2/coverage-7.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2de4e546f0ec4b2787d625e0b16b78e99c3e21bc1722b4977c0dddf11ca84e", size = 237070, upload-time = "2024-04-23T17:42:06.993Z" }, + { url = "https://files.pythonhosted.org/packages/31/ee/9998a0d855cad5f8e04062f7428b83c34aa643e5df468409593a480d5585/coverage-7.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0e206259b73af35c4ec1319fd04003776e11e859936658cb6ceffdeba0f5be", size = 243115, upload-time = "2024-04-23T17:42:09.281Z" }, + { url = "https://files.pythonhosted.org/packages/16/94/1e348cd4445404c588ec8199adde0b45727b1d7989d8fb097d39c93e3da5/coverage-7.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2055c4fb9a6ff624253d432aa471a37202cd8f458c033d6d989be4499aed037b", size = 241315, upload-time = 
"2024-04-23T17:42:11.836Z" }, + { url = "https://files.pythonhosted.org/packages/28/17/6fe1695d2a706e586b87a407598f4ed82dd218b2b43cdc790f695f259849/coverage-7.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:075299460948cd12722a970c7eae43d25d37989da682997687b34ae6b87c0ef0", size = 242467, upload-time = "2024-04-23T17:42:14.019Z" }, + { url = "https://files.pythonhosted.org/packages/81/a2/1e550272c8b1f89b980504230b1a929de83d8f3d5ecb268477b32e5996a6/coverage-7.5.0-cp39-cp39-win32.whl", hash = "sha256:280132aada3bc2f0fac939a5771db4fbb84f245cb35b94fae4994d4c1f80dae7", size = 212394, upload-time = "2024-04-23T17:42:17.655Z" }, + { url = "https://files.pythonhosted.org/packages/c9/48/7d3c31064c5adcc743fe5370cf7e198cee06cc0e2d37b5cbe930691a3f54/coverage-7.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:c58536f6892559e030e6924896a44098bc1290663ea12532c78cef71d0df8493", size = 213246, upload-time = "2024-04-23T17:42:19.777Z" }, + { url = "https://files.pythonhosted.org/packages/34/81/f00ce7ef95479085feb01fa9e352b2b5b2b9d24767acf2266d6267a6dba9/coverage-7.5.0-pp38.pp39.pp310-none-any.whl", hash = "sha256:2b57780b51084d5223eee7b59f0d4911c31c16ee5aa12737c7a02455829ff067", size = 202381, upload-time = "2024-04-23T17:42:22.127Z" }, +] + +[package.optional-dependencies] +toml = [ + { name = "tomli", marker = "python_full_version <= '3.11'" }, +] + +[[package]] +name = "cramjam" +version = "2.11.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/14/12/34bf6e840a79130dfd0da7badfb6f7810b8fcfd60e75b0539372667b41b6/cramjam-2.11.0.tar.gz", hash = "sha256:5c82500ed91605c2d9781380b378397012e25127e89d64f460fea6aeac4389b4", size = 99100, upload-time = "2025-07-27T21:25:07.559Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/d3/20d0402e4e983b66603117ad3dd3b864a05d7997a830206d3ff9cacef9a2/cramjam-2.11.0-cp310-cp310-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:d0859c65775e8ebf2cbc084bfd51bd0ffda10266da6f9306451123b89f8e5a63", size = 3558999, upload-time = "2025-07-27T21:21:34.105Z" }, + { url = "https://files.pythonhosted.org/packages/f5/a8/a6e2744288938ccd320a5c6f6f3653faa790f933f5edd088c6e5782a2354/cramjam-2.11.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:1d77b9b0aca02a3f6eeeff27fcd315ca5972616c0919ee38e522cce257bcd349", size = 1861558, upload-time = "2025-07-27T21:21:36.624Z" }, + { url = "https://files.pythonhosted.org/packages/96/29/7961e09a849eea7d8302e7baa6f829dd3ef3faf199cb25ed29b318ae799b/cramjam-2.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:66425bc25b5481359b12a6719b6e7c90ffe76d85d0691f1da7df304bfb8ce45c", size = 1699431, upload-time = "2025-07-27T21:21:38.396Z" }, + { url = "https://files.pythonhosted.org/packages/7a/60/6665e52f01a8919bf37c43dcf0e03b6dd3866f5c4e95440b357d508ee14e/cramjam-2.11.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bd748d3407ec63e049b3aea1595e218814fccab329b7fb10bb51120a30e9fb7e", size = 2025262, upload-time = "2025-07-27T21:21:40.417Z" }, + { url = "https://files.pythonhosted.org/packages/d7/80/79bd84dbeb109e2c6efb74e661b7bd4c3ba393208ebcf69e2ae9454ae80c/cramjam-2.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6d9a23a35b3a105c42a8de60fc2e80281ae6e758f05a3baea0b68eb1ddcb679", size = 1766177, upload-time = "2025-07-27T21:21:42.224Z" }, + { url = 
"https://files.pythonhosted.org/packages/28/ef/b43280767ebcde022ba31f1e9902137655a956ae30e920d75630fa67e36e/cramjam-2.11.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:40a75b95e05e38a2a055b2446f09994ce1139151721659315151d4ad6289bbff", size = 1854031, upload-time = "2025-07-27T21:21:43.651Z" }, + { url = "https://files.pythonhosted.org/packages/60/1c/79d522757c494dfd9e9b208b0604cc7e97b481483cc477144f5705a06ab7/cramjam-2.11.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e5d042c376d2025300da37d65192d06a457918b63b31140f697f85fd8e310b29", size = 2035812, upload-time = "2025-07-27T21:21:45.473Z" }, + { url = "https://files.pythonhosted.org/packages/c8/70/3bf0670380069b3abd4c6b53f61d3148f4e08935569c08efbeaf7550e87d/cramjam-2.11.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cb148b35ab20c75b19a06c27f05732e2a321adbd86fadc93f9466dbd7b1154a7", size = 2067661, upload-time = "2025-07-27T21:21:47.901Z" }, + { url = "https://files.pythonhosted.org/packages/db/7e/4f6ca98a4b474348e965a529b359184785d1119ab7c4c9ec1280b8bea50a/cramjam-2.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee47c220f0f5179ddc923ab91fc9e282c27b29fabc60c433dfe06f08084f798", size = 1981523, upload-time = "2025-07-27T21:21:49.704Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6c/b241511c7ffd5f1da29641429bb0e19b5fbcffafde5ba1bbcbf9394ea456/cramjam-2.11.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0cf1b5a81b21ea175c976c3ab09e00494258f4b49b7995efc86060cced3f0b2e", size = 2034251, upload-time = "2025-07-27T21:21:51.252Z" }, + { url = "https://files.pythonhosted.org/packages/14/5c/4ef926c8c3c1bf6da96f9c53450ff334cdb6d0fc1efced0aea97e2090803/cramjam-2.11.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:360c00338ecf48921492455007f904be607fc7818de3d681acbcc542aae2fb36", size = 2155322, upload-time = "2025-07-27T21:21:53.348Z" }, + { url = "https://files.pythonhosted.org/packages/be/fb/eb2aef7fb2730e56c5a2c9000817ee8fb4a95c92f19cc6e441afed42ec29/cramjam-2.11.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f31fcc0d30dc3f3e94ea6b4d8e1a855071757c6abf6a7b1e284050ab7d4c299c", size = 2169094, upload-time = "2025-07-27T21:21:55.187Z" }, + { url = "https://files.pythonhosted.org/packages/3b/80/925a5c668dcee1c6f61775067185c5dc9a63c766d5393e5c60d2af4217a7/cramjam-2.11.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:033be66fdceb3d63b2c99b257a98380c4ec22c9e4dca54a2bfec3718cd24e184", size = 2159089, upload-time = "2025-07-27T21:21:57.118Z" }, + { url = "https://files.pythonhosted.org/packages/1f/ac/b2819640eef0592a6de7ca832c0d23c69bd1620f765ce88b60dbc8da9ba2/cramjam-2.11.0-cp310-cp310-win32.whl", hash = "sha256:1c6cea67f6000b81f6bd27d14c8a6f62d00336ca7252fd03ee16f6b70eb5c0d2", size = 1605046, upload-time = "2025-07-27T21:21:58.617Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f4/06af04727b9556721049e2127656d727306d275c518e3d97f9ed4cffd0d8/cramjam-2.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:98aa4a351b047b0f7f9e971585982065028adc2c162c5c23c5d5734c5ccc1077", size = 1710647, upload-time = "2025-07-27T21:22:00.279Z" }, + { url = "https://files.pythonhosted.org/packages/d0/89/8001f6a9b6b6e9fa69bec5319789083475d6f26d52aaea209d3ebf939284/cramjam-2.11.0-cp311-cp311-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:04cfa39118570e70e920a9b75c733299784b6d269733dbc791d9aaed6edd2615", size = 3559272, upload-time = "2025-07-27T21:22:01.988Z" }, + { url 
= "https://files.pythonhosted.org/packages/0b/f3/001d00070ca92e5fbe6aacc768e455568b0cde46b0eb944561a4ea132300/cramjam-2.11.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:66a18f68506290349a256375d7aa2f645b9f7993c10fc4cc211db214e4e61d2b", size = 1861743, upload-time = "2025-07-27T21:22:03.754Z" }, + { url = "https://files.pythonhosted.org/packages/c9/35/041a3af01bf3f6158f120070f798546d4383b962b63c35cd91dcbf193e17/cramjam-2.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:50e7d65533857736cd56f6509cf2c4866f28ad84dd15b5bdbf2f8a81e77fa28a", size = 1699631, upload-time = "2025-07-27T21:22:05.192Z" }, + { url = "https://files.pythonhosted.org/packages/17/eb/5358b238808abebd0c949c42635c3751204ca7cf82b29b984abe9f5e33c8/cramjam-2.11.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1f71989668458fc327ac15396db28d92df22f8024bb12963929798b2729d2df5", size = 2025603, upload-time = "2025-07-27T21:22:06.726Z" }, + { url = "https://files.pythonhosted.org/packages/0e/79/19dba7c03a27408d8d11b5a7a4a7908459cfd4e6f375b73264dc66517bf6/cramjam-2.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee77ac543f1e2b22af1e8be3ae589f729491b6090582340aacd77d1d757d9569", size = 1766283, upload-time = "2025-07-27T21:22:08.568Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ad/40e4b3408501d886d082db465c33971655fe82573c535428e52ab905f4d0/cramjam-2.11.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad52784120e7e4d8a0b5b0517d185b8bf7f74f5e17272857ddc8951a628d9be1", size = 1854407, upload-time = "2025-07-27T21:22:10.518Z" }, + { url = "https://files.pythonhosted.org/packages/36/6e/c1b60ceb6d7ea6ff8b0bf197520aefe23f878bf2bfb0de65f2b0c2f82cd1/cramjam-2.11.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b86f8e6d9c1b3f9a75b2af870c93ceee0f1b827cd2507387540e053b35d7459", size = 2035793, upload-time = "2025-07-27T21:22:12.504Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ad/32a8d5f4b1e3717787945ec6d71bd1c6e6bccba4b7e903fc0d9d4e4b08c3/cramjam-2.11.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:320d61938950d95da2371b46c406ec433e7955fae9f396c8e1bf148ffc187d11", size = 2067499, upload-time = "2025-07-27T21:22:14.067Z" }, + { url = "https://files.pythonhosted.org/packages/ff/cd/3b5a662736ea62ff7fa4c4a10a85e050bfdaad375cc53dc80427e8afe41c/cramjam-2.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41eafc8c1653a35a5c7e75ad48138f9f60085cc05cd99d592e5298552d944e9f", size = 1981853, upload-time = "2025-07-27T21:22:15.908Z" }, + { url = "https://files.pythonhosted.org/packages/26/8e/1dbcfaaa7a702ee82ee683ec3a81656934dd7e04a7bc4ee854033686f98a/cramjam-2.11.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03a7316c6bf763dfa34279335b27702321da44c455a64de58112968c0818ec4a", size = 2034514, upload-time = "2025-07-27T21:22:17.352Z" }, + { url = "https://files.pythonhosted.org/packages/50/62/f11709bfdce74af79a88b410dcb76dedc97612166e759136931bf63cfd7b/cramjam-2.11.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:244c2ed8bd7ccbb294a2abe7ca6498db7e89d7eb5e744691dc511a7dc82e65ca", size = 2155343, upload-time = "2025-07-27T21:22:18.854Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6d/3b98b61841a5376d9a9b8468ae58753a8e6cf22be9534a0fa5af4d8621cc/cramjam-2.11.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:405f8790bad36ce0b4bbdb964ad51507bfc7942c78447f25cb828b870a1d86a0", size = 2169367, upload-time = 
"2025-07-27T21:22:20.389Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/bd5db5c49dbebc8b002f1c4983101b28d2e7fc9419753db1c31ec22b03ef/cramjam-2.11.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6b1b751a5411032b08fb3ac556160229ca01c6bbe4757bb3a9a40b951ebaac23", size = 2159334, upload-time = "2025-07-27T21:22:22.254Z" }, + { url = "https://files.pythonhosted.org/packages/34/32/203c57acdb6eea727e7078b2219984e64ed4ad043c996ed56321301ba167/cramjam-2.11.0-cp311-cp311-win32.whl", hash = "sha256:5251585608778b9ac8effed544933df7ad85b4ba21ee9738b551f17798b215ac", size = 1605313, upload-time = "2025-07-27T21:22:24.126Z" }, + { url = "https://files.pythonhosted.org/packages/a9/bd/102d6deb87a8524ac11cddcd31a7612b8f20bf9b473c3c645045e3b957c7/cramjam-2.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:dca88bc8b68ce6d35dafd8c4d5d59a238a56c43fa02b74c2ce5f9dfb0d1ccb46", size = 1710991, upload-time = "2025-07-27T21:22:25.661Z" }, + { url = "https://files.pythonhosted.org/packages/0b/0d/7c84c913a5fae85b773a9dcf8874390f9d68ba0fcc6630efa7ff1541b950/cramjam-2.11.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:dba5c14b8b4f73ea1e65720f5a3fe4280c1d27761238378be8274135c60bbc6e", size = 3553368, upload-time = "2025-07-27T21:22:27.162Z" }, + { url = "https://files.pythonhosted.org/packages/2b/cc/4f6d185d8a744776f53035e72831ff8eefc2354f46ab836f4bd3c4f6c138/cramjam-2.11.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:11eb40722b3fcf3e6890fba46c711bf60f8dc26360a24876c85e52d76c33b25b", size = 1860014, upload-time = "2025-07-27T21:22:28.738Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a8/626c76263085c6d5ded0e71823b411e9522bfc93ba6cc59855a5869296e7/cramjam-2.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aeb26e2898994b6e8319f19a4d37c481512acdcc6d30e1b5ecc9d8ec57e835cb", size = 1693512, upload-time = "2025-07-27T21:22:30.999Z" }, + { url = "https://files.pythonhosted.org/packages/e9/52/0851a16a62447532e30ba95a80e638926fdea869a34b4b5b9d0a020083ba/cramjam-2.11.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f8d82081ed7d8fe52c982bd1f06e4c7631a73fe1fb6d4b3b3f2404f87dc40fe", size = 2025285, upload-time = "2025-07-27T21:22:32.954Z" }, + { url = "https://files.pythonhosted.org/packages/98/76/122e444f59dbc216451d8e3d8282c9665dc79eaf822f5f1470066be1b695/cramjam-2.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:092a3ec26e0a679305018380e4f652eae1b6dfe3fc3b154ee76aa6b92221a17c", size = 1761327, upload-time = "2025-07-27T21:22:34.484Z" }, + { url = "https://files.pythonhosted.org/packages/a3/bc/3a0189aef1af2b29632c039c19a7a1b752bc21a4053582a5464183a0ad3d/cramjam-2.11.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:529d6d667c65fd105d10bd83d1cd3f9869f8fd6c66efac9415c1812281196a92", size = 1854075, upload-time = "2025-07-27T21:22:36.157Z" }, + { url = "https://files.pythonhosted.org/packages/2e/80/8a6343b13778ce52d94bb8d5365a30c3aa951276b1857201fe79d7e2ad25/cramjam-2.11.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:555eb9c90c450e0f76e27d9ff064e64a8b8c6478ab1a5594c91b7bc5c82fd9f0", size = 2032710, upload-time = "2025-07-27T21:22:38.17Z" }, + { url = "https://files.pythonhosted.org/packages/df/6b/cd1778a207c29eda10791e3dfa018b588001928086e179fc71254793c625/cramjam-2.11.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5edf4c9e32493035b514cf2ba0c969d81ccb31de63bd05490cc8bfe3b431674e", size = 
2068353, upload-time = "2025-07-27T21:22:39.615Z" }, + { url = "https://files.pythonhosted.org/packages/dc/f0/5c2a5cd5711032f3b191ca50cb786c17689b4a9255f9f768866e6c9f04d9/cramjam-2.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fa2fe41f48c4d58d923803383b0737f048918b5a0d10390de9628bb6272b107", size = 1978104, upload-time = "2025-07-27T21:22:41.106Z" }, + { url = "https://files.pythonhosted.org/packages/f9/8b/b363a5fb2c3347504fe9a64f8d0f1e276844f0e532aa7162c061cd1ffee4/cramjam-2.11.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9ca14cf1cabdb0b77d606db1bb9e9ca593b1dbd421fcaf251ec9a5431ec449f3", size = 2030779, upload-time = "2025-07-27T21:22:42.969Z" }, + { url = "https://files.pythonhosted.org/packages/78/7b/d83dad46adb6c988a74361f81ad9c5c22642be53ad88616a19baedd06243/cramjam-2.11.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:309e95bf898829476bccf4fd2c358ec00e7ff73a12f95a3cdeeba4bb1d3683d5", size = 2155297, upload-time = "2025-07-27T21:22:44.6Z" }, + { url = "https://files.pythonhosted.org/packages/1a/be/60d9be4cb33d8740a4aa94c7513f2ef3c4eba4fd13536f086facbafade71/cramjam-2.11.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:86dca35d2f15ef22922411496c220f3c9e315d5512f316fe417461971cc1648d", size = 2169255, upload-time = "2025-07-27T21:22:46.534Z" }, + { url = "https://files.pythonhosted.org/packages/11/b0/4a595f01a243aec8ad272b160b161c44351190c35d98d7787919d962e9e5/cramjam-2.11.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:193c6488bd2f514cbc0bef5c18fad61a5f9c8d059dd56edf773b3b37f0e85496", size = 2155651, upload-time = "2025-07-27T21:22:48.46Z" }, + { url = "https://files.pythonhosted.org/packages/38/47/7776659aaa677046b77f527106e53ddd47373416d8fcdb1e1a881ec5dc06/cramjam-2.11.0-cp312-cp312-win32.whl", hash = "sha256:514e2c008a8b4fa823122ca3ecab896eac41d9aa0f5fc881bd6264486c204e32", size = 1603568, upload-time = "2025-07-27T21:22:50.084Z" }, + { url = "https://files.pythonhosted.org/packages/75/b1/d53002729cfd94c5844ddfaf1233c86d29f2dbfc1b764a6562c41c044199/cramjam-2.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:53fed080476d5f6ad7505883ec5d1ec28ba36c2273db3b3e92d7224fe5e463db", size = 1709287, upload-time = "2025-07-27T21:22:51.534Z" }, + { url = "https://files.pythonhosted.org/packages/0a/8b/406c5dc0f8e82385519d8c299c40fd6a56d97eca3fcd6f5da8dad48de75b/cramjam-2.11.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2c289729cc1c04e88bafa48b51082fb462b0a57dbc96494eab2be9b14dca62af", size = 3553330, upload-time = "2025-07-27T21:22:53.124Z" }, + { url = "https://files.pythonhosted.org/packages/00/ad/4186884083d6e4125b285903e17841827ab0d6d0cffc86216d27ed91e91d/cramjam-2.11.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:045201ee17147e36cf43d8ae2fa4b4836944ac672df5874579b81cf6d40f1a1f", size = 1859756, upload-time = "2025-07-27T21:22:54.821Z" }, + { url = "https://files.pythonhosted.org/packages/54/01/91b485cf76a7efef638151e8a7d35784dae2c4ff221b1aec2c083e4b106d/cramjam-2.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:619cd195d74c9e1d2a3ad78d63451d35379c84bd851aec552811e30842e1c67a", size = 1693609, upload-time = "2025-07-27T21:22:56.331Z" }, + { url = "https://files.pythonhosted.org/packages/cd/84/d0c80d279b2976870fc7d10f15dcb90a3c10c06566c6964b37c152694974/cramjam-2.11.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6eb3ae5ab72edb2ed68bdc0f5710f0a6cad7fd778a610ec2c31ee15e32d3921e", size = 2024912, upload-time = "2025-07-27T21:22:57.915Z" }, 
+ { url = "https://files.pythonhosted.org/packages/d6/70/88f2a5cb904281ed5d3c111b8f7d5366639817a5470f059bcd26833fc870/cramjam-2.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df7da3f4b19e3078f9635f132d31b0a8196accb2576e3213ddd7a77f93317c20", size = 1760715, upload-time = "2025-07-27T21:22:59.528Z" }, + { url = "https://files.pythonhosted.org/packages/b2/06/cf5b02081132537d28964fb385fcef9ed9f8a017dd7d8c59d317e53ba50d/cramjam-2.11.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57286b289cd557ac76c24479d8ecfb6c3d5b854cce54ccc7671f9a2f5e2a2708", size = 1853782, upload-time = "2025-07-27T21:23:01.07Z" }, + { url = "https://files.pythonhosted.org/packages/57/27/63525087ed40a53d1867021b9c4858b80cc86274ffe7225deed067d88d92/cramjam-2.11.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:28952fbbf8b32c0cb7fa4be9bcccfca734bf0d0989f4b509dc7f2f70ba79ae06", size = 2032354, upload-time = "2025-07-27T21:23:03.021Z" }, + { url = "https://files.pythonhosted.org/packages/c3/ef/dbba082c6ebfb6410da4dd39a64e654d7194fcfd4567f85991a83fa4ec32/cramjam-2.11.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78ed2e4099812a438b545dfbca1928ec825e743cd253bc820372d6ef8c3adff4", size = 2068007, upload-time = "2025-07-27T21:23:04.526Z" }, + { url = "https://files.pythonhosted.org/packages/35/ce/d902b9358a46a086938feae83b2251720e030f06e46006f4c1fc0ac9da20/cramjam-2.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9aecd5c3845d415bd6c9957c93de8d93097e269137c2ecb0e5a5256374bdc8", size = 1977485, upload-time = "2025-07-27T21:23:06.058Z" }, + { url = "https://files.pythonhosted.org/packages/e8/03/982f54553244b0afcbdb2ad2065d460f0ab05a72a96896a969a1ca136a1e/cramjam-2.11.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:362fcf4d6f5e1242a4540812455f5a594949190f6fbc04f2ffbfd7ae0266d788", size = 2030447, upload-time = "2025-07-27T21:23:07.679Z" }, + { url = "https://files.pythonhosted.org/packages/74/5f/748e54cdb665ec098ec519e23caacc65fc5ae58718183b071e33fc1c45b4/cramjam-2.11.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:13240b3dea41b1174456cb9426843b085dc1a2bdcecd9ee2d8f65ac5703374b0", size = 2154949, upload-time = "2025-07-27T21:23:09.366Z" }, + { url = "https://files.pythonhosted.org/packages/69/81/c4e6cb06ed69db0dc81f9a8b1dc74995ebd4351e7a1877143f7031ff2700/cramjam-2.11.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:c54eed83726269594b9086d827decc7d2015696e31b99bf9b69b12d9063584fe", size = 2168925, upload-time = "2025-07-27T21:23:10.976Z" }, + { url = "https://files.pythonhosted.org/packages/13/5b/966365523ce8290a08e163e3b489626c5adacdff2b3da9da1b0823dfb14e/cramjam-2.11.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f8195006fdd0fc0a85b19df3d64a3ef8a240e483ae1dfc7ac6a4316019eb5df2", size = 2154950, upload-time = "2025-07-27T21:23:12.514Z" }, + { url = "https://files.pythonhosted.org/packages/3a/7d/7f8eb5c534b72b32c6eb79d74585bfee44a9a5647a14040bb65c31c2572d/cramjam-2.11.0-cp313-cp313-win32.whl", hash = "sha256:ccf30e3fe6d770a803dcdf3bb863fa44ba5dc2664d4610ba2746a3c73599f2e4", size = 1603199, upload-time = "2025-07-27T21:23:14.38Z" }, + { url = "https://files.pythonhosted.org/packages/37/05/47b5e0bf7c41a3b1cdd3b7c2147f880c93226a6bef1f5d85183040cbdece/cramjam-2.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:ee36348a204f0a68b03400f4736224e9f61d1c6a1582d7f875c1ca56f0254268", size = 1708924, upload-time = "2025-07-27T21:23:16.332Z" }, + { url = 
"https://files.pythonhosted.org/packages/de/07/a1051cdbbe6d723df16d756b97f09da7c1adb69e29695c58f0392bc12515/cramjam-2.11.0-cp314-cp314-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7ba5e38c9fbd06f086f4a5a64a1a5b7b417cd3f8fc07a20e5c03651f72f36100", size = 3554141, upload-time = "2025-07-27T21:23:17.938Z" }, + { url = "https://files.pythonhosted.org/packages/74/66/58487d2e16ef3d04f51a7c7f0e69823e806744b4c21101e89da4873074bc/cramjam-2.11.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:b8adeee57b41fe08e4520698a4b0bd3cc76dbd81f99424b806d70a5256a391d3", size = 1860353, upload-time = "2025-07-27T21:23:19.593Z" }, + { url = "https://files.pythonhosted.org/packages/67/b4/67f6254d166ffbcc9d5fa1b56876eaa920c32ebc8e9d3d525b27296b693b/cramjam-2.11.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b96a74fa03a636c8a7d76f700d50e9a8bc17a516d6a72d28711225d641e30968", size = 1693832, upload-time = "2025-07-27T21:23:21.185Z" }, + { url = "https://files.pythonhosted.org/packages/55/a3/4e0b31c0d454ae70c04684ed7c13d3c67b4c31790c278c1e788cb804fa4a/cramjam-2.11.0-cp314-cp314-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c3811a56fa32e00b377ef79121c0193311fd7501f0fb378f254c7f083cc1fbe0", size = 2027080, upload-time = "2025-07-27T21:23:23.303Z" }, + { url = "https://files.pythonhosted.org/packages/d9/c7/5e8eed361d1d3b8be14f38a54852c5370cc0ceb2c2d543b8ba590c34f080/cramjam-2.11.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5d927e87461f8a0d448e4ab5eb2bca9f31ca5d8ea86d70c6f470bb5bc666d7e", size = 1761543, upload-time = "2025-07-27T21:23:24.991Z" }, + { url = "https://files.pythonhosted.org/packages/09/0c/06b7f8b0ce9fde89470505116a01fc0b6cb92d406c4fb1e46f168b5d3fa5/cramjam-2.11.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f1f5c450121430fd89cb5767e0a9728ecc65997768fd4027d069cb0368af62f9", size = 1854636, upload-time = "2025-07-27T21:23:26.987Z" }, + { url = "https://files.pythonhosted.org/packages/6f/c6/6ebc02c9d5acdf4e5f2b1ec6e1252bd5feee25762246798ae823b3347457/cramjam-2.11.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:724aa7490be50235d97f07e2ca10067927c5d7f336b786ddbc868470e822aa25", size = 2032715, upload-time = "2025-07-27T21:23:28.603Z" }, + { url = "https://files.pythonhosted.org/packages/a2/77/a122971c23f5ca4b53e4322c647ac7554626c95978f92d19419315dddd05/cramjam-2.11.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:54c4637122e7cfd7aac5c1d3d4c02364f446d6923ea34cf9d0e8816d6e7a4936", size = 2069039, upload-time = "2025-07-27T21:23:30.319Z" }, + { url = "https://files.pythonhosted.org/packages/19/0f/f6121b90b86b9093c066889274d26a1de3f29969d45c2ed1ecbe2033cb78/cramjam-2.11.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17eb39b1696179fb471eea2de958fa21f40a2cd8bf6b40d428312d5541e19dc4", size = 1979566, upload-time = "2025-07-27T21:23:32.002Z" }, + { url = "https://files.pythonhosted.org/packages/e0/a3/f95bc57fd7f4166ce6da816cfa917fb7df4bb80e669eb459d85586498414/cramjam-2.11.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:36aa5a798aa34e11813a80425a30d8e052d8de4a28f27bfc0368cfc454d1b403", size = 2030905, upload-time = "2025-07-27T21:23:33.696Z" }, + { url = "https://files.pythonhosted.org/packages/fc/52/e429de4e8bc86ee65e090dae0f87f45abd271742c63fb2d03c522ffde28a/cramjam-2.11.0-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:449fca52774dc0199545fbf11f5128933e5a6833946707885cf7be8018017839", 
size = 2155592, upload-time = "2025-07-27T21:23:35.375Z" }, + { url = "https://files.pythonhosted.org/packages/6c/6c/65a7a0207787ad39ad804af4da7f06a60149de19481d73d270b540657234/cramjam-2.11.0-cp314-cp314-musllinux_1_1_i686.whl", hash = "sha256:d87d37b3d476f4f7623c56a232045d25bd9b988314702ea01bd9b4a94948a778", size = 2170839, upload-time = "2025-07-27T21:23:37.197Z" }, + { url = "https://files.pythonhosted.org/packages/b2/c5/5c5db505ba692bc844246b066e23901d5905a32baf2f33719c620e65887f/cramjam-2.11.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:26cb45c47d71982d76282e303931c6dd4baee1753e5d48f9a89b3a63e690b3a3", size = 2157236, upload-time = "2025-07-27T21:23:38.854Z" }, + { url = "https://files.pythonhosted.org/packages/b0/22/88e6693e60afe98901e5bbe91b8dea193e3aa7f42e2770f9c3339f5c1065/cramjam-2.11.0-cp314-cp314-win32.whl", hash = "sha256:4efe919d443c2fd112fe25fe636a52f9628250c9a50d9bddb0488d8a6c09acc6", size = 1604136, upload-time = "2025-07-27T21:23:40.56Z" }, + { url = "https://files.pythonhosted.org/packages/cc/f8/01618801cd59ccedcc99f0f96d20be67d8cfc3497da9ccaaad6b481781dd/cramjam-2.11.0-cp314-cp314-win_amd64.whl", hash = "sha256:ccec3524ea41b9abd5600e3e27001fd774199dbb4f7b9cb248fcee37d4bda84c", size = 1710272, upload-time = "2025-07-27T21:23:42.236Z" }, + { url = "https://files.pythonhosted.org/packages/40/81/6cdb3ed222d13ae86bda77aafe8d50566e81a1169d49ed195b6263610704/cramjam-2.11.0-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:966ac9358b23d21ecd895c418c048e806fd254e46d09b1ff0cdad2eba195ea3e", size = 3559671, upload-time = "2025-07-27T21:23:44.504Z" }, + { url = "https://files.pythonhosted.org/packages/cb/43/52b7e54fe5ba1ef0270d9fdc43dabd7971f70ea2d7179be918c997820247/cramjam-2.11.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:387f09d647a0d38dcb4539f8a14281f8eb6bb1d3e023471eb18a5974b2121c86", size = 1867876, upload-time = "2025-07-27T21:23:46.987Z" }, + { url = "https://files.pythonhosted.org/packages/9d/28/30d5b8d10acd30db3193bc562a313bff722888eaa45cfe32aa09389f2b24/cramjam-2.11.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:665b0d8fbbb1a7f300265b43926457ec78385200133e41fef19d85790fc1e800", size = 1695562, upload-time = "2025-07-27T21:23:48.644Z" }, + { url = "https://files.pythonhosted.org/packages/d9/86/ec806f986e01b896a650655024ea52a13e25c3ac8a3a382f493089483cdc/cramjam-2.11.0-cp314-cp314t-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ca905387c7a371531b9622d93471be4d745ef715f2890c3702479cd4fc85aa51", size = 2025056, upload-time = "2025-07-27T21:23:50.404Z" }, + { url = "https://files.pythonhosted.org/packages/09/43/c2c17586b90848d29d63181f7d14b8bd3a7d00975ad46e3edf2af8af7e1f/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c1aa56aef2c8af55a21ed39040a94a12b53fb23beea290f94d19a76027e2ffb", size = 1764084, upload-time = "2025-07-27T21:23:52.265Z" }, + { url = "https://files.pythonhosted.org/packages/2b/a9/68bc334fadb434a61df10071dc8606702aa4f5b6cdb2df62474fc21d2845/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5db59c1cdfaa2ab85cc988e602d6919495f735ca8a5fd7603608eb1e23c26d5", size = 1854859, upload-time = "2025-07-27T21:23:54.085Z" }, + { url = "https://files.pythonhosted.org/packages/5b/4e/b48e67835b5811ec5e9cb2e2bcba9c3fd76dab3e732569fe801b542c6ca9/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b1f893014f00fe5e89a660a032e813bf9f6d91de74cd1490cdb13b2b59d0c9a3", size = 2035970, upload-time = "2025-07-27T21:23:55.758Z" }, + { url = "https://files.pythonhosted.org/packages/c4/70/d2ac33d572b4d90f7f0f2c8a1d60fb48f06b128fdc2c05f9b49891bb0279/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c26a1eb487947010f5de24943bd7c422dad955b2b0f8650762539778c380ca89", size = 2069320, upload-time = "2025-07-27T21:23:57.494Z" }, + { url = "https://files.pythonhosted.org/packages/1d/4c/85cec77af4a74308ba5fca8e296c4e2f80ec465c537afc7ab1e0ca2f9a00/cramjam-2.11.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d5c8bfb438d94e7b892d1426da5fc4b4a5370cc360df9b8d9d77c33b896c37e", size = 1982668, upload-time = "2025-07-27T21:23:59.126Z" }, + { url = "https://files.pythonhosted.org/packages/55/45/938546d1629e008cc3138df7c424ef892719b1796ff408a2ab8550032e5e/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:cb1fb8c9337ab0da25a01c05d69a0463209c347f16512ac43be5986f3d1ebaf4", size = 2034028, upload-time = "2025-07-27T21:24:00.865Z" }, + { url = "https://files.pythonhosted.org/packages/01/76/b5a53e20505555f1640e66dcf70394bcf51a1a3a072aa18ea35135a0f9ed/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:1f6449f6de52dde3e2f1038284910c8765a397a25e2d05083870f3f5e7fc682c", size = 2155513, upload-time = "2025-07-27T21:24:02.92Z" }, + { url = "https://files.pythonhosted.org/packages/84/12/8d3f6ceefae81bbe45a347fdfa2219d9f3ac75ebc304f92cd5fcb4fbddc5/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_i686.whl", hash = "sha256:382dec4f996be48ed9c6958d4e30c2b89435d7c2c4dbf32480b3b8886293dd65", size = 2170035, upload-time = "2025-07-27T21:24:04.558Z" }, + { url = "https://files.pythonhosted.org/packages/4b/85/3be6f0a1398f976070672be64f61895f8839857618a2d8cc0d3ab529d3dc/cramjam-2.11.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:d388bd5723732c3afe1dd1d181e4213cc4e1be210b080572e7d5749f6e955656", size = 2160229, upload-time = "2025-07-27T21:24:06.729Z" }, + { url = "https://files.pythonhosted.org/packages/57/5e/66cfc3635511b20014bbb3f2ecf0095efb3049e9e96a4a9e478e4f3d7b78/cramjam-2.11.0-cp314-cp314t-win32.whl", hash = "sha256:0a70ff17f8e1d13f322df616505550f0f4c39eda62290acb56f069d4857037c8", size = 1610267, upload-time = "2025-07-27T21:24:08.428Z" }, + { url = "https://files.pythonhosted.org/packages/ce/c6/c71e82e041c95ffe6a92ac707785500aa2a515a4339c2c7dd67e3c449249/cramjam-2.11.0-cp314-cp314t-win_amd64.whl", hash = "sha256:028400d699442d40dbda02f74158c73d05cb76587a12490d0bfedd958fd49188", size = 1713108, upload-time = "2025-07-27T21:24:10.147Z" }, + { url = "https://files.pythonhosted.org/packages/8c/33/3d7a7fbfb313614d59ae2e512b9dacfc22efb07c20e4af7deb73d3409f7b/cramjam-2.11.0-cp39-cp39-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:2581e82dca742b55d8b1d7f33892394c06b057a74f2853ffcb0802dcddcbf694", size = 3559843, upload-time = "2025-07-27T21:24:11.928Z" }, + { url = "https://files.pythonhosted.org/packages/d4/b0/ccf09697df7fcc750c4913dc4bf3fb91e5b778dda65fb9fa55dde61c03dc/cramjam-2.11.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a9994a42cd12f07ece04eff94dbf6e127b3986f7af9b26db1eb4545c477a6604", size = 1862081, upload-time = "2025-07-27T21:24:13.8Z" }, + { url = "https://files.pythonhosted.org/packages/41/55/d36255f1a9004a3352469143d2b8a5b769e0eb4e484a8192da41ad67e893/cramjam-2.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:a4963dac24213690183110d6b41125fdc4af871a5a213589d6c6606d49e1b949", size = 1699970, upload-time = "2025-07-27T21:24:15.547Z" }, + { url = "https://files.pythonhosted.org/packages/35/52/722a2efbe104903648185411f9c634e5678035476bc556001d6ef811e191/cramjam-2.11.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9af16f0b07d851b968c54e52d19430d820bb47c26d10a09cfb5c7127de26773", size = 2025715, upload-time = "2025-07-27T21:24:17.327Z" }, + { url = "https://files.pythonhosted.org/packages/0a/60/75084f30277d5f2481d20a544654894a32528f98f4415c1bd467823ab5b2/cramjam-2.11.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e2400c09ba620e2ca91a903dbe907d75f6a1994d8337e9f3026778daa92b08d", size = 1766999, upload-time = "2025-07-27T21:24:19.163Z" }, + { url = "https://files.pythonhosted.org/packages/89/5c/2663bdfcea6ab06fcac97883b5b574a12236c5d9f70691cc05dd49cb10fb/cramjam-2.11.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b820004db8b22715cee2ef154d4b47b3d76c4677ff217c587dd46f694a3052f9", size = 1854352, upload-time = "2025-07-27T21:24:20.953Z" }, + { url = "https://files.pythonhosted.org/packages/b4/df/1db5b57ccf77e923687b2061766e69c2cbdaf41641204207dbf55ef7ebe9/cramjam-2.11.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:261e9200942189d8201a005ffa1e29339479364b5b0013ab0758b03229d9ac67", size = 2036219, upload-time = "2025-07-27T21:24:23.029Z" }, + { url = "https://files.pythonhosted.org/packages/f7/28/fa3b017668a3264068c893e57a6b923dfd8fa851a1c821c4cc1c95cd47a6/cramjam-2.11.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24c61f1fad56ca68aee53bf67b6a84cd762a2c71ee4b71064378547c2411ae6", size = 2077245, upload-time = "2025-07-27T21:24:25.127Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1d/6f6018ee81acec6c4ef6cda6bd0770959992caf2f1c41e7944a135a53eca/cramjam-2.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab86d22f69a21961f35d1a1b02278b5bb9a95c5f5b4722c6904bca343c8d219f", size = 1982235, upload-time = "2025-07-27T21:24:26.851Z" }, + { url = "https://files.pythonhosted.org/packages/31/b4/c38f6077d8ec7c9208d23d4f7f19a618f5b4940170c9deba5d3bdc722eb6/cramjam-2.11.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a88bc9b191422cd5b22a1521b28607008590628b6b2a8a7db5c54ec04dc82fa1", size = 2034629, upload-time = "2025-07-27T21:24:28.694Z" }, + { url = "https://files.pythonhosted.org/packages/66/3b/3f46a349b1a7a67e2bda10e99403e9163c87c95e34399cc69f4f86a2461a/cramjam-2.11.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:7855bc4df5ed5f7fb1c98ea3fd98292e9acd3c097b1b21d596a69e1e60455400", size = 2155552, upload-time = "2025-07-27T21:24:30.572Z" }, + { url = "https://files.pythonhosted.org/packages/ed/86/b431a51162d4c8f33b28bdcca047382e1038757d43625e65c8d29ed6c31f/cramjam-2.11.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:19eb43e21db9dc42613599703c1a8e40b0170514a313f11f4c8be380425a1019", size = 2169651, upload-time = "2025-07-27T21:24:32.331Z" }, + { url = "https://files.pythonhosted.org/packages/6e/d5/9aa69784da58b6bd3f5abcaad2eb76ad2a89efde7929821bad17355fd8da/cramjam-2.11.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cec977d673ad596bae6bdfc0091ee386cef05b515b23f2ce52f9fadd0156186a", size = 2159740, upload-time = "2025-07-27T21:24:34.108Z" }, + { url = "https://files.pythonhosted.org/packages/9c/e1/75706936eb81605a939e15b8b7a1241b35e805ce76a64838b4586c440f61/cramjam-2.11.0-cp39-cp39-win32.whl", hash = 
"sha256:dcc3b15b97f3054964b47e2a5fcfb4f5ff569e9af0a7af19f1d4c5f4231bbf3b", size = 1605449, upload-time = "2025-07-27T21:24:36.538Z" }, + { url = "https://files.pythonhosted.org/packages/37/6b/ae7626994c7285bfc0ffa0d9929c3c16f2d0aea5b9e151dad82fd0616762/cramjam-2.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:5eb0603d8f8019451fc00e1daf4022dfc9df59c16d2e68f925c77ac94555493b", size = 1710860, upload-time = "2025-07-27T21:24:38.243Z" }, + { url = "https://files.pythonhosted.org/packages/bf/8f/82e35ec3c5387f1864f46b3c24bce89a07af8bb3ef242ae47281db2c1848/cramjam-2.11.0-pp310-pypy310_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:37bed927abc4a7ae2d2669baa3675e21904d8a038ed8e4313326ea7b3be62b2b", size = 3573104, upload-time = "2025-07-27T21:24:40.069Z" }, + { url = "https://files.pythonhosted.org/packages/f0/4e/0c821918080a32ba1e52c040e12dd02dada67728f07305c5f778b808a807/cramjam-2.11.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:50e4a58635fa8c6897d84847d6e065eb69f92811670fc5e9f2d9e3b6279a02b6", size = 1873441, upload-time = "2025-07-27T21:24:42.333Z" }, + { url = "https://files.pythonhosted.org/packages/a8/fd/848d077bf6abc4ce84273d8e3f3a70d61a2240519a339462f699d8acf829/cramjam-2.11.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3d1ba626dd5f81f7f09bbf59f70b534e2b75e0d6582b056b7bd31b397f1c13e9", size = 1702589, upload-time = "2025-07-27T21:24:44.305Z" }, + { url = "https://files.pythonhosted.org/packages/9d/1c/899818999bbdb59c601756b413e87d37fd65875d1315346c10e367bb3505/cramjam-2.11.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c71e140d5eb3145d61d59d0be0bf72f07cc4cf4b32cb136b09f712a3b1040f5f", size = 1773646, upload-time = "2025-07-27T21:24:46.495Z" }, + { url = "https://files.pythonhosted.org/packages/5f/26/c2813c5422c43b3dcd8b6645bc359f08870737c44325ee4accc18f24eee0/cramjam-2.11.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a6ed7926a5cca28edebad7d0fedd2ad492710ae3524d25fc59a2b20546d9ce1", size = 1994179, upload-time = "2025-07-27T21:24:49.131Z" }, + { url = "https://files.pythonhosted.org/packages/2e/4f/af984f8d7f963f0301812cdd620ddcfd8276461ed7a786c0f89e82b14739/cramjam-2.11.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5eb4ed3cea945b164b0513fd491884993acac2153a27b93a84019c522e8eda82", size = 1714790, upload-time = "2025-07-27T21:24:51.045Z" }, + { url = "https://files.pythonhosted.org/packages/81/da/b3301962ccd6fce9fefa1ecd8ea479edaeaa38fadb1f34d5391d2587216a/cramjam-2.11.0-pp311-pypy311_pp73-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:52d5db3369f95b27b9f3c14d067acb0b183333613363ed34268c9e04560f997f", size = 3573546, upload-time = "2025-07-27T21:24:52.944Z" }, + { url = "https://files.pythonhosted.org/packages/b6/c2/410ddb8ad4b9dfb129284666293cb6559479645da560f7077dc19d6bee9e/cramjam-2.11.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:4820516366d455b549a44d0e2210ee7c4575882dda677564ce79092588321d54", size = 1873654, upload-time = "2025-07-27T21:24:54.958Z" }, + { url = "https://files.pythonhosted.org/packages/d5/99/f68a443c64f7ce7aff5bed369b0aa5b2fac668fa3dfd441837e316e97a1f/cramjam-2.11.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d9e5db525dc0a950a825202f84ee68d89a072479e07da98795a3469df942d301", size = 1702846, upload-time = "2025-07-27T21:24:57.124Z" }, + { url = 
"https://files.pythonhosted.org/packages/6c/02/0ff358ab773def1ee3383587906c453d289953171e9c92db84fdd01bf172/cramjam-2.11.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62ab4971199b2270005359cdc379bc5736071dc7c9a228581c5122d9ffaac50c", size = 1773683, upload-time = "2025-07-27T21:24:59.28Z" }, + { url = "https://files.pythonhosted.org/packages/e9/31/3298e15f87c9cf2aabdbdd90b153d8644cf989cb42a45d68a1b71e1f7aaf/cramjam-2.11.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24758375cc5414d3035ca967ebb800e8f24604ececcba3c67d6f0218201ebf2d", size = 1994136, upload-time = "2025-07-27T21:25:01.565Z" }, + { url = "https://files.pythonhosted.org/packages/c7/90/20d1747255f1ee69a412e319da51ea594c18cca195e7a4d4c713f045eff5/cramjam-2.11.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6c2eea545fef1065c7dd4eda991666fd9c783fbc1d226592ccca8d8891c02f23", size = 1714982, upload-time = "2025-07-27T21:25:05.79Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/62/e3664e6ffd7743e1694b244dde70b43a394f6f7fbcacf7014a8ff5197c73/cryptography-46.0.1.tar.gz", hash = "sha256:ed570874e88f213437f5cf758f9ef26cbfc3f336d889b1e592ee11283bb8d1c7", size = 749198, upload-time = "2025-09-17T00:10:35.797Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/8c/44ee01267ec01e26e43ebfdae3f120ec2312aa72fa4c0507ebe41a26739f/cryptography-46.0.1-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:1cd6d50c1a8b79af1a6f703709d8973845f677c8e97b1268f5ff323d38ce8475", size = 7285044, upload-time = "2025-09-17T00:08:36.807Z" }, + { url = "https://files.pythonhosted.org/packages/22/59/9ae689a25047e0601adfcb159ec4f83c0b4149fdb5c3030cc94cd218141d/cryptography-46.0.1-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0ff483716be32690c14636e54a1f6e2e1b7bf8e22ca50b989f88fa1b2d287080", size = 4308182, upload-time = "2025-09-17T00:08:39.388Z" }, + { url = "https://files.pythonhosted.org/packages/c4/ee/ca6cc9df7118f2fcd142c76b1da0f14340d77518c05b1ebfbbabca6b9e7d/cryptography-46.0.1-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9873bf7c1f2a6330bdfe8621e7ce64b725784f9f0c3a6a55c3047af5849f920e", size = 4572393, upload-time = "2025-09-17T00:08:41.663Z" }, + { url = "https://files.pythonhosted.org/packages/7f/a3/0f5296f63815d8e985922b05c31f77ce44787b3127a67c0b7f70f115c45f/cryptography-46.0.1-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0dfb7c88d4462a0cfdd0d87a3c245a7bc3feb59de101f6ff88194f740f72eda6", size = 4308400, upload-time = "2025-09-17T00:08:43.559Z" }, + { url = "https://files.pythonhosted.org/packages/5d/8c/74fcda3e4e01be1d32775d5b4dd841acaac3c1b8fa4d0774c7ac8d52463d/cryptography-46.0.1-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e22801b61613ebdebf7deb18b507919e107547a1d39a3b57f5f855032dd7cfb8", size = 4015786, upload-time = "2025-09-17T00:08:45.758Z" }, + { url = "https://files.pythonhosted.org/packages/dc/b8/85d23287baeef273b0834481a3dd55bbed3a53587e3b8d9f0898235b8f91/cryptography-46.0.1-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:757af4f6341ce7a1e47c326ca2a81f41d236070217e5fbbad61bbfe299d55d28", size = 4982606, upload-time = "2025-09-17T00:08:47.602Z" 
}, + { url = "https://files.pythonhosted.org/packages/e5/d3/de61ad5b52433b389afca0bc70f02a7a1f074651221f599ce368da0fe437/cryptography-46.0.1-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f7a24ea78de345cfa7f6a8d3bde8b242c7fac27f2bd78fa23474ca38dfaeeab9", size = 4604234, upload-time = "2025-09-17T00:08:49.879Z" }, + { url = "https://files.pythonhosted.org/packages/dc/1f/dbd4d6570d84748439237a7478d124ee0134bf166ad129267b7ed8ea6d22/cryptography-46.0.1-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e8776dac9e660c22241b6587fae51a67b4b0147daa4d176b172c3ff768ad736", size = 4307669, upload-time = "2025-09-17T00:08:52.321Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fd/ca0a14ce7f0bfe92fa727aacaf2217eb25eb7e4ed513b14d8e03b26e63ed/cryptography-46.0.1-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9f40642a140c0c8649987027867242b801486865277cbabc8c6059ddef16dc8b", size = 4947579, upload-time = "2025-09-17T00:08:54.697Z" }, + { url = "https://files.pythonhosted.org/packages/89/6b/09c30543bb93401f6f88fce556b3bdbb21e55ae14912c04b7bf355f5f96c/cryptography-46.0.1-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:449ef2b321bec7d97ef2c944173275ebdab78f3abdd005400cc409e27cd159ab", size = 4603669, upload-time = "2025-09-17T00:08:57.16Z" }, + { url = "https://files.pythonhosted.org/packages/23/9a/38cb01cb09ce0adceda9fc627c9cf98eb890fc8d50cacbe79b011df20f8a/cryptography-46.0.1-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2dd339ba3345b908fa3141ddba4025568fa6fd398eabce3ef72a29ac2d73ad75", size = 4435828, upload-time = "2025-09-17T00:08:59.606Z" }, + { url = "https://files.pythonhosted.org/packages/0f/53/435b5c36a78d06ae0bef96d666209b0ecd8f8181bfe4dda46536705df59e/cryptography-46.0.1-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7411c910fb2a412053cf33cfad0153ee20d27e256c6c3f14d7d7d1d9fec59fd5", size = 4709553, upload-time = "2025-09-17T00:09:01.832Z" }, + { url = "https://files.pythonhosted.org/packages/f5/c4/0da6e55595d9b9cd3b6eb5dc22f3a07ded7f116a3ea72629cab595abb804/cryptography-46.0.1-cp311-abi3-win32.whl", hash = "sha256:cbb8e769d4cac884bb28e3ff620ef1001b75588a5c83c9c9f1fdc9afbe7f29b0", size = 3058327, upload-time = "2025-09-17T00:09:03.726Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/cd29a35e0d6e78a0ee61793564c8cff0929c38391cb0de27627bdc7525aa/cryptography-46.0.1-cp311-abi3-win_amd64.whl", hash = "sha256:92e8cfe8bd7dd86eac0a677499894862cd5cc2fd74de917daa881d00871ac8e7", size = 3523893, upload-time = "2025-09-17T00:09:06.272Z" }, + { url = "https://files.pythonhosted.org/packages/f2/dd/eea390f3e78432bc3d2f53952375f8b37cb4d37783e626faa6a51e751719/cryptography-46.0.1-cp311-abi3-win_arm64.whl", hash = "sha256:db5597a4c7353b2e5fb05a8e6cb74b56a4658a2b7bf3cb6b1821ae7e7fd6eaa0", size = 2932145, upload-time = "2025-09-17T00:09:08.568Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fb/c73588561afcd5e24b089952bd210b14676c0c5bf1213376350ae111945c/cryptography-46.0.1-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:4c49eda9a23019e11d32a0eb51a27b3e7ddedde91e099c0ac6373e3aacc0d2ee", size = 7193928, upload-time = "2025-09-17T00:09:10.595Z" }, + { url = "https://files.pythonhosted.org/packages/26/34/0ff0bb2d2c79f25a2a63109f3b76b9108a906dd2a2eb5c1d460b9938adbb/cryptography-46.0.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9babb7818fdd71394e576cf26c5452df77a355eac1a27ddfa24096665a27f8fd", size = 4293515, upload-time = "2025-09-17T00:09:12.861Z" }, + { url = 
"https://files.pythonhosted.org/packages/df/b7/d4f848aee24ecd1be01db6c42c4a270069a4f02a105d9c57e143daf6cf0f/cryptography-46.0.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9f2c4cc63be3ef43c0221861177cee5d14b505cd4d4599a89e2cd273c4d3542a", size = 4545619, upload-time = "2025-09-17T00:09:15.397Z" }, + { url = "https://files.pythonhosted.org/packages/44/a5/42fedefc754fd1901e2d95a69815ea4ec8a9eed31f4c4361fcab80288661/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:41c281a74df173876da1dc9a9b6953d387f06e3d3ed9284e3baae3ab3f40883a", size = 4299160, upload-time = "2025-09-17T00:09:17.155Z" }, + { url = "https://files.pythonhosted.org/packages/86/a1/cd21174f56e769c831fbbd6399a1b7519b0ff6280acec1b826d7b072640c/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0a17377fa52563d730248ba1f68185461fff36e8bc75d8787a7dd2e20a802b7a", size = 3994491, upload-time = "2025-09-17T00:09:18.971Z" }, + { url = "https://files.pythonhosted.org/packages/8d/2f/a8cbfa1c029987ddc746fd966711d4fa71efc891d37fbe9f030fe5ab4eec/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:0d1922d9280e08cde90b518a10cd66831f632960a8d08cb3418922d83fce6f12", size = 4960157, upload-time = "2025-09-17T00:09:20.923Z" }, + { url = "https://files.pythonhosted.org/packages/67/ae/63a84e6789e0d5a2502edf06b552bcb0fa9ff16147265d5c44a211942abe/cryptography-46.0.1-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:af84e8e99f1a82cea149e253014ea9dc89f75b82c87bb6c7242203186f465129", size = 4577263, upload-time = "2025-09-17T00:09:23.356Z" }, + { url = "https://files.pythonhosted.org/packages/ef/8f/1b9fa8e92bd9cbcb3b7e1e593a5232f2c1e6f9bd72b919c1a6b37d315f92/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:ef648d2c690703501714588b2ba640facd50fd16548133b11b2859e8655a69da", size = 4298703, upload-time = "2025-09-17T00:09:25.566Z" }, + { url = "https://files.pythonhosted.org/packages/c3/af/bb95db070e73fea3fae31d8a69ac1463d89d1c084220f549b00dd01094a8/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:e94eb5fa32a8a9f9bf991f424f002913e3dd7c699ef552db9b14ba6a76a6313b", size = 4926363, upload-time = "2025-09-17T00:09:27.451Z" }, + { url = "https://files.pythonhosted.org/packages/f5/3b/d8fb17ffeb3a83157a1cc0aa5c60691d062aceecba09c2e5e77ebfc1870c/cryptography-46.0.1-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:534b96c0831855e29fc3b069b085fd185aa5353033631a585d5cd4dd5d40d657", size = 4576958, upload-time = "2025-09-17T00:09:29.924Z" }, + { url = "https://files.pythonhosted.org/packages/d9/46/86bc3a05c10c8aa88c8ae7e953a8b4e407c57823ed201dbcba55c4d655f4/cryptography-46.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9b55038b5c6c47559aa33626d8ecd092f354e23de3c6975e4bb205df128a2a0", size = 4422507, upload-time = "2025-09-17T00:09:32.222Z" }, + { url = "https://files.pythonhosted.org/packages/a8/4e/387e5a21dfd2b4198e74968a541cfd6128f66f8ec94ed971776e15091ac3/cryptography-46.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ec13b7105117dbc9afd023300fb9954d72ca855c274fe563e72428ece10191c0", size = 4683964, upload-time = "2025-09-17T00:09:34.118Z" }, + { url = "https://files.pythonhosted.org/packages/25/a3/f9f5907b166adb8f26762071474b38bbfcf89858a5282f032899075a38a1/cryptography-46.0.1-cp314-cp314t-win32.whl", hash = "sha256:504e464944f2c003a0785b81668fe23c06f3b037e9cb9f68a7c672246319f277", size = 3029705, upload-time = "2025-09-17T00:09:36.381Z" }, + { 
url = "https://files.pythonhosted.org/packages/12/66/4d3a4f1850db2e71c2b1628d14b70b5e4c1684a1bd462f7fffb93c041c38/cryptography-46.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:c52fded6383f7e20eaf70a60aeddd796b3677c3ad2922c801be330db62778e05", size = 3502175, upload-time = "2025-09-17T00:09:38.261Z" }, + { url = "https://files.pythonhosted.org/packages/52/c7/9f10ad91435ef7d0d99a0b93c4360bea3df18050ff5b9038c489c31ac2f5/cryptography-46.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:9495d78f52c804b5ec8878b5b8c7873aa8e63db9cd9ee387ff2db3fffe4df784", size = 2912354, upload-time = "2025-09-17T00:09:40.078Z" }, + { url = "https://files.pythonhosted.org/packages/98/e5/fbd632385542a3311915976f88e0dfcf09e62a3fc0aff86fb6762162a24d/cryptography-46.0.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:d84c40bdb8674c29fa192373498b6cb1e84f882889d21a471b45d1f868d8d44b", size = 7255677, upload-time = "2025-09-17T00:09:42.407Z" }, + { url = "https://files.pythonhosted.org/packages/56/3e/13ce6eab9ad6eba1b15a7bd476f005a4c1b3f299f4c2f32b22408b0edccf/cryptography-46.0.1-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9ed64e5083fa806709e74fc5ea067dfef9090e5b7a2320a49be3c9df3583a2d8", size = 4301110, upload-time = "2025-09-17T00:09:45.614Z" }, + { url = "https://files.pythonhosted.org/packages/a2/67/65dc233c1ddd688073cf7b136b06ff4b84bf517ba5529607c9d79720fc67/cryptography-46.0.1-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:341fb7a26bc9d6093c1b124b9f13acc283d2d51da440b98b55ab3f79f2522ead", size = 4562369, upload-time = "2025-09-17T00:09:47.601Z" }, + { url = "https://files.pythonhosted.org/packages/17/db/d64ae4c6f4e98c3dac5bf35dd4d103f4c7c345703e43560113e5e8e31b2b/cryptography-46.0.1-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6ef1488967e729948d424d09c94753d0167ce59afba8d0f6c07a22b629c557b2", size = 4302126, upload-time = "2025-09-17T00:09:49.335Z" }, + { url = "https://files.pythonhosted.org/packages/3d/19/5f1eea17d4805ebdc2e685b7b02800c4f63f3dd46cfa8d4c18373fea46c8/cryptography-46.0.1-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7823bc7cdf0b747ecfb096d004cc41573c2f5c7e3a29861603a2871b43d3ef32", size = 4009431, upload-time = "2025-09-17T00:09:51.239Z" }, + { url = "https://files.pythonhosted.org/packages/81/b5/229ba6088fe7abccbfe4c5edb96c7a5ad547fac5fdd0d40aa6ea540b2985/cryptography-46.0.1-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:f736ab8036796f5a119ff8211deda416f8c15ce03776db704a7a4e17381cb2ef", size = 4980739, upload-time = "2025-09-17T00:09:54.181Z" }, + { url = "https://files.pythonhosted.org/packages/3a/9c/50aa38907b201e74bc43c572f9603fa82b58e831bd13c245613a23cff736/cryptography-46.0.1-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:e46710a240a41d594953012213ea8ca398cd2448fbc5d0f1be8160b5511104a0", size = 4592289, upload-time = "2025-09-17T00:09:56.731Z" }, + { url = "https://files.pythonhosted.org/packages/5a/33/229858f8a5bb22f82468bb285e9f4c44a31978d5f5830bb4ea1cf8a4e454/cryptography-46.0.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:84ef1f145de5aee82ea2447224dc23f065ff4cc5791bb3b506615957a6ba8128", size = 4301815, upload-time = "2025-09-17T00:09:58.548Z" }, + { url = "https://files.pythonhosted.org/packages/52/cb/b76b2c87fbd6ed4a231884bea3ce073406ba8e2dae9defad910d33cbf408/cryptography-46.0.1-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9394c7d5a7565ac5f7d9ba38b2617448eba384d7b107b262d63890079fad77ca", size = 4943251, upload-time = "2025-09-17T00:10:00.475Z" }, + { url 
= "https://files.pythonhosted.org/packages/94/0f/f66125ecf88e4cb5b8017ff43f3a87ede2d064cb54a1c5893f9da9d65093/cryptography-46.0.1-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:ed957044e368ed295257ae3d212b95456bd9756df490e1ac4538857f67531fcc", size = 4591247, upload-time = "2025-09-17T00:10:02.874Z" }, + { url = "https://files.pythonhosted.org/packages/f6/22/9f3134ae436b63b463cfdf0ff506a0570da6873adb4bf8c19b8a5b4bac64/cryptography-46.0.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f7de12fa0eee6234de9a9ce0ffcfa6ce97361db7a50b09b65c63ac58e5f22fc7", size = 4428534, upload-time = "2025-09-17T00:10:04.994Z" }, + { url = "https://files.pythonhosted.org/packages/89/39/e6042bcb2638650b0005c752c38ea830cbfbcbb1830e4d64d530000aa8dc/cryptography-46.0.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7fab1187b6c6b2f11a326f33b036f7168f5b996aedd0c059f9738915e4e8f53a", size = 4699541, upload-time = "2025-09-17T00:10:06.925Z" }, + { url = "https://files.pythonhosted.org/packages/68/46/753d457492d15458c7b5a653fc9a84a1c9c7a83af6ebdc94c3fc373ca6e8/cryptography-46.0.1-cp38-abi3-win32.whl", hash = "sha256:45f790934ac1018adeba46a0f7289b2b8fe76ba774a88c7f1922213a56c98bc1", size = 3043779, upload-time = "2025-09-17T00:10:08.951Z" }, + { url = "https://files.pythonhosted.org/packages/2f/50/b6f3b540c2f6ee712feeb5fa780bb11fad76634e71334718568e7695cb55/cryptography-46.0.1-cp38-abi3-win_amd64.whl", hash = "sha256:7176a5ab56fac98d706921f6416a05e5aff7df0e4b91516f450f8627cda22af3", size = 3517226, upload-time = "2025-09-17T00:10:10.769Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e8/77d17d00981cdd27cc493e81e1749a0b8bbfb843780dbd841e30d7f50743/cryptography-46.0.1-cp38-abi3-win_arm64.whl", hash = "sha256:efc9e51c3e595267ff84adf56e9b357db89ab2279d7e375ffcaf8f678606f3d9", size = 2923149, upload-time = "2025-09-17T00:10:13.236Z" }, + { url = "https://files.pythonhosted.org/packages/14/b9/b260180b31a66859648cfed5c980544ee22b15f8bd20ef82a23f58c0b83e/cryptography-46.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd4b5e2ee4e60425711ec65c33add4e7a626adef79d66f62ba0acfd493af282d", size = 3714683, upload-time = "2025-09-17T00:10:15.601Z" }, + { url = "https://files.pythonhosted.org/packages/c5/5a/1cd3ef86e5884edcbf8b27c3aa8f9544e9b9fcce5d3ed8b86959741f4f8e/cryptography-46.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48948940d0ae00483e85e9154bb42997d0b77c21e43a77b7773c8c80de532ac5", size = 3443784, upload-time = "2025-09-17T00:10:18.014Z" }, + { url = "https://files.pythonhosted.org/packages/27/27/077e09fd92075dd1338ea0ffaf5cfee641535545925768350ad90d8c36ca/cryptography-46.0.1-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b9c79af2c3058430d911ff1a5b2b96bbfe8da47d5ed961639ce4681886614e70", size = 3722319, upload-time = "2025-09-17T00:10:20.273Z" }, + { url = "https://files.pythonhosted.org/packages/db/32/6fc7250280920418651640d76cee34d91c1e0601d73acd44364570cf041f/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0ca4be2af48c24df689a150d9cd37404f689e2968e247b6b8ff09bff5bcd786f", size = 4249030, upload-time = "2025-09-17T00:10:22.396Z" }, + { url = "https://files.pythonhosted.org/packages/32/33/8d5398b2da15a15110b2478480ab512609f95b45ead3a105c9a9c76f9980/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:13e67c4d3fb8b6bc4ef778a7ccdd8df4cd15b4bcc18f4239c8440891a11245cc", size = 4528009, upload-time = "2025-09-17T00:10:24.418Z" }, + { url = 
"https://files.pythonhosted.org/packages/fd/1c/4012edad2a8977ab386c36b6e21f5065974d37afa3eade83a9968cba4855/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:15b5fd9358803b0d1cc42505a18d8bca81dabb35b5cfbfea1505092e13a9d96d", size = 4248902, upload-time = "2025-09-17T00:10:26.255Z" }, + { url = "https://files.pythonhosted.org/packages/58/a3/257cd5ae677302de8fa066fca9de37128f6729d1e63c04dd6a15555dd450/cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:e34da95e29daf8a71cb2841fd55df0511539a6cdf33e6f77c1e95e44006b9b46", size = 4527150, upload-time = "2025-09-17T00:10:28.28Z" }, + { url = "https://files.pythonhosted.org/packages/6a/cd/fe6b65e1117ec7631f6be8951d3db076bac3e1b096e3e12710ed071ffc3c/cryptography-46.0.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:34f04b7311174469ab3ac2647469743720f8b6c8b046f238e5cb27905695eb2a", size = 3448210, upload-time = "2025-09-17T00:10:30.145Z" }, +] + +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, +] + +[[package]] +name = "dnspython" +version = "2.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/4a/263763cb2ba3816dd94b08ad3a33d5fdae34ecb856678773cc40a3605829/dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1", size = 345197, upload-time = "2024-10-05T20:14:59.362Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/1b/e0a87d256e40e8c888847551b20a017a6b98139178505dc7ffb96f04e954/dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86", size = 313632, upload-time = "2024-10-05T20:14:57.687Z" }, +] + +[[package]] +name = "docutils" +version = "0.21.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444, upload-time = "2024-04-23T18:57:18.24Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408, upload-time = "2024-04-23T18:57:14.835Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = 
"2025-05-10T17:42:51.123Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" }, +] + +[[package]] +name = "furo" +version = "2025.9.25" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "accessible-pygments" }, + { name = "beautifulsoup4" }, + { name = "pygments" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "sphinx-basic-ng" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/29/ff3b83a1ffce74676043ab3e7540d398e0b1ce7660917a00d7c4958b93da/furo-2025.9.25.tar.gz", hash = "sha256:3eac05582768fdbbc2bdfa1cdbcdd5d33cfc8b4bd2051729ff4e026a1d7e0a98", size = 1662007, upload-time = "2025-09-25T21:37:19.221Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/69/964b55f389c289e16ba2a5dfe587c3c462aac09e24123f09ddf703889584/furo-2025.9.25-py3-none-any.whl", hash = "sha256:2937f68e823b8e37b410c972c371bc2b1d88026709534927158e0cb3fac95afe", size = 340409, upload-time = "2025-09-25T21:37:17.244Z" }, +] + +[[package]] +name = "gevent" +version = "25.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'CPython' and sys_platform == 'win32'" }, + { name = "greenlet", marker = "platform_python_implementation == 'CPython'" }, + { name = "zope-event" }, + { name = "zope-interface" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/58/267e8160aea00ab00acd2de97197eecfe307064a376fb5c892870a8a6159/gevent-25.5.1.tar.gz", hash = "sha256:582c948fa9a23188b890d0bc130734a506d039a2e5ad87dae276a456cc683e61", size = 6388207, upload-time = "2025-05-12T12:57:59.833Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/a7/438568c37fb255f80e710318bfcad04731b92ce764bc16adee278fdc6b4d/gevent-25.5.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8e5a0fab5e245b15ec1005b3666b0a2e867c26f411c8fe66ae1afe07174a30e9", size = 2922800, upload-time = "2025-05-12T11:11:46.728Z" }, + { url = "https://files.pythonhosted.org/packages/5d/b3/b44d8b1c4a4d01097a7f82ffbc582d054007365c27b28867f0b2d4241d73/gevent-25.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7b80a37f2fb45ee4a8f7e64b77dd8a842d364384046e394227b974a4e9c9a52", size = 1812954, upload-time = "2025-05-12T11:52:27.059Z" }, + { url = "https://files.pythonhosted.org/packages/1e/c6/935b4c973ad827c9ec49c354d68d047da1d23e3018bda63d3723cce43178/gevent-25.5.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29ab729d50ae85077a68e0385f129f5b01052d01a0ae6d7fdc1824f5337905e4", size = 1900169, upload-time = "2025-05-12T11:54:17.797Z" }, + { url = "https://files.pythonhosted.org/packages/38/8a/b745bddfec35fb723cafb036f191e5e0a0013f1698bf0ba4fa2cb8e01879/gevent-25.5.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80d20592aeabcc4e294fd441fd43d45cb537437fd642c374ea9d964622fad229", size = 1849786, upload-time = 
"2025-05-12T12:00:01.962Z" }, + { url = "https://files.pythonhosted.org/packages/7c/b3/7aa7b09d91207bebe7608699558bbadd34f63e32904351867c29f8be25de/gevent-25.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8ba0257542ccbb72a8229dc34d00844ccdfba110417e4b7b34599548d0e20e9", size = 2139021, upload-time = "2025-05-12T11:32:58.961Z" }, + { url = "https://files.pythonhosted.org/packages/74/da/cf52ae0c84361f4164a04f3338508b1234331ce79719db103e50dbc5598c/gevent-25.5.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cad0821dff998c7c60dd238f92cd61380342c47fb9e92e1a8705d9b5ac7c16e8", size = 1830758, upload-time = "2025-05-12T11:59:55.666Z" }, + { url = "https://files.pythonhosted.org/packages/93/93/73a49b896d78eec27f0895ce3008f9825db748a5aacbca47404d1014da4b/gevent-25.5.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:017a7384c0cd1a5907751c991535a0699596e89725468a7fc39228312e10efa1", size = 2199993, upload-time = "2025-05-12T11:40:50.845Z" }, + { url = "https://files.pythonhosted.org/packages/df/c7/34680b7d2a75492fa032fa8ecaacc03c1940767a35125f6740954a0132a3/gevent-25.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:469c86d02fccad7e2a3d82fe22237e47ecb376fbf4710bc18747b49c50716817", size = 1652665, upload-time = "2025-05-12T12:35:58.105Z" }, + { url = "https://files.pythonhosted.org/packages/c6/eb/015e93f16a718e2f836ecebecae9bcd7b4d2a5695d1c8bd5bba2d5d91548/gevent-25.5.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:12380aba5c316e9ff53cc21d8ab80f4a91c0df3ada58f65d4f5eb2cf693db00e", size = 2877441, upload-time = "2025-05-12T11:14:57.735Z" }, + { url = "https://files.pythonhosted.org/packages/7b/86/42d191a6f6672ca59d6d79b4cd9b89d4a15f59c843fbbad42f2b749f8ea9/gevent-25.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f0694daab1a041b69a53f53c2141c12994892b2503870515cabe6a5dbd2a928", size = 1774873, upload-time = "2025-05-12T11:52:29.015Z" }, + { url = "https://files.pythonhosted.org/packages/f5/9f/42dd255849c9ca2e814f5cbe180980594007ba19044a132cf674069e38bf/gevent-25.5.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2797885e9aeffdc98e1846723e5aa212e7ce53007dbef40d6fd2add264235c41", size = 1857911, upload-time = "2025-05-12T11:54:19.523Z" }, + { url = "https://files.pythonhosted.org/packages/3e/fc/8e799a733be48f6114bfc531b94e28812741664d8af89872dd90e117f8a4/gevent-25.5.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cde6aaac36b54332e10ea2a5bc0de6a8aba6c205c92603fe4396e3777c88e05d", size = 1812751, upload-time = "2025-05-12T12:00:03.719Z" }, + { url = "https://files.pythonhosted.org/packages/52/4f/a3f3acd961887da10cb0b49c3d915201973d59ce6bf49e2922eaf2058d5f/gevent-25.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24484f80f14befb8822bf29554cfb3a26a26cb69cd1e5a8be9e23b4bd7a96e25", size = 2087115, upload-time = "2025-05-12T11:33:01.128Z" }, + { url = "https://files.pythonhosted.org/packages/b6/27/bb38e005106a53787c13ad1f9f73ed990e403e462108acae6320ab11d442/gevent-25.5.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc7446895fa184890d8ca5ea61e502691114f9db55c9b76adc33f3086c4368", size = 1793549, upload-time = "2025-05-12T11:59:57.854Z" }, + { url = "https://files.pythonhosted.org/packages/ee/56/da817bc69e1f0ae8438f12f2cd150656b09a8c3576c6d12f992dc9ca64ef/gevent-25.5.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5b6106e2414b1797133786258fa1962a5e836480e4d5e861577f9fc63b673a5a", size = 2145899, upload-time = 
"2025-05-12T11:40:53.275Z" }, + { url = "https://files.pythonhosted.org/packages/b8/42/989403abbdbb1346a1507083c02018bee3fedaef3f9648940c767d8c0958/gevent-25.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:bc899212d90f311784c58938a9c09c59802fb6dc287a35fabdc36d180f57f575", size = 1635771, upload-time = "2025-05-12T12:26:47.644Z" }, + { url = "https://files.pythonhosted.org/packages/58/c5/cf71423666a0b83db3d7e3f85788bc47d573fca5fe62b798fe2c4273de7c/gevent-25.5.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d87c0a1bd809d8f70f96b9b229779ec6647339830b8888a192beed33ac8d129f", size = 2909333, upload-time = "2025-05-12T11:11:34.883Z" }, + { url = "https://files.pythonhosted.org/packages/26/7e/d2f174ee8bec6eb85d961ca203bc599d059c857b8412e367b8fa206603a5/gevent-25.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b87a4b66edb3808d4d07bbdb0deed5a710cf3d3c531e082759afd283758bb649", size = 1788420, upload-time = "2025-05-12T11:52:30.306Z" }, + { url = "https://files.pythonhosted.org/packages/fe/f3/3aba8c147b9108e62ba348c726fe38ae69735a233db425565227336e8ce6/gevent-25.5.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f076779050029a82feb0cb1462021d3404d22f80fa76a181b1a7889cd4d6b519", size = 1868854, upload-time = "2025-05-12T11:54:21.564Z" }, + { url = "https://files.pythonhosted.org/packages/c6/b1/11a5453f8fcebe90a456471fad48bd154c6a62fcb96e3475a5e408d05fc8/gevent-25.5.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb673eb291c19370f69295f7a881a536451408481e2e3deec3f41dedb7c281ec", size = 1833946, upload-time = "2025-05-12T12:00:05.514Z" }, + { url = "https://files.pythonhosted.org/packages/70/1c/37d4a62303f86e6af67660a8df38c1171b7290df61b358e618c6fea79567/gevent-25.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1325ed44225c8309c0dd188bdbbbee79e1df8c11ceccac226b861c7d52e4837", size = 2070583, upload-time = "2025-05-12T11:33:02.803Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8f/3b14929ff28263aba1d268ea97bcf104be1a86ba6f6bb4633838e7a1905e/gevent-25.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fcd5bcad3102bde686d0adcc341fade6245186050ce14386d547ccab4bd54310", size = 1808341, upload-time = "2025-05-12T11:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/2f/fc/674ec819fb8a96e482e4d21f8baa43d34602dba09dfce7bbdc8700899d1b/gevent-25.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1a93062609e8fa67ec97cd5fb9206886774b2a09b24887f40148c9c37e6fb71c", size = 2137974, upload-time = "2025-05-12T11:40:54.78Z" }, + { url = "https://files.pythonhosted.org/packages/05/9a/048b7f5e28c54e4595ad4a8ad3c338fa89560e558db2bbe8273f44f030de/gevent-25.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:2534c23dc32bed62b659ed4fd9e198906179e68b26c9276a897e04163bdde806", size = 1638344, upload-time = "2025-05-12T12:08:31.776Z" }, + { url = "https://files.pythonhosted.org/packages/10/25/2162b38d7b48e08865db6772d632bd1648136ce2bb50e340565e45607cad/gevent-25.5.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:a022a9de9275ce0b390b7315595454258c525dc8287a03f1a6cacc5878ab7cbc", size = 2928044, upload-time = "2025-05-12T11:11:36.33Z" }, + { url = "https://files.pythonhosted.org/packages/1b/e0/dbd597a964ed00176da122ea759bf2a6c1504f1e9f08e185379f92dc355f/gevent-25.5.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fae8533f9d0ef3348a1f503edcfb531ef7a0236b57da1e24339aceb0ce52922", size = 1788751, upload-time = 
"2025-05-12T11:52:32.643Z" }, + { url = "https://files.pythonhosted.org/packages/f1/74/960cc4cf4c9c90eafbe0efc238cdf588862e8e278d0b8c0d15a0da4ed480/gevent-25.5.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c7b32d9c3b5294b39ea9060e20c582e49e1ec81edbfeae6cf05f8ad0829cb13d", size = 1869766, upload-time = "2025-05-12T11:54:23.903Z" }, + { url = "https://files.pythonhosted.org/packages/56/78/fa84b1c7db79b156929685db09a7c18c3127361dca18a09e998e98118506/gevent-25.5.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b95815fe44f318ebbfd733b6428b4cb18cc5e68f1c40e8501dd69cc1f42a83d", size = 1835358, upload-time = "2025-05-12T12:00:06.794Z" }, + { url = "https://files.pythonhosted.org/packages/00/5c/bfefe3822bbca5b83bfad256c82251b3f5be13d52d14e17a786847b9b625/gevent-25.5.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d316529b70d325b183b2f3f5cde958911ff7be12eb2b532b5c301f915dbbf1e", size = 2073071, upload-time = "2025-05-12T11:33:04.2Z" }, + { url = "https://files.pythonhosted.org/packages/20/e4/08a77a3839a37db96393dea952e992d5846a881b887986dde62ead6b48a1/gevent-25.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f6ba33c13db91ffdbb489a4f3d177a261ea1843923e1d68a5636c53fe98fa5ce", size = 1809805, upload-time = "2025-05-12T12:00:00.537Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ac/28848348f790c1283df74b0fc0a554271d0606676470f848eccf84eae42a/gevent-25.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37ee34b77c7553777c0b8379915f75934c3f9c8cd32f7cd098ea43c9323c2276", size = 2138305, upload-time = "2025-05-12T11:40:56.566Z" }, + { url = "https://files.pythonhosted.org/packages/52/9e/0e9e40facd2d714bfb00f71fc6dacaacc82c24c1c2e097bf6461e00dec9f/gevent-25.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:9fa6aa0da224ed807d3b76cdb4ee8b54d4d4d5e018aed2478098e685baae7896", size = 1637444, upload-time = "2025-05-12T12:17:45.995Z" }, + { url = "https://files.pythonhosted.org/packages/60/16/b71171e97ec7b4ded8669542f4369d88d5a289e2704efbbde51e858e062a/gevent-25.5.1-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:0bacf89a65489d26c7087669af89938d5bfd9f7afb12a07b57855b9fad6ccbd0", size = 2937113, upload-time = "2025-05-12T11:12:03.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/54/e5908beb092c2745aa8390f15b9559cc3ebd77bf1ba71c81c606f7b1fb92/gevent-25.5.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e30169ef9cc0a57930bfd8fe14d86bc9d39fb96d278e3891e85cbe7b46058a97", size = 2147450, upload-time = "2025-05-12T11:33:05.883Z" }, + { url = "https://files.pythonhosted.org/packages/ee/39/206c9da2395a7df11c13e2989f7c7c65a7799babdb8b4b055cccae4d5c14/gevent-25.5.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e72ad5f8d9c92df017fb91a1f6a438cfb63b0eff4b40904ff81b40cb8150078c", size = 2210122, upload-time = "2025-05-12T11:40:58.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/18/d10ca3841b686143c1973cac816651a72ff77ad9e79a5300cbbbe310fced/gevent-25.5.1-cp39-cp39-win32.whl", hash = "sha256:e5f358e81e27b1a7f2fb2f5219794e13ab5f59ce05571aa3877cfac63adb97db", size = 1548447, upload-time = "2025-05-12T12:48:21.565Z" }, + { url = "https://files.pythonhosted.org/packages/ac/9d/48c01ff8324ce4bfaba0760c0f1db6f4e2c976838655f6b80333cfd47999/gevent-25.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:b83aff2441c7d4ee93e519989713b7c2607d4510abe990cd1d04f641bc6c03af", size = 1659832, upload-time = "2025-05-12T12:45:00.794Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/81/834da3c1ea5e71e4dc1a78a034a15f2813d9760d135464aae5d1f058a8c6/gevent-25.5.1-pp310-pypy310_pp73-macosx_11_0_universal2.whl", hash = "sha256:60ad4ca9ca2c4cc8201b607c229cd17af749831e371d006d8a91303bb5568eb1", size = 1291540, upload-time = "2025-05-12T11:11:55.456Z" }, +] + +[[package]] +name = "greenlet" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/92/bb85bd6e80148a4d2e0c59f7c0c2891029f8fd510183afc7d8d2feeed9b6/greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365", size = 185752, upload-time = "2025-06-05T16:16:09.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/db/b4c12cff13ebac2786f4f217f06588bccd8b53d260453404ef22b121fc3a/greenlet-3.2.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:1afd685acd5597349ee6d7a88a8bec83ce13c106ac78c196ee9dde7c04fe87be", size = 268977, upload-time = "2025-06-05T16:10:24.001Z" }, + { url = "https://files.pythonhosted.org/packages/52/61/75b4abd8147f13f70986df2801bf93735c1bd87ea780d70e3b3ecda8c165/greenlet-3.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:761917cac215c61e9dc7324b2606107b3b292a8349bdebb31503ab4de3f559ac", size = 627351, upload-time = "2025-06-05T16:38:50.685Z" }, + { url = "https://files.pythonhosted.org/packages/35/aa/6894ae299d059d26254779a5088632874b80ee8cf89a88bca00b0709d22f/greenlet-3.2.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a433dbc54e4a37e4fff90ef34f25a8c00aed99b06856f0119dcf09fbafa16392", size = 638599, upload-time = "2025-06-05T16:41:34.057Z" }, + { url = "https://files.pythonhosted.org/packages/30/64/e01a8261d13c47f3c082519a5e9dbf9e143cc0498ed20c911d04e54d526c/greenlet-3.2.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:72e77ed69312bab0434d7292316d5afd6896192ac4327d44f3d613ecb85b037c", size = 634482, upload-time = "2025-06-05T16:48:16.26Z" }, + { url = "https://files.pythonhosted.org/packages/47/48/ff9ca8ba9772d083a4f5221f7b4f0ebe8978131a9ae0909cf202f94cd879/greenlet-3.2.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:68671180e3849b963649254a882cd544a3c75bfcd2c527346ad8bb53494444db", size = 633284, upload-time = "2025-06-05T16:13:01.599Z" }, + { url = "https://files.pythonhosted.org/packages/e9/45/626e974948713bc15775b696adb3eb0bd708bec267d6d2d5c47bb47a6119/greenlet-3.2.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49c8cfb18fb419b3d08e011228ef8a25882397f3a859b9fe1436946140b6756b", size = 582206, upload-time = "2025-06-05T16:12:48.51Z" }, + { url = "https://files.pythonhosted.org/packages/b1/8e/8b6f42c67d5df7db35b8c55c9a850ea045219741bb14416255616808c690/greenlet-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:efc6dc8a792243c31f2f5674b670b3a95d46fa1c6a912b8e310d6f542e7b0712", size = 1111412, upload-time = "2025-06-05T16:36:45.479Z" }, + { url = "https://files.pythonhosted.org/packages/05/46/ab58828217349500a7ebb81159d52ca357da747ff1797c29c6023d79d798/greenlet-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:731e154aba8e757aedd0781d4b240f1225b075b4409f1bb83b05ff410582cf00", size = 1135054, upload-time = "2025-06-05T16:12:36.478Z" }, + { url = "https://files.pythonhosted.org/packages/68/7f/d1b537be5080721c0f0089a8447d4ef72839039cdb743bdd8ffd23046e9a/greenlet-3.2.3-cp310-cp310-win_amd64.whl", hash = 
"sha256:96c20252c2f792defe9a115d3287e14811036d51e78b3aaddbee23b69b216302", size = 296573, upload-time = "2025-06-05T16:34:26.521Z" }, + { url = "https://files.pythonhosted.org/packages/fc/2e/d4fcb2978f826358b673f779f78fa8a32ee37df11920dc2bb5589cbeecef/greenlet-3.2.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:784ae58bba89fa1fa5733d170d42486580cab9decda3484779f4759345b29822", size = 270219, upload-time = "2025-06-05T16:10:10.414Z" }, + { url = "https://files.pythonhosted.org/packages/16/24/929f853e0202130e4fe163bc1d05a671ce8dcd604f790e14896adac43a52/greenlet-3.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0921ac4ea42a5315d3446120ad48f90c3a6b9bb93dd9b3cf4e4d84a66e42de83", size = 630383, upload-time = "2025-06-05T16:38:51.785Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b2/0320715eb61ae70c25ceca2f1d5ae620477d246692d9cc284c13242ec31c/greenlet-3.2.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d2971d93bb99e05f8c2c0c2f4aa9484a18d98c4c3bd3c62b65b7e6ae33dfcfaf", size = 642422, upload-time = "2025-06-05T16:41:35.259Z" }, + { url = "https://files.pythonhosted.org/packages/bd/49/445fd1a210f4747fedf77615d941444349c6a3a4a1135bba9701337cd966/greenlet-3.2.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c667c0bf9d406b77a15c924ef3285e1e05250948001220368e039b6aa5b5034b", size = 638375, upload-time = "2025-06-05T16:48:18.235Z" }, + { url = "https://files.pythonhosted.org/packages/7e/c8/ca19760cf6eae75fa8dc32b487e963d863b3ee04a7637da77b616703bc37/greenlet-3.2.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:592c12fb1165be74592f5de0d70f82bc5ba552ac44800d632214b76089945147", size = 637627, upload-time = "2025-06-05T16:13:02.858Z" }, + { url = "https://files.pythonhosted.org/packages/65/89/77acf9e3da38e9bcfca881e43b02ed467c1dedc387021fc4d9bd9928afb8/greenlet-3.2.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29e184536ba333003540790ba29829ac14bb645514fbd7e32af331e8202a62a5", size = 585502, upload-time = "2025-06-05T16:12:49.642Z" }, + { url = "https://files.pythonhosted.org/packages/97/c6/ae244d7c95b23b7130136e07a9cc5aadd60d59b5951180dc7dc7e8edaba7/greenlet-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:93c0bb79844a367782ec4f429d07589417052e621aa39a5ac1fb99c5aa308edc", size = 1114498, upload-time = "2025-06-05T16:36:46.598Z" }, + { url = "https://files.pythonhosted.org/packages/89/5f/b16dec0cbfd3070658e0d744487919740c6d45eb90946f6787689a7efbce/greenlet-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:751261fc5ad7b6705f5f76726567375bb2104a059454e0226e1eef6c756748ba", size = 1139977, upload-time = "2025-06-05T16:12:38.262Z" }, + { url = "https://files.pythonhosted.org/packages/66/77/d48fb441b5a71125bcac042fc5b1494c806ccb9a1432ecaa421e72157f77/greenlet-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:83a8761c75312361aa2b5b903b79da97f13f556164a7dd2d5448655425bd4c34", size = 297017, upload-time = "2025-06-05T16:25:05.225Z" }, + { url = "https://files.pythonhosted.org/packages/f3/94/ad0d435f7c48debe960c53b8f60fb41c2026b1d0fa4a99a1cb17c3461e09/greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d", size = 271992, upload-time = "2025-06-05T16:11:23.467Z" }, + { url = 
"https://files.pythonhosted.org/packages/93/5d/7c27cf4d003d6e77749d299c7c8f5fd50b4f251647b5c2e97e1f20da0ab5/greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b", size = 638820, upload-time = "2025-06-05T16:38:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/c6/7e/807e1e9be07a125bb4c169144937910bf59b9d2f6d931578e57f0bce0ae2/greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d", size = 653046, upload-time = "2025-06-05T16:41:36.343Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ab/158c1a4ea1068bdbc78dba5a3de57e4c7aeb4e7fa034320ea94c688bfb61/greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264", size = 647701, upload-time = "2025-06-05T16:48:19.604Z" }, + { url = "https://files.pythonhosted.org/packages/cc/0d/93729068259b550d6a0288da4ff72b86ed05626eaf1eb7c0d3466a2571de/greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688", size = 649747, upload-time = "2025-06-05T16:13:04.628Z" }, + { url = "https://files.pythonhosted.org/packages/f6/f6/c82ac1851c60851302d8581680573245c8fc300253fc1ff741ae74a6c24d/greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb", size = 605461, upload-time = "2025-06-05T16:12:50.792Z" }, + { url = "https://files.pythonhosted.org/packages/98/82/d022cf25ca39cf1200650fc58c52af32c90f80479c25d1cbf57980ec3065/greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c", size = 1121190, upload-time = "2025-06-05T16:36:48.59Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e1/25297f70717abe8104c20ecf7af0a5b82d2f5a980eb1ac79f65654799f9f/greenlet-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:93d48533fade144203816783373f27a97e4193177ebaaf0fc396db19e5d61163", size = 1149055, upload-time = "2025-06-05T16:12:40.457Z" }, + { url = "https://files.pythonhosted.org/packages/1f/8f/8f9e56c5e82eb2c26e8cde787962e66494312dc8cb261c460e1f3a9c88bc/greenlet-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:7454d37c740bb27bdeddfc3f358f26956a07d5220818ceb467a483197d84f849", size = 297817, upload-time = "2025-06-05T16:29:49.244Z" }, + { url = "https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732, upload-time = "2025-06-05T16:10:08.26Z" }, + { url = "https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033, upload-time = "2025-06-05T16:38:53.983Z" }, + { url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999, upload-time 
= "2025-06-05T16:41:37.89Z" }, + { url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368, upload-time = "2025-06-05T16:48:21.467Z" }, + { url = "https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037, upload-time = "2025-06-05T16:13:06.402Z" }, + { url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402, upload-time = "2025-06-05T16:12:51.91Z" }, + { url = "https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577, upload-time = "2025-06-05T16:36:49.787Z" }, + { url = "https://files.pythonhosted.org/packages/86/94/1fc0cc068cfde885170e01de40a619b00eaa8f2916bf3541744730ffb4c3/greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36", size = 1147121, upload-time = "2025-06-05T16:12:42.527Z" }, + { url = "https://files.pythonhosted.org/packages/27/1a/199f9587e8cb08a0658f9c30f3799244307614148ffe8b1e3aa22f324dea/greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3", size = 297603, upload-time = "2025-06-05T16:20:12.651Z" }, + { url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 271479, upload-time = "2025-06-05T16:10:47.525Z" }, + { url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952, upload-time = "2025-06-05T16:38:55.125Z" }, + { url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917, upload-time = "2025-06-05T16:41:38.959Z" }, + { url = "https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443, upload-time = "2025-06-05T16:48:23.113Z" }, + { url = "https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995, upload-time = "2025-06-05T16:13:07.972Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320, upload-time = "2025-06-05T16:12:53.453Z" }, + { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236, upload-time = "2025-06-05T16:15:20.111Z" }, + { url = "https://files.pythonhosted.org/packages/3d/d9/a3114df5fba2bf9823e0acc01e9e2abdcd8ea4c5487cf1c3dcd4cc0b48cf/greenlet-3.2.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:42efc522c0bd75ffa11a71e09cd8a399d83fafe36db250a87cf1dacfaa15dc64", size = 267769, upload-time = "2025-06-05T16:10:44.802Z" }, + { url = "https://files.pythonhosted.org/packages/bc/da/47dfc50f6e5673116e66a737dc58d1eca651db9a9aa8797c1d27e940e211/greenlet-3.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d760f9bdfe79bff803bad32b4d8ffb2c1d2ce906313fc10a83976ffb73d64ca7", size = 625472, upload-time = "2025-06-05T16:38:56.882Z" }, + { url = "https://files.pythonhosted.org/packages/f5/74/f6ef9f85d981b2fcd665bbee3e69e3c0a10fb962eb4c6a5889ac3b6debfa/greenlet-3.2.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8324319cbd7b35b97990090808fdc99c27fe5338f87db50514959f8059999805", size = 637253, upload-time = "2025-06-05T16:41:40.542Z" }, + { url = "https://files.pythonhosted.org/packages/66/69/4919bb1c9e43bfc16dc886e7a37fe1bc04bfa4101aba177936a10f313cad/greenlet-3.2.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:8c37ef5b3787567d322331d5250e44e42b58c8c713859b8a04c6065f27efbf72", size = 632611, upload-time = "2025-06-05T16:48:24.976Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8d/97d988d019f40b6b360b0c71c99e5b4c877a3d92666fe48b081d0e1ea1cd/greenlet-3.2.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ce539fb52fb774d0802175d37fcff5c723e2c7d249c65916257f0a940cee8904", size = 631843, upload-time = "2025-06-05T16:13:09.476Z" }, + { url = "https://files.pythonhosted.org/packages/59/24/d5e1504ec00768755d4ccc2168b76d9f4524e96694a14ad45bd87796e9bb/greenlet-3.2.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:003c930e0e074db83559edc8705f3a2d066d4aa8c2f198aff1e454946efd0f26", size = 580781, upload-time = "2025-06-05T16:12:55.029Z" }, + { url = "https://files.pythonhosted.org/packages/9c/df/d009bcca566dbfd2283b306b4e424f4c0e59bf984868f8b789802fe9e607/greenlet-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7e70ea4384b81ef9e84192e8a77fb87573138aa5d4feee541d8014e452b434da", size = 1109903, upload-time = "2025-06-05T16:36:51.491Z" }, + { url = "https://files.pythonhosted.org/packages/33/54/5036097197a78388aa6901a5b90b562f3a154a9fbee89c301a26f56f3942/greenlet-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:22eb5ba839c4b2156f18f76768233fe44b23a31decd9cc0d4cc8141c211fd1b4", size = 1133975, upload-time = "2025-06-05T16:12:43.866Z" }, + { url = "https://files.pythonhosted.org/packages/e2/15/b001456a430805fdd8b600a788d19a790664eee8863739523395f68df752/greenlet-3.2.3-cp39-cp39-win32.whl", hash = 
"sha256:4532f0d25df67f896d137431b13f4cdce89f7e3d4a96387a41290910df4d3a57", size = 279320, upload-time = "2025-06-05T16:43:34.043Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4c/bf2100cbc1bd07f39bee3b09e7eef39beffe29f5453dc2477a2693737913/greenlet-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:aaa7aae1e7f75eaa3ae400ad98f8644bb81e1dc6ba47ce8a93d3f17274e08322", size = 296444, upload-time = "2025-06-05T16:39:22.664Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "imagesize" +version = "1.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a7/84/62473fb57d61e31fef6e36d64a179c8781605429fd927b5dd608c997be31/imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a", size = 1280026, upload-time = "2022-07-01T12:21:05.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769, upload-time = "2022-07-01T12:21:02.467Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp", marker = "python_full_version != '3.14.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + 
+[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" }, + { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" }, + { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" }, + { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" }, + { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" }, + { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" }, + { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" }, + { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" }, + { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" }, + { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" }, + { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" }, + { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" }, + { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" }, + { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" }, + { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" }, + { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" }, + { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" }, + { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" }, + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = 
"sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, + { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" }, + { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" }, + { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" }, + { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" }, + { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" }, + { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" }, + { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" }, + { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" }, + { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" }, + { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" }, + { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" }, + { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" }, + { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" }, + { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" }, + { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" }, + { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" }, + { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" }, + { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" }, + { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" }, + { url = "https://files.pythonhosted.org/packages/a7/ea/9b1530c3fdeeca613faeb0fb5cbcf2389d816072fab72a71b45749ef6062/MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a", size = 14344, upload-time = "2024-10-18T15:21:43.721Z" }, + { url = "https://files.pythonhosted.org/packages/4b/c2/fbdbfe48848e7112ab05e627e718e854d20192b674952d9042ebd8c9e5de/MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff", size = 12389, upload-time = "2024-10-18T15:21:44.666Z" }, + { url = "https://files.pythonhosted.org/packages/f0/25/7a7c6e4dbd4f867d95d94ca15449e91e52856f6ed1905d58ef1de5e211d0/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13", size = 21607, upload-time = "2024-10-18T15:21:45.452Z" }, + { url = "https://files.pythonhosted.org/packages/53/8f/f339c98a178f3c1e545622206b40986a4c3307fe39f70ccd3d9df9a9e425/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144", size = 20728, upload-time = "2024-10-18T15:21:46.295Z" }, + { url = "https://files.pythonhosted.org/packages/1a/03/8496a1a78308456dbd50b23a385c69b41f2e9661c67ea1329849a598a8f9/MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29", size = 20826, upload-time = "2024-10-18T15:21:47.134Z" }, + { url = "https://files.pythonhosted.org/packages/e6/cf/0a490a4bd363048c3022f2f475c8c05582179bb179defcee4766fb3dcc18/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0", size = 21843, upload-time = "2024-10-18T15:21:48.334Z" }, + { url = "https://files.pythonhosted.org/packages/19/a3/34187a78613920dfd3cdf68ef6ce5e99c4f3417f035694074beb8848cd77/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0", size = 21219, upload-time = "2024-10-18T15:21:49.587Z" }, + { url = "https://files.pythonhosted.org/packages/17/d8/5811082f85bb88410ad7e452263af048d685669bbbfb7b595e8689152498/MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178", size = 20946, upload-time = "2024-10-18T15:21:50.441Z" }, + { url = "https://files.pythonhosted.org/packages/7c/31/bd635fb5989440d9365c5e3c47556cfea121c7803f5034ac843e8f37c2f2/MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f", size = 15063, upload-time = "2024-10-18T15:21:51.385Z" }, + { url = "https://files.pythonhosted.org/packages/b3/73/085399401383ce949f727afec55ec3abd76648d04b9f22e1c0e99cb4bec3/MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a", size = 15506, upload-time = "2024-10-18T15:21:52.974Z" }, +] + +[[package]] +name = "mockupdb" +version = "1.9.0.dev1" +source = { git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master#317c4e049965f9d99423698a81e52d0ab37b7599" } +dependencies = [ + { name = "pymongo" }, +] + +[[package]] +name = "mypy" +version = "1.18.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "pathspec" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/77/8f0d0001ffad290cef2f7f216f96c814866248a0b92a722365ed54648e7e/mypy-1.18.2.tar.gz", hash = "sha256:06a398102a5f203d7477b2923dda3634c36727fa5c237d8f859ef90c42a9924b", size = 3448846, upload-time = "2025-09-19T00:11:10.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/6f/657961a0743cff32e6c0611b63ff1c1970a0b482ace35b069203bf705187/mypy-1.18.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1eab0cf6294dafe397c261a75f96dc2c31bffe3b944faa24db5def4e2b0f77c", size = 12807973, upload-time = "2025-09-19T00:10:35.282Z" }, + { url = "https://files.pythonhosted.org/packages/10/e9/420822d4f661f13ca8900f5fa239b40ee3be8b62b32f3357df9a3045a08b/mypy-1.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a780ca61fc239e4865968ebc5240bb3bf610ef59ac398de9a7421b54e4a207e", size = 11896527, upload-time = "2025-09-19T00:10:55.791Z" }, + { url = 
"https://files.pythonhosted.org/packages/aa/73/a05b2bbaa7005f4642fcfe40fb73f2b4fb6bb44229bd585b5878e9a87ef8/mypy-1.18.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448acd386266989ef11662ce3c8011fd2a7b632e0ec7d61a98edd8e27472225b", size = 12507004, upload-time = "2025-09-19T00:11:05.411Z" }, + { url = "https://files.pythonhosted.org/packages/4f/01/f6e4b9f0d031c11ccbd6f17da26564f3a0f3c4155af344006434b0a05a9d/mypy-1.18.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f9e171c465ad3901dc652643ee4bffa8e9fef4d7d0eece23b428908c77a76a66", size = 13245947, upload-time = "2025-09-19T00:10:46.923Z" }, + { url = "https://files.pythonhosted.org/packages/d7/97/19727e7499bfa1ae0773d06afd30ac66a58ed7437d940c70548634b24185/mypy-1.18.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:592ec214750bc00741af1f80cbf96b5013d81486b7bb24cb052382c19e40b428", size = 13499217, upload-time = "2025-09-19T00:09:39.472Z" }, + { url = "https://files.pythonhosted.org/packages/9f/4f/90dc8c15c1441bf31cf0f9918bb077e452618708199e530f4cbd5cede6ff/mypy-1.18.2-cp310-cp310-win_amd64.whl", hash = "sha256:7fb95f97199ea11769ebe3638c29b550b5221e997c63b14ef93d2e971606ebed", size = 9766753, upload-time = "2025-09-19T00:10:49.161Z" }, + { url = "https://files.pythonhosted.org/packages/88/87/cafd3ae563f88f94eec33f35ff722d043e09832ea8530ef149ec1efbaf08/mypy-1.18.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:807d9315ab9d464125aa9fcf6d84fde6e1dc67da0b6f80e7405506b8ac72bc7f", size = 12731198, upload-time = "2025-09-19T00:09:44.857Z" }, + { url = "https://files.pythonhosted.org/packages/0f/e0/1e96c3d4266a06d4b0197ace5356d67d937d8358e2ee3ffac71faa843724/mypy-1.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:776bb00de1778caf4db739c6e83919c1d85a448f71979b6a0edd774ea8399341", size = 11817879, upload-time = "2025-09-19T00:09:47.131Z" }, + { url = "https://files.pythonhosted.org/packages/72/ef/0c9ba89eb03453e76bdac5a78b08260a848c7bfc5d6603634774d9cd9525/mypy-1.18.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1379451880512ffce14505493bd9fe469e0697543717298242574882cf8cdb8d", size = 12427292, upload-time = "2025-09-19T00:10:22.472Z" }, + { url = "https://files.pythonhosted.org/packages/1a/52/ec4a061dd599eb8179d5411d99775bec2a20542505988f40fc2fee781068/mypy-1.18.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1331eb7fd110d60c24999893320967594ff84c38ac6d19e0a76c5fd809a84c86", size = 13163750, upload-time = "2025-09-19T00:09:51.472Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5f/2cf2ceb3b36372d51568f2208c021870fe7834cf3186b653ac6446511839/mypy-1.18.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3ca30b50a51e7ba93b00422e486cbb124f1c56a535e20eff7b2d6ab72b3b2e37", size = 13351827, upload-time = "2025-09-19T00:09:58.311Z" }, + { url = "https://files.pythonhosted.org/packages/c8/7d/2697b930179e7277529eaaec1513f8de622818696857f689e4a5432e5e27/mypy-1.18.2-cp311-cp311-win_amd64.whl", hash = "sha256:664dc726e67fa54e14536f6e1224bcfce1d9e5ac02426d2326e2bb4e081d1ce8", size = 9757983, upload-time = "2025-09-19T00:10:09.071Z" }, + { url = "https://files.pythonhosted.org/packages/07/06/dfdd2bc60c66611dd8335f463818514733bc763e4760dee289dcc33df709/mypy-1.18.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:33eca32dd124b29400c31d7cf784e795b050ace0e1f91b8dc035672725617e34", size = 12908273, upload-time = 
"2025-09-19T00:10:58.321Z" }, + { url = "https://files.pythonhosted.org/packages/81/14/6a9de6d13a122d5608e1a04130724caf9170333ac5a924e10f670687d3eb/mypy-1.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a3c47adf30d65e89b2dcd2fa32f3aeb5e94ca970d2c15fcb25e297871c8e4764", size = 11920910, upload-time = "2025-09-19T00:10:20.043Z" }, + { url = "https://files.pythonhosted.org/packages/5f/a9/b29de53e42f18e8cc547e38daa9dfa132ffdc64f7250e353f5c8cdd44bee/mypy-1.18.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d6c838e831a062f5f29d11c9057c6009f60cb294fea33a98422688181fe2893", size = 12465585, upload-time = "2025-09-19T00:10:33.005Z" }, + { url = "https://files.pythonhosted.org/packages/77/ae/6c3d2c7c61ff21f2bee938c917616c92ebf852f015fb55917fd6e2811db2/mypy-1.18.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01199871b6110a2ce984bde85acd481232d17413868c9807e95c1b0739a58914", size = 13348562, upload-time = "2025-09-19T00:10:11.51Z" }, + { url = "https://files.pythonhosted.org/packages/4d/31/aec68ab3b4aebdf8f36d191b0685d99faa899ab990753ca0fee60fb99511/mypy-1.18.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a2afc0fa0b0e91b4599ddfe0f91e2c26c2b5a5ab263737e998d6817874c5f7c8", size = 13533296, upload-time = "2025-09-19T00:10:06.568Z" }, + { url = "https://files.pythonhosted.org/packages/9f/83/abcb3ad9478fca3ebeb6a5358bb0b22c95ea42b43b7789c7fb1297ca44f4/mypy-1.18.2-cp312-cp312-win_amd64.whl", hash = "sha256:d8068d0afe682c7c4897c0f7ce84ea77f6de953262b12d07038f4d296d547074", size = 9828828, upload-time = "2025-09-19T00:10:28.203Z" }, + { url = "https://files.pythonhosted.org/packages/5f/04/7f462e6fbba87a72bc8097b93f6842499c428a6ff0c81dd46948d175afe8/mypy-1.18.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:07b8b0f580ca6d289e69209ec9d3911b4a26e5abfde32228a288eb79df129fcc", size = 12898728, upload-time = "2025-09-19T00:10:01.33Z" }, + { url = "https://files.pythonhosted.org/packages/99/5b/61ed4efb64f1871b41fd0b82d29a64640f3516078f6c7905b68ab1ad8b13/mypy-1.18.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ed4482847168439651d3feee5833ccedbf6657e964572706a2adb1f7fa4dfe2e", size = 11910758, upload-time = "2025-09-19T00:10:42.607Z" }, + { url = "https://files.pythonhosted.org/packages/3c/46/d297d4b683cc89a6e4108c4250a6a6b717f5fa96e1a30a7944a6da44da35/mypy-1.18.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ad2afadd1e9fea5cf99a45a822346971ede8685cc581ed9cd4d42eaf940986", size = 12475342, upload-time = "2025-09-19T00:11:00.371Z" }, + { url = "https://files.pythonhosted.org/packages/83/45/4798f4d00df13eae3bfdf726c9244bcb495ab5bd588c0eed93a2f2dd67f3/mypy-1.18.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a431a6f1ef14cf8c144c6b14793a23ec4eae3db28277c358136e79d7d062f62d", size = 13338709, upload-time = "2025-09-19T00:11:03.358Z" }, + { url = "https://files.pythonhosted.org/packages/d7/09/479f7358d9625172521a87a9271ddd2441e1dab16a09708f056e97007207/mypy-1.18.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7ab28cc197f1dd77a67e1c6f35cd1f8e8b73ed2217e4fc005f9e6a504e46e7ba", size = 13529806, upload-time = "2025-09-19T00:10:26.073Z" }, + { url = "https://files.pythonhosted.org/packages/71/cf/ac0f2c7e9d0ea3c75cd99dff7aec1c9df4a1376537cb90e4c882267ee7e9/mypy-1.18.2-cp313-cp313-win_amd64.whl", hash = "sha256:0e2785a84b34a72ba55fb5daf079a1003a34c05b22238da94fcae2bbe46f3544", 
size = 9833262, upload-time = "2025-09-19T00:10:40.035Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0c/7d5300883da16f0063ae53996358758b2a2df2a09c72a5061fa79a1f5006/mypy-1.18.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:62f0e1e988ad41c2a110edde6c398383a889d95b36b3e60bcf155f5164c4fdce", size = 12893775, upload-time = "2025-09-19T00:10:03.814Z" }, + { url = "https://files.pythonhosted.org/packages/50/df/2cffbf25737bdb236f60c973edf62e3e7b4ee1c25b6878629e88e2cde967/mypy-1.18.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8795a039bab805ff0c1dfdb8cd3344642c2b99b8e439d057aba30850b8d3423d", size = 11936852, upload-time = "2025-09-19T00:10:51.631Z" }, + { url = "https://files.pythonhosted.org/packages/be/50/34059de13dd269227fb4a03be1faee6e2a4b04a2051c82ac0a0b5a773c9a/mypy-1.18.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6ca1e64b24a700ab5ce10133f7ccd956a04715463d30498e64ea8715236f9c9c", size = 12480242, upload-time = "2025-09-19T00:11:07.955Z" }, + { url = "https://files.pythonhosted.org/packages/5b/11/040983fad5132d85914c874a2836252bbc57832065548885b5bb5b0d4359/mypy-1.18.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d924eef3795cc89fecf6bedc6ed32b33ac13e8321344f6ddbf8ee89f706c05cb", size = 13326683, upload-time = "2025-09-19T00:09:55.572Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ba/89b2901dd77414dd7a8c8729985832a5735053be15b744c18e4586e506ef/mypy-1.18.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:20c02215a080e3a2be3aa50506c67242df1c151eaba0dcbc1e4e557922a26075", size = 13514749, upload-time = "2025-09-19T00:10:44.827Z" }, + { url = "https://files.pythonhosted.org/packages/25/bc/cc98767cffd6b2928ba680f3e5bc969c4152bf7c2d83f92f5a504b92b0eb/mypy-1.18.2-cp314-cp314-win_amd64.whl", hash = "sha256:749b5f83198f1ca64345603118a6f01a4e99ad4bf9d103ddc5a3200cc4614adf", size = 9982959, upload-time = "2025-09-19T00:10:37.344Z" }, + { url = "https://files.pythonhosted.org/packages/3f/a6/490ff491d8ecddf8ab91762d4f67635040202f76a44171420bcbe38ceee5/mypy-1.18.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25a9c8fb67b00599f839cf472713f54249a62efd53a54b565eb61956a7e3296b", size = 12807230, upload-time = "2025-09-19T00:09:49.471Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2e/60076fc829645d167ece9e80db9e8375648d210dab44cc98beb5b322a826/mypy-1.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2b9c7e284ee20e7598d6f42e13ca40b4928e6957ed6813d1ab6348aa3f47133", size = 11895666, upload-time = "2025-09-19T00:10:53.678Z" }, + { url = "https://files.pythonhosted.org/packages/97/4a/1e2880a2a5dda4dc8d9ecd1a7e7606bc0b0e14813637eeda40c38624e037/mypy-1.18.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6985ed057513e344e43a26cc1cd815c7a94602fb6a3130a34798625bc2f07b6", size = 12499608, upload-time = "2025-09-19T00:09:36.204Z" }, + { url = "https://files.pythonhosted.org/packages/00/81/a117f1b73a3015b076b20246b1f341c34a578ebd9662848c6b80ad5c4138/mypy-1.18.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22f27105f1525ec024b5c630c0b9f36d5c1cc4d447d61fe51ff4bd60633f47ac", size = 13244551, upload-time = "2025-09-19T00:10:17.531Z" }, + { url = "https://files.pythonhosted.org/packages/9b/61/b9f48e1714ce87c7bf0358eb93f60663740ebb08f9ea886ffc670cea7933/mypy-1.18.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = 
"sha256:030c52d0ea8144e721e49b1f68391e39553d7451f0c3f8a7565b59e19fcb608b", size = 13491552, upload-time = "2025-09-19T00:10:13.753Z" }, + { url = "https://files.pythonhosted.org/packages/c9/66/b2c0af3b684fa80d1b27501a8bdd3d2daa467ea3992a8aa612f5ca17c2db/mypy-1.18.2-cp39-cp39-win_amd64.whl", hash = "sha256:aa5e07ac1a60a253445797e42b8b2963c9675563a94f11291ab40718b016a7a0", size = 9765635, upload-time = "2025-09-19T00:10:30.993Z" }, + { url = "https://files.pythonhosted.org/packages/87/e3/be76d87158ebafa0309946c4a73831974d4d6ab4f4ef40c3b53a385a66fd/mypy-1.18.2-py3-none-any.whl", hash = "sha256:22a1748707dd62b58d2ae53562ffc4d7f8bcc727e8ac7cbc69c053ddc874d47e", size = 2352367, upload-time = "2025-09-19T00:10:15.489Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + 
+[[package]] +name = "pip" +version = "25.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/16/650289cd3f43d5a2fadfd98c68bd1e1e7f2550a1a5326768cddfbcedb2c5/pip-25.2.tar.gz", hash = "sha256:578283f006390f85bb6282dffb876454593d637f5d1be494b5202ce4877e71f2", size = 1840021, upload-time = "2025-07-30T21:50:15.401Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = 
"pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pykerberos" +version = "1.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/07/e9/ae44ea7d7605df9e5ca1ed745a2c5672dc838a8398101051dd5f255b130d/pykerberos-1.2.4.tar.gz", hash = "sha256:9d701ebd8fc596c99d3155d5ba45813bd5908d26ef83ba0add250edb622abed4", size = 25046, upload-time = "2022-03-09T03:54:08.546Z" } + +[[package]] +name = "pymongo" +source = { editable = "." } +dependencies = [ + { name = "dnspython" }, +] + +[package.optional-dependencies] +aws = [ + { name = "pymongo-auth-aws" }, +] +docs = [ + { name = "furo" }, + { name = "readthedocs-sphinx-search" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "sphinx-autobuild" }, + { name = "sphinx-rtd-theme" }, + { name = "sphinxcontrib-shellcheck" }, +] +encryption = [ + { name = "certifi", marker = "os_name == 'nt' or sys_platform == 'darwin'" }, + { name = "pymongo-auth-aws" }, + { name = "pymongocrypt" }, +] +gssapi = [ + { name = "pykerberos", marker = "os_name != 'nt'" }, + { name = "winkerberos", marker = "os_name == 'nt'" }, +] +ocsp = [ + { name = "certifi", marker = "os_name == 'nt' or sys_platform == 'darwin'" }, + { name = "cryptography" }, + { name = "pyopenssl" }, + { name = "requests" }, + { name = "service-identity" }, +] +snappy = [ + { name = "python-snappy" }, +] +test = [ + { name = "importlib-metadata", marker = "python_full_version < '3.13'" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, +] +zstd = [ + { name = "zstandard" }, +] + +[package.dev-dependencies] +coverage = [ + { name = "coverage" }, + { name = "pytest-cov" }, +] +gevent = [ + { name = "gevent" }, +] +mockupdb = [ + { name = "mockupdb" }, +] +perf = [ + { name = "simplejson" }, +] +pip = [ + { name = "pip" }, +] +typing = [ + { name = "mypy" }, + { name = "pip" }, + { name = "pyright" }, + { name = "typing-extensions" }, +] + +[package.metadata] +requires-dist = [ + { name = "certifi", marker = "(os_name == 'nt' and extra == 'encryption') or (sys_platform == 'darwin' and extra == 'encryption')", specifier = ">=2023.7.22" }, + { name = "certifi", marker = "(os_name == 'nt' and extra == 'ocsp') or (sys_platform == 'darwin' and extra == 'ocsp')", specifier = ">=2023.7.22" }, + { name = "cryptography", marker = "extra == 'ocsp'", specifier = ">=2.5" }, + { name = "dnspython", specifier = ">=2.6.1,<3.0.0" }, + { name = "furo", marker = "extra == 'docs'", specifier = "==2025.9.25" }, + { name = "importlib-metadata", marker = 
"python_full_version < '3.13' and extra == 'test'", specifier = ">=7.0" }, + { name = "pykerberos", marker = "os_name != 'nt' and extra == 'gssapi'" }, + { name = "pymongo-auth-aws", marker = "extra == 'aws'", specifier = ">=1.1.0,<2.0.0" }, + { name = "pymongo-auth-aws", marker = "extra == 'encryption'", specifier = ">=1.1.0,<2.0.0" }, + { name = "pymongocrypt", marker = "extra == 'encryption'", specifier = ">=1.13.0,<2.0.0" }, + { name = "pyopenssl", marker = "extra == 'ocsp'", specifier = ">=17.2.0" }, + { name = "pytest", marker = "extra == 'test'", specifier = ">=8.2" }, + { name = "pytest-asyncio", marker = "extra == 'test'", specifier = ">=0.24.0" }, + { name = "python-snappy", marker = "extra == 'snappy'" }, + { name = "readthedocs-sphinx-search", marker = "extra == 'docs'", specifier = "~=0.3" }, + { name = "requests", marker = "extra == 'ocsp'", specifier = "<3.0.0" }, + { name = "service-identity", marker = "extra == 'ocsp'", specifier = ">=18.1.0" }, + { name = "sphinx", marker = "extra == 'docs'", specifier = ">=5.3,<9" }, + { name = "sphinx-autobuild", marker = "extra == 'docs'", specifier = ">=2020.9.1" }, + { name = "sphinx-rtd-theme", marker = "extra == 'docs'", specifier = ">=2,<4" }, + { name = "sphinxcontrib-shellcheck", marker = "extra == 'docs'", specifier = ">=1,<2" }, + { name = "winkerberos", marker = "os_name == 'nt' and extra == 'gssapi'", specifier = ">=0.5.0" }, + { name = "zstandard", marker = "extra == 'zstd'" }, +] +provides-extras = ["aws", "docs", "encryption", "gssapi", "ocsp", "snappy", "test", "zstd"] + +[package.metadata.requires-dev] +coverage = [ + { name = "coverage", specifier = ">=5,<=7.10.6" }, + { name = "pytest-cov" }, +] +dev = [] +gevent = [{ name = "gevent", specifier = ">=20.6.0" }] +mockupdb = [{ name = "mockupdb", git = "https://github.com/mongodb-labs/mongo-mockup-db?rev=master" }] +perf = [{ name = "simplejson", specifier = ">=3.17.0" }] +pip = [{ name = "pip" }] +typing = [ + { name = "mypy", specifier = "==1.18.2" }, + { name = "pip" }, + { name = "pyright", specifier = "==1.1.406" }, + { name = "typing-extensions" }, +] + +[[package]] +name = "pymongo-auth-aws" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "boto3" }, + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c7/37/ca8d840f322f0047b71afcec7a489b1ea1f59a5f6d29f91ad8004024736f/pymongo_auth_aws-1.3.0.tar.gz", hash = "sha256:d0fa893958dc525ca29f601c34f2ca73c860f66bc6511ec0a7da6eb7ea44e94f", size = 18559, upload-time = "2024-09-11T20:29:17.668Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/12/a997fc108416f31fac55748e5406c1c8c4e976a4073f07b5553825641611/pymongo_auth_aws-1.3.0-py3-none-any.whl", hash = "sha256:367f6d853da428a02e9e450422756133715d40f8141f47ae5d98f139a88c0ce5", size = 15470, upload-time = "2024-09-11T20:29:16.637Z" }, +] + +[[package]] +name = "pymongocrypt" +version = "1.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, + { name = "cryptography" }, + { name = "httpx" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fb/8e/dd9ed710e8fd4eec127dac1db3b3e9156ffcf340a0463a82087a12ae924e/pymongocrypt-1.16.0.tar.gz", hash = "sha256:0db0812055d00e6f5562a8d66711c4cba4b75014c363306c9b298a9fd68fccdd", size = 65354, upload-time = "2025-09-09T18:54:25.531Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/fb/8b/dda0f19ce16f7b257e4aa2a8831a1a1307c1ea124a00f571cda83a04adcb/pymongocrypt-1.16.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:fbd85534880ea8525956b96e583a7021c721abbf3b51a6dbe48a57d7eba8e74a", size = 4721169, upload-time = "2025-09-09T18:54:18.642Z" }, + { url = "https://files.pythonhosted.org/packages/99/48/512a5b597d71407f9b06a14cd8e5ac376e06b780d4d54a4e69726bd48703/pymongocrypt-1.16.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:85df0a78480e91bdd3a5a6da3e4cdc7d9700de8a871aa8168588981c041f1914", size = 4038242, upload-time = "2025-09-09T18:54:20.496Z" }, + { url = "https://files.pythonhosted.org/packages/3f/67/3bdeda347191d6c1ee257eb3da8c85f1278d86dfb493cc9bc26352a41d0a/pymongocrypt-1.16.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8d2ebeb1b5e4f4554bf44f726e8009c59c4d7d0b412beebfece875991714676", size = 3775742, upload-time = "2025-09-09T18:54:22.254Z" }, + { url = "https://files.pythonhosted.org/packages/dc/81/70f6947afbd1ac7be54482b44cb1b99e8e9b9cac41985e6250c4fc279e58/pymongocrypt-1.16.0-py3-none-win_amd64.whl", hash = "sha256:c20afcd89ec5fc53305e924c05c4a0321ddc73f1e4e7c8240ee2fd0123e23609", size = 1607917, upload-time = "2025-09-09T18:54:24.182Z" }, +] + +[[package]] +name = "pyopenssl" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/80/be/97b83a464498a79103036bc74d1038df4a7ef0e402cfaf4d5e113fb14759/pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329", size = 184073, upload-time = "2025-09-17T00:32:21.037Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/81/ef2b1dfd1862567d573a4fdbc9f969067621764fbb74338496840a1d2977/pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6", size = 57268, upload-time = "2025-09-17T00:32:19.474Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.406" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/16/6b4fbdd1fef59a0292cbb99f790b44983e390321eccbc5921b4d161da5d1/pyright-1.1.406.tar.gz", hash = "sha256:c4872bc58c9643dac09e8a2e74d472c62036910b3bd37a32813989ef7576ea2c", size = 4113151, upload-time = "2025-10-02T01:04:45.488Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/a2/e309afbb459f50507103793aaef85ca4348b66814c86bc73908bdeb66d12/pyright-1.1.406-py3-none-any.whl", hash = "sha256:1d81fb43c2407bf566e97e57abb01c811973fdb21b2df8df59f870f688bdca71", size = 5980982, upload-time = "2025-10-02T01:04:43.137Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = 
"sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" }, + { name = "pytest" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/51/f8794af39eeb870e87a8c8068642fc07bce0c854d6865d7dd0f2a9d338c2/pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea", size = 46652, upload-time = "2025-07-16T04:29:26.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/9d/bf86eddabf8c6c9cb1ea9a869d6873b46f105a5d292d3a6f7071f5b07935/pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf", size = 15157, upload-time = "2025-07-16T04:29:24.929Z" }, +] + +[[package]] +name = "pytest-cov" +version = "6.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage", extra = ["toml"] }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/99/668cade231f434aaa59bbfbf49469068d2ddd945000621d3d165d2e7dd7b/pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2", size = 69432, upload-time = "2025-06-12T10:47:47.684Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/16/4ea354101abb1287856baa4af2732be351c7bee728065aed451b678153fd/pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5", size = 24644, upload-time = "2025-06-12T10:47:45.932Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-snappy" +version = "0.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cramjam" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/66/9185fbb6605ba92716d9f77fbb13c97eb671cd13c3ad56bd154016fbf08b/python_snappy-0.7.3.tar.gz", hash = "sha256:40216c1badfb2d38ac781ecb162a1d0ec40f8ee9747e610bcfefdfa79486cee3", size = 9337, upload-time = "2024-08-29T13:16:05.705Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/86/c1/0ee413ddd639aebf22c85d6db39f136ccc10e6a4b4dd275a92b5c839de8d/python_snappy-0.7.3-py3-none-any.whl", hash = "sha256:074c0636cfcd97e7251330f428064050ac81a52c62ed884fc2ddebbb60ed7f50", size = 9155, upload-time = "2024-08-29T13:16:04.773Z" }, +] + +[[package]] +name = "readthedocs-sphinx-search" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8f/96/0c51439e3dbc634cf5328ffb173ff759b7fc9abf3276e78bf71d9fc0aa51/readthedocs-sphinx-search-0.3.2.tar.gz", hash = "sha256:277773bfa28566a86694c08e568d5a648cd80f22826545555a764d6d20c365fb", size = 21949, upload-time = "2024-01-15T16:46:22.565Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/3c/41bc9d7d4d936a73e380423f23996bee1691e17598d8a03c062be6aac640/readthedocs_sphinx_search-0.3.2-py3-none-any.whl", hash = "sha256:58716fd21f01581e6e67bf3bc02e79c77e10dc58b5f8e4c7cc1977e013eda173", size = 21379, upload-time = "2024-01-15T16:46:20.552Z" }, +] + +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3", version = "1.26.20", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "urllib3", version = "2.5.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "roman-numerals-py" +version = "3.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/30/76/48fd56d17c5bdbdf65609abbc67288728a98ed4c02919428d4f52d23b24b/roman_numerals_py-3.1.0.tar.gz", hash = "sha256:be4bf804f083a4ce001b5eb7e3c0862479d10f94c936f6c4e5f250aa5ff5bd2d", size = 9017, upload-time = "2025-02-22T07:34:54.333Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/53/97/d2cbbaa10c9b826af0e10fdf836e1bf344d9f0abb873ebc34d1f49642d3f/roman_numerals_py-3.1.0-py3-none-any.whl", hash = "sha256:9da2ad2fb670bcf24e81070ceb3be72f6c11c440d73bd579fbeca1e9f330954c", size = 7742, upload-time = "2025-02-22T07:34:52.422Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/05/d52bf1e65044b4e5e27d4e63e8d1579dbdec54fce685908ae09bc3720030/s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf", size = 150589, upload-time = "2025-07-18T19:22:42.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/4f/d073e09df851cfa251ef7840007d04db3293a0482ce607d2b993926089be/s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724", size = 85308, 
upload-time = "2025-07-18T19:22:40.947Z" }, +] + +[[package]] +name = "service-identity" +version = "24.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "cryptography" }, + { name = "pyasn1" }, + { name = "pyasn1-modules" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/07/a5/dfc752b979067947261dbbf2543470c58efe735c3c1301dd870ef27830ee/service_identity-24.2.0.tar.gz", hash = "sha256:b8683ba13f0d39c6cd5d625d2c5f65421d6d707b013b375c355751557cbe8e09", size = 39245, upload-time = "2024-10-26T07:21:57.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/2c/ca6dd598b384bc1ce581e24aaae0f2bed4ccac57749d5c3befbb5e742081/service_identity-24.2.0-py3-none-any.whl", hash = "sha256:6b047fbd8a84fd0bb0d55ebce4031e400562b9196e1e0d3e0fe2b8a59f6d4a85", size = 11364, upload-time = "2024-10-26T07:21:56.302Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "simplejson" +version = "3.20.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/92/51b417685abd96b31308b61b9acce7ec50d8e1de8fbc39a7fd4962c60689/simplejson-3.20.1.tar.gz", hash = "sha256:e64139b4ec4f1f24c142ff7dcafe55a22b811a74d86d66560c8815687143037d", size = 85591, upload-time = "2025-02-15T05:18:53.15Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/c4/627214fb418cd4a17fb0230ff0b6c3bb4a85cbb48dd69c85dcc3b85df828/simplejson-3.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e580aa65d5f6c3bf41b9b4afe74be5d5ddba9576701c107c772d936ea2b5043a", size = 93790, upload-time = "2025-02-15T05:15:32.954Z" }, + { url = "https://files.pythonhosted.org/packages/15/ca/56a6a2a33cbcf330c4d71af3f827c47e4e0ba791e78f2642f3d1ab02ff31/simplejson-3.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a586ce4f78cec11f22fe55c5bee0f067e803aab9bad3441afe2181693b5ebb5", size = 75707, upload-time = "2025-02-15T05:15:34.954Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c8/3d92b67e03a3b6207d97202669f9454ed700b35ade9bd4428265a078fb6c/simplejson-3.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74a1608f9e6e8c27a4008d70a54270868306d80ed48c9df7872f9f4b8ac87808", size = 75700, upload-time = "2025-02-15T05:15:37.144Z" }, + { url = "https://files.pythonhosted.org/packages/74/30/20001219d6fdca4aaa3974c96dfb6955a766b4e2cc950505a5b51fd050b0/simplejson-3.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03db8cb64154189a92a7786209f24e391644f3a3fa335658be2df2af1960b8d8", size = 138672, upload-time = "2025-02-15T05:15:38.547Z" }, + { url = "https://files.pythonhosted.org/packages/21/47/50157810876c2a7ebbd6e6346ec25eda841fe061fecaa02538a7742a3d2a/simplejson-3.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:eea7e2b7d858f6fdfbf0fe3cb846d6bd8a45446865bc09960e51f3d473c2271b", size = 146616, upload-time = "2025-02-15T05:15:39.871Z" }, + { url = "https://files.pythonhosted.org/packages/95/60/8c97cdc93096437b0aca2745aca63c880fe2315fd7f6a6ce6edbb344a2ae/simplejson-3.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e66712b17d8425bb7ff8968d4c7c7fd5a2dd7bd63728b28356223c000dd2f91f", size = 134344, upload-time = "2025-02-15T05:15:42.091Z" }, + { url = "https://files.pythonhosted.org/packages/bb/9e/da184f0e9bb3a5d7ffcde713bd41b4fe46cca56b6f24d9bd155fac56805a/simplejson-3.20.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2cc4f6486f9f515b62f5831ff1888886619b84fc837de68f26d919ba7bbdcbc", size = 138017, upload-time = "2025-02-15T05:15:43.542Z" }, + { url = "https://files.pythonhosted.org/packages/31/db/00d1a8d9b036db98f678c8a3c69ed17d2894d1768d7a00576e787ad3e546/simplejson-3.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a3c2df555ee4016148fa192e2b9cd9e60bc1d40769366134882685e90aee2a1e", size = 140118, upload-time = "2025-02-15T05:15:45.7Z" }, + { url = "https://files.pythonhosted.org/packages/52/21/57fc47eab8c1c73390b933a5ba9271f08e3e1ec83162c580357f28f5b97c/simplejson-3.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:78520f04b7548a5e476b5396c0847e066f1e0a4c0c5e920da1ad65e95f410b11", size = 140314, upload-time = "2025-02-15T05:16:07.949Z" }, + { url = "https://files.pythonhosted.org/packages/ad/cc/7cfd78d1e0fa5e57350b98cfe77353b6dfa13dce21afa4060e1019223852/simplejson-3.20.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f4bd49ecde87b0fe9f55cc971449a32832bca9910821f7072bbfae1155eaa007", size = 148544, upload-time = "2025-02-15T05:16:09.455Z" }, + { url = "https://files.pythonhosted.org/packages/63/26/1c894a1c2bd95dc8be0cf5a2fa73b0d173105b6ca18c90cb981ff10443d0/simplejson-3.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7eaae2b88eb5da53caaffdfa50e2e12022553949b88c0df4f9a9663609373f72", size = 141172, upload-time = "2025-02-15T05:16:10.966Z" }, + { url = "https://files.pythonhosted.org/packages/93/27/0717dccc10cd9988dbf1314def52ab32678a95a95328bb37cafacf499400/simplejson-3.20.1-cp310-cp310-win32.whl", hash = "sha256:e836fb88902799eac8debc2b642300748f4860a197fa3d9ea502112b6bb8e142", size = 74181, upload-time = "2025-02-15T05:16:12.361Z" }, + { url = "https://files.pythonhosted.org/packages/5f/af/593f896573f306519332d4287b1ab8b7b888c239bbd5159f7054d7055c2d/simplejson-3.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a19b552b212fc3b5b96fc5ce92333d4a9ac0a800803e1f17ebb16dac4be5", size = 75738, upload-time = "2025-02-15T05:16:14.438Z" }, + { url = "https://files.pythonhosted.org/packages/76/59/74bc90d1c051bc2432c96b34bd4e8036875ab58b4fcbe4d6a5a76985f853/simplejson-3.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:325b8c107253d3217e89d7b50c71015b5b31e2433e6c5bf38967b2f80630a8ca", size = 92132, upload-time = "2025-02-15T05:16:15.743Z" }, + { url = "https://files.pythonhosted.org/packages/71/c7/1970916e0c51794fff89f76da2f632aaf0b259b87753c88a8c409623d3e1/simplejson-3.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88a7baa8211089b9e58d78fbc1b0b322103f3f3d459ff16f03a36cece0d0fcf0", size = 74956, upload-time = "2025-02-15T05:16:17.062Z" }, + { url = "https://files.pythonhosted.org/packages/c8/0d/98cc5909180463f1d75fac7180de62d4cdb4e82c4fef276b9e591979372c/simplejson-3.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:299b1007b8101d50d95bc0db1bf5c38dc372e85b504cf77f596462083ee77e3f", size = 74772, upload-time = "2025-02-15T05:16:19.204Z" }, + { url = "https://files.pythonhosted.org/packages/e1/94/a30a5211a90d67725a3e8fcc1c788189f2ae2ed2b96b63ed15d0b7f5d6bb/simplejson-3.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ec618ed65caab48e81e3ed29586236a8e57daef792f1f3bb59504a7e98cd10", size = 143575, upload-time = "2025-02-15T05:16:21.337Z" }, + { url = "https://files.pythonhosted.org/packages/ee/08/cdb6821f1058eb5db46d252de69ff7e6c53f05f1bae6368fe20d5b51d37e/simplejson-3.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2cdead1d3197f0ff43373cf4730213420523ba48697743e135e26f3d179f38", size = 153241, upload-time = "2025-02-15T05:16:22.859Z" }, + { url = "https://files.pythonhosted.org/packages/4c/2d/ca3caeea0bdc5efc5503d5f57a2dfb56804898fb196dfada121323ee0ccb/simplejson-3.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3466d2839fdc83e1af42e07b90bc8ff361c4e8796cd66722a40ba14e458faddd", size = 141500, upload-time = "2025-02-15T05:16:25.068Z" }, + { url = "https://files.pythonhosted.org/packages/e1/33/d3e0779d5c58245e7370c98eb969275af6b7a4a5aec3b97cbf85f09ad328/simplejson-3.20.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d492ed8e92f3a9f9be829205f44b1d0a89af6582f0cf43e0d129fa477b93fe0c", size = 144757, upload-time = "2025-02-15T05:16:28.301Z" }, + { url = "https://files.pythonhosted.org/packages/54/53/2d93128bb55861b2fa36c5944f38da51a0bc6d83e513afc6f7838440dd15/simplejson-3.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f924b485537b640dc69434565463fd6fc0c68c65a8c6e01a823dd26c9983cf79", size = 144409, upload-time = "2025-02-15T05:16:29.687Z" }, + { url = "https://files.pythonhosted.org/packages/99/4c/dac310a98f897ad3435b4bdc836d92e78f09e38c5dbf28211ed21dc59fa2/simplejson-3.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9e8eacf6a3491bf76ea91a8d46726368a6be0eb94993f60b8583550baae9439e", size = 146082, upload-time = "2025-02-15T05:16:31.064Z" }, + { url = "https://files.pythonhosted.org/packages/ee/22/d7ba958cfed39827335b82656b1c46f89678faecda9a7677b47e87b48ee6/simplejson-3.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:d34d04bf90b4cea7c22d8b19091633908f14a096caa301b24c2f3d85b5068fb8", size = 154339, upload-time = "2025-02-15T05:16:32.719Z" }, + { url = "https://files.pythonhosted.org/packages/b8/c8/b072b741129406a7086a0799c6f5d13096231bf35fdd87a0cffa789687fc/simplejson-3.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:69dd28d4ce38390ea4aaf212902712c0fd1093dc4c1ff67e09687c3c3e15a749", size = 147915, upload-time = "2025-02-15T05:16:34.291Z" }, + { url = "https://files.pythonhosted.org/packages/6c/46/8347e61e9cf3db5342a42f7fd30a81b4f5cf85977f916852d7674a540907/simplejson-3.20.1-cp311-cp311-win32.whl", hash = "sha256:dfe7a9da5fd2a3499436cd350f31539e0a6ded5da6b5b3d422df016444d65e43", size = 73972, upload-time = "2025-02-15T05:16:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/01/85/b52f24859237b4e9d523d5655796d911ba3d46e242eb1959c45b6af5aedd/simplejson-3.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:896a6c04d7861d507d800da7642479c3547060bf97419d9ef73d98ced8258766", size = 75595, upload-time = "2025-02-15T05:16:36.957Z" }, + { url = 
"https://files.pythonhosted.org/packages/8d/eb/34c16a1ac9ba265d024dc977ad84e1659d931c0a700967c3e59a98ed7514/simplejson-3.20.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f31c4a3a7ab18467ee73a27f3e59158255d1520f3aad74315edde7a940f1be23", size = 93100, upload-time = "2025-02-15T05:16:38.801Z" }, + { url = "https://files.pythonhosted.org/packages/41/fc/2c2c007d135894971e6814e7c0806936e5bade28f8db4dd7e2a58b50debd/simplejson-3.20.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:884e6183d16b725e113b83a6fc0230152ab6627d4d36cb05c89c2c5bccfa7bc6", size = 75464, upload-time = "2025-02-15T05:16:40.905Z" }, + { url = "https://files.pythonhosted.org/packages/0f/05/2b5ecb33b776c34bb5cace5de5d7669f9b60e3ca13c113037b2ca86edfbd/simplejson-3.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03d7a426e416fe0d3337115f04164cd9427eb4256e843a6b8751cacf70abc832", size = 75112, upload-time = "2025-02-15T05:16:42.246Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/1f3609a2792f06cd4b71030485f78e91eb09cfd57bebf3116bf2980a8bac/simplejson-3.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:000602141d0bddfcff60ea6a6e97d5e10c9db6b17fd2d6c66199fa481b6214bb", size = 150182, upload-time = "2025-02-15T05:16:43.557Z" }, + { url = "https://files.pythonhosted.org/packages/2f/b0/053fbda38b8b602a77a4f7829def1b4f316cd8deb5440a6d3ee90790d2a4/simplejson-3.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:af8377a8af78226e82e3a4349efdde59ffa421ae88be67e18cef915e4023a595", size = 158363, upload-time = "2025-02-15T05:16:45.748Z" }, + { url = "https://files.pythonhosted.org/packages/d1/4b/2eb84ae867539a80822e92f9be4a7200dffba609275faf99b24141839110/simplejson-3.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c7de4c88ab2fbcb8781a3b982ef883696736134e20b1210bca43fb42ff1acf", size = 148415, upload-time = "2025-02-15T05:16:47.861Z" }, + { url = "https://files.pythonhosted.org/packages/e0/bd/400b0bd372a5666addf2540c7358bfc3841b9ce5cdbc5cc4ad2f61627ad8/simplejson-3.20.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:455a882ff3f97d810709f7b620007d4e0aca8da71d06fc5c18ba11daf1c4df49", size = 152213, upload-time = "2025-02-15T05:16:49.25Z" }, + { url = "https://files.pythonhosted.org/packages/50/12/143f447bf6a827ee9472693768dc1a5eb96154f8feb140a88ce6973a3cfa/simplejson-3.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:fc0f523ce923e7f38eb67804bc80e0a028c76d7868500aa3f59225574b5d0453", size = 150048, upload-time = "2025-02-15T05:16:51.5Z" }, + { url = "https://files.pythonhosted.org/packages/5e/ea/dd9b3e8e8ed710a66f24a22c16a907c9b539b6f5f45fd8586bd5c231444e/simplejson-3.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76461ec929282dde4a08061071a47281ad939d0202dc4e63cdd135844e162fbc", size = 151668, upload-time = "2025-02-15T05:16:53Z" }, + { url = "https://files.pythonhosted.org/packages/99/af/ee52a8045426a0c5b89d755a5a70cc821815ef3c333b56fbcad33c4435c0/simplejson-3.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19c2da8c043607bde4d4ef3a6b633e668a7d2e3d56f40a476a74c5ea71949f", size = 158840, upload-time = "2025-02-15T05:16:54.851Z" }, + { url = "https://files.pythonhosted.org/packages/68/db/ab32869acea6b5de7d75fa0dac07a112ded795d41eaa7e66c7813b17be95/simplejson-3.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:b2578bedaedf6294415197b267d4ef678fea336dd78ee2a6d2f4b028e9d07be3", size = 154212, upload-time = "2025-02-15T05:16:56.318Z" }, + { url = "https://files.pythonhosted.org/packages/fa/7a/e3132d454977d75a3bf9a6d541d730f76462ebf42a96fea2621498166f41/simplejson-3.20.1-cp312-cp312-win32.whl", hash = "sha256:339f407373325a36b7fd744b688ba5bae0666b5d340ec6d98aebc3014bf3d8ea", size = 74101, upload-time = "2025-02-15T05:16:57.746Z" }, + { url = "https://files.pythonhosted.org/packages/bc/5d/4e243e937fa3560107c69f6f7c2eed8589163f5ed14324e864871daa2dd9/simplejson-3.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:627d4486a1ea7edf1f66bb044ace1ce6b4c1698acd1b05353c97ba4864ea2e17", size = 75736, upload-time = "2025-02-15T05:16:59.017Z" }, + { url = "https://files.pythonhosted.org/packages/c4/03/0f453a27877cb5a5fff16a975925f4119102cc8552f52536b9a98ef0431e/simplejson-3.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:71e849e7ceb2178344998cbe5ade101f1b329460243c79c27fbfc51c0447a7c3", size = 93109, upload-time = "2025-02-15T05:17:00.377Z" }, + { url = "https://files.pythonhosted.org/packages/74/1f/a729f4026850cabeaff23e134646c3f455e86925d2533463420635ae54de/simplejson-3.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b63fdbab29dc3868d6f009a59797cefaba315fd43cd32ddd998ee1da28e50e29", size = 75475, upload-time = "2025-02-15T05:17:02.544Z" }, + { url = "https://files.pythonhosted.org/packages/e2/14/50a2713fee8ff1f8d655b1a14f4a0f1c0c7246768a1b3b3d12964a4ed5aa/simplejson-3.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1190f9a3ce644fd50ec277ac4a98c0517f532cfebdcc4bd975c0979a9f05e1fb", size = 75112, upload-time = "2025-02-15T05:17:03.875Z" }, + { url = "https://files.pythonhosted.org/packages/45/86/ea9835abb646755140e2d482edc9bc1e91997ed19a59fd77ae4c6a0facea/simplejson-3.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1336ba7bcb722ad487cd265701ff0583c0bb6de638364ca947bb84ecc0015d1", size = 150245, upload-time = "2025-02-15T05:17:06.899Z" }, + { url = "https://files.pythonhosted.org/packages/12/b4/53084809faede45da829fe571c65fbda8479d2a5b9c633f46b74124d56f5/simplejson-3.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e975aac6a5acd8b510eba58d5591e10a03e3d16c1cf8a8624ca177491f7230f0", size = 158465, upload-time = "2025-02-15T05:17:08.707Z" }, + { url = "https://files.pythonhosted.org/packages/a9/7d/d56579468d1660b3841e1f21c14490d103e33cf911886b22652d6e9683ec/simplejson-3.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a6dd11ee282937ad749da6f3b8d87952ad585b26e5edfa10da3ae2536c73078", size = 148514, upload-time = "2025-02-15T05:17:11.323Z" }, + { url = "https://files.pythonhosted.org/packages/19/e3/874b1cca3d3897b486d3afdccc475eb3a09815bf1015b01cf7fcb52a55f0/simplejson-3.20.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab980fcc446ab87ea0879edad41a5c28f2d86020014eb035cf5161e8de4474c6", size = 152262, upload-time = "2025-02-15T05:17:13.543Z" }, + { url = "https://files.pythonhosted.org/packages/32/84/f0fdb3625292d945c2bd13a814584603aebdb38cfbe5fe9be6b46fe598c4/simplejson-3.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f5aee2a4cb6b146bd17333ac623610f069f34e8f31d2f4f0c1a2186e50c594f0", size = 150164, upload-time = "2025-02-15T05:17:15.021Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/51/6d625247224f01eaaeabace9aec75ac5603a42f8ebcce02c486fbda8b428/simplejson-3.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:652d8eecbb9a3b6461b21ec7cf11fd0acbab144e45e600c817ecf18e4580b99e", size = 151795, upload-time = "2025-02-15T05:17:16.542Z" }, + { url = "https://files.pythonhosted.org/packages/7f/d9/bb921df6b35be8412f519e58e86d1060fddf3ad401b783e4862e0a74c4c1/simplejson-3.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:8c09948f1a486a89251ee3a67c9f8c969b379f6ffff1a6064b41fea3bce0a112", size = 159027, upload-time = "2025-02-15T05:17:18.083Z" }, + { url = "https://files.pythonhosted.org/packages/03/c5/5950605e4ad023a6621cf4c931b29fd3d2a9c1f36be937230bfc83d7271d/simplejson-3.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:cbbd7b215ad4fc6f058b5dd4c26ee5c59f72e031dfda3ac183d7968a99e4ca3a", size = 154380, upload-time = "2025-02-15T05:17:20.334Z" }, + { url = "https://files.pythonhosted.org/packages/66/ad/b74149557c5ec1e4e4d55758bda426f5d2ec0123cd01a53ae63b8de51fa3/simplejson-3.20.1-cp313-cp313-win32.whl", hash = "sha256:ae81e482476eaa088ef9d0120ae5345de924f23962c0c1e20abbdff597631f87", size = 74102, upload-time = "2025-02-15T05:17:22.475Z" }, + { url = "https://files.pythonhosted.org/packages/db/a9/25282fdd24493e1022f30b7f5cdf804255c007218b2bfaa655bd7ad34b2d/simplejson-3.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:1b9fd15853b90aec3b1739f4471efbf1ac05066a2c7041bf8db821bb73cd2ddc", size = 75736, upload-time = "2025-02-15T05:17:24.122Z" }, + { url = "https://files.pythonhosted.org/packages/4c/ba/d32fe890a5edaf4a8518adf043bccf7866b600123f512a6de0988cf36810/simplejson-3.20.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a8011f1dd1d676befcd4d675ebdbfdbbefd3bf350052b956ba8c699fca7d8cef", size = 93773, upload-time = "2025-02-15T05:18:28.231Z" }, + { url = "https://files.pythonhosted.org/packages/48/c7/361e7f6695b56001a04e0a5cc623cd6c82ea2f45e872e61213e405cc8a24/simplejson-3.20.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e91703a4c5fec53e36875ae426ad785f4120bd1d93b65bed4752eeccd1789e0c", size = 75697, upload-time = "2025-02-15T05:18:30.006Z" }, + { url = "https://files.pythonhosted.org/packages/3c/2f/d0ff0b772d4ef092876eb85c99bc591c446b0502715551dad7dfc7f7c2c0/simplejson-3.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e39eaa57c7757daa25bcd21f976c46be443b73dd6c3da47fe5ce7b7048ccefe2", size = 75692, upload-time = "2025-02-15T05:18:31.424Z" }, + { url = "https://files.pythonhosted.org/packages/26/94/cab4db9530b6ca9d62f16a260e8311b04130ccd670dab75e958fcb44590e/simplejson-3.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceab2ce2acdc7fbaa433a93006758db6ba9a659e80c4faa13b80b9d2318e9b17", size = 138106, upload-time = "2025-02-15T05:18:32.907Z" }, + { url = "https://files.pythonhosted.org/packages/40/22/11c0f746bdb44c297cea8a37d8f7ccb75ea6681132aadfb9f820d9a52647/simplejson-3.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d4f320c33277a5b715db5bf5b10dae10c19076bd6d66c2843e04bd12d1f1ea5", size = 146242, upload-time = "2025-02-15T05:18:35.223Z" }, + { url = "https://files.pythonhosted.org/packages/78/e9/b7c4c26f29b41cc41ba5f0224c47adbfa7f28427418edfd58ab122f3b584/simplejson-3.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b6436c48e64378fa844d8c9e58a5ed0352bbcfd4028369a9b46679b7ab79d2d", size = 133866, upload-time = "2025-02-15T05:18:36.998Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/68/1e81ed83f38906c8859f2b973afb19302357d6003e724a6105cee0f61ec7/simplejson-3.20.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e18345c8dda5d699be8166b61f9d80aaee4545b709f1363f60813dc032dac53", size = 137444, upload-time = "2025-02-15T05:18:38.763Z" }, + { url = "https://files.pythonhosted.org/packages/9a/6b/8d1e076c543277c1d603230eec24f4dd75ebce46d351c0679526d202981f/simplejson-3.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:90b573693d1526bed576f6817e2a492eaaef68f088b57d7a9e83d122bbb49e51", size = 139617, upload-time = "2025-02-15T05:18:40.36Z" }, + { url = "https://files.pythonhosted.org/packages/d1/46/7b74803de10d4157c5cd2e89028897fa733374667bc5520a44b23b6c887a/simplejson-3.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:272cc767826e924a6bd369ea3dbf18e166ded29059c7a4d64d21a9a22424b5b5", size = 139725, upload-time = "2025-02-15T05:18:42.012Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8f/9991582665a7b6d95415e439bb4fbaa4faf0f77231666675a0fd1de54107/simplejson-3.20.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:51b41f284d603c4380732d7d619f8b34bd04bc4aa0ed0ed5f4ffd0539b14da44", size = 148010, upload-time = "2025-02-15T05:18:43.749Z" }, + { url = "https://files.pythonhosted.org/packages/54/ee/3c6e91989cdf65ec75e75662d9f15cfe167a792b893806169ea5b1da6fd2/simplejson-3.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6e6697a3067d281f01de0fe96fc7cba4ea870d96d7deb7bfcf85186d74456503", size = 140624, upload-time = "2025-02-15T05:18:45.498Z" }, + { url = "https://files.pythonhosted.org/packages/9d/bd/05e13ebb7ead81c8b555f4ccc741ea7dfa0ef5c2a0c183d6a7bc50a02bca/simplejson-3.20.1-cp39-cp39-win32.whl", hash = "sha256:6dd3a1d5aca87bf947f3339b0f8e8e329f1badf548bdbff37fac63c17936da8e", size = 74148, upload-time = "2025-02-15T05:18:47.27Z" }, + { url = "https://files.pythonhosted.org/packages/88/c9/d8bf87aaebec5a4c3ccfd5228689578e2fe77027d6114a259255d54969bf/simplejson-3.20.1-cp39-cp39-win_amd64.whl", hash = "sha256:463f1fca8fbf23d088e5850fdd0dd4d5faea8900a9f9680270bd98fd649814ca", size = 75732, upload-time = "2025-02-15T05:18:49.598Z" }, + { url = "https://files.pythonhosted.org/packages/4b/30/00f02a0a921556dd5a6db1ef2926a1bc7a8bbbfb1c49cfed68a275b8ab2b/simplejson-3.20.1-py3-none-any.whl", hash = "sha256:8a6c1bbac39fa4a79f83cbf1df6ccd8ff7069582a9fd8db1e52cea073bc2c697", size = 57121, upload-time = "2025-02-15T05:18:51.243Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, 
upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "snowballstemmer" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/75/a7/9810d872919697c9d01295633f5d574fb416d47e535f258272ca1f01f447/snowballstemmer-3.0.1.tar.gz", hash = "sha256:6d5eeeec8e9f84d4d56b847692bacf79bc2c8e90c7f80ca4444ff8b6f2e52895", size = 105575, upload-time = "2025-05-09T16:34:51.843Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/78/3565d011c61f5a43488987ee32b6f3f656e7f107ac2782dd57bdd7d91d9a/snowballstemmer-3.0.1-py3-none-any.whl", hash = "sha256:6cd7b3897da8d6c9ffb968a6781fa6532dce9c3618a4b127d920dab764a19064", size = 103274, upload-time = "2025-05-09T16:34:50.371Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3f/f4/4a80cd6ef364b2e8b65b15816a843c0980f7a5a2b4dc701fc574952aa19f/soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a", size = 103418, upload-time = "2025-04-20T18:50:08.518Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload-time = "2025-04-20T18:50:07.196Z" }, +] + +[[package]] +name = "sphinx" +version = "7.4.7" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "alabaster", version = "0.7.16", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "babel", marker = "python_full_version < '3.10'" }, + { name = "colorama", marker = "python_full_version < '3.10' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version < '3.10'" }, + { name = "imagesize", marker = "python_full_version < '3.10'" }, + { name = "importlib-metadata", marker = "python_full_version < '3.10'" }, + { name = "jinja2", marker = "python_full_version < '3.10'" }, + { name = "packaging", marker = "python_full_version < '3.10'" }, + { name = "pygments", marker = "python_full_version < '3.10'" }, + { name = "requests", marker = "python_full_version < '3.10'" }, + { name = "snowballstemmer", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version < '3.10'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version < '3.10'" }, + { name = "tomli", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5b/be/50e50cb4f2eff47df05673d361095cafd95521d2a22521b920c67a372dcb/sphinx-7.4.7.tar.gz", hash = 
"sha256:242f92a7ea7e6c5b406fdc2615413890ba9f699114a9c09192d7dfead2ee9cfe", size = 8067911, upload-time = "2024-07-20T14:46:56.059Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0d/ef/153f6803c5d5f8917dbb7f7fcf6d34a871ede3296fa89c2c703f5f8a6c8e/sphinx-7.4.7-py3-none-any.whl", hash = "sha256:c2419e2135d11f1951cd994d6eb18a1835bd8fdd8429f9ca375dc1f3281bd239", size = 3401624, upload-time = "2024-07-20T14:46:52.142Z" }, +] + +[[package]] +name = "sphinx" +version = "8.1.3" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "babel", marker = "python_full_version == '3.10.*'" }, + { name = "colorama", marker = "python_full_version == '3.10.*' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version == '3.10.*'" }, + { name = "imagesize", marker = "python_full_version == '3.10.*'" }, + { name = "jinja2", marker = "python_full_version == '3.10.*'" }, + { name = "packaging", marker = "python_full_version == '3.10.*'" }, + { name = "pygments", marker = "python_full_version == '3.10.*'" }, + { name = "requests", marker = "python_full_version == '3.10.*'" }, + { name = "snowballstemmer", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version == '3.10.*'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version == '3.10.*'" }, + { name = "tomli", marker = "python_full_version == '3.10.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/be0b61178fe2cdcb67e2a92fc9ebb488e3c51c4f74a36a7824c0adf23425/sphinx-8.1.3.tar.gz", hash = "sha256:43c1911eecb0d3e161ad78611bc905d1ad0e523e4ddc202a58a821773dc4c927", size = 8184611, upload-time = "2024-10-13T20:27:13.93Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125, upload-time = "2024-10-13T20:27:10.448Z" }, +] + +[[package]] +name = "sphinx" +version = "8.2.3" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", +] +dependencies = [ + { name = "alabaster", version = "1.0.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "babel", marker = "python_full_version >= '3.11'" }, + { name = "colorama", marker = "python_full_version >= '3.11' and sys_platform == 'win32'" }, + { name = "docutils", marker = "python_full_version >= '3.11'" }, + { name = "imagesize", marker = "python_full_version >= '3.11'" }, + { name = "jinja2", marker = "python_full_version >= '3.11'" }, + { name = "packaging", marker = "python_full_version >= '3.11'" }, + { name = "pygments", marker = "python_full_version >= '3.11'" }, + { name = "requests", marker = "python_full_version 
>= '3.11'" }, + { name = "roman-numerals-py", marker = "python_full_version >= '3.11'" }, + { name = "snowballstemmer", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-applehelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-devhelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-htmlhelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-jsmath", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-qthelp", marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-serializinghtml", marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/ad/4360e50ed56cb483667b8e6dadf2d3fda62359593faabbe749a27c4eaca6/sphinx-8.2.3.tar.gz", hash = "sha256:398ad29dee7f63a75888314e9424d40f52ce5a6a87ae88e7071e80af296ec348", size = 8321876, upload-time = "2025-03-02T22:31:59.658Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/53/136e9eca6e0b9dc0e1962e2c908fbea2e5ac000c2a2fbd9a35797958c48b/sphinx-8.2.3-py3-none-any.whl", hash = "sha256:4405915165f13521d875a8c29c8970800a0141c14cc5416a38feca4ea5d9b9c3", size = 3589741, upload-time = "2025-03-02T22:31:56.836Z" }, +] + +[[package]] +name = "sphinx-autobuild" +version = "2024.10.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "starlette" }, + { name = "uvicorn" }, + { name = "watchfiles" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a5/2c/155e1de2c1ba96a72e5dba152c509a8b41e047ee5c2def9e9f0d812f8be7/sphinx_autobuild-2024.10.3.tar.gz", hash = "sha256:248150f8f333e825107b6d4b86113ab28fa51750e5f9ae63b59dc339be951fb1", size = 14023, upload-time = "2024-10-02T23:15:30.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/c0/eba125db38c84d3c74717008fd3cb5000b68cd7e2cbafd1349c6a38c3d3b/sphinx_autobuild-2024.10.3-py3-none-any.whl", hash = "sha256:158e16c36f9d633e613c9aaf81c19b0fc458ca78b112533b20dafcda430d60fa", size = 11908, upload-time = "2024-10-02T23:15:28.739Z" }, +] + +[[package]] +name = "sphinx-basic-ng" +version = "1.0.0b2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/0b/a866924ded68efec7a1759587a4e478aec7559d8165fac8b2ad1c0e774d6/sphinx_basic_ng-1.0.0b2.tar.gz", hash = "sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9", size = 20736, upload-time = "2023-07-08T18:40:54.166Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/dd/018ce05c532a22007ac58d4f45232514cd9d6dd0ee1dc374e309db830983/sphinx_basic_ng-1.0.0b2-py3-none-any.whl", 
hash = "sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b", size = 22496, upload-time = "2023-07-08T18:40:52.659Z" }, +] + +[[package]] +name = "sphinx-rtd-theme" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "docutils" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, + { name = "sphinxcontrib-jquery" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/44/c97faec644d29a5ceddd3020ae2edffa69e7d00054a8c7a6021e82f20335/sphinx_rtd_theme-3.0.2.tar.gz", hash = "sha256:b7457bc25dda723b20b086a670b9953c859eab60a2a03ee8eb2bb23e176e5f85", size = 7620463, upload-time = "2024-11-13T11:06:04.545Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/77/46e3bac77b82b4df5bb5b61f2de98637724f246b4966cfc34bc5895d852a/sphinx_rtd_theme-3.0.2-py2.py3-none-any.whl", hash = "sha256:422ccc750c3a3a311de4ae327e82affdaf59eb695ba4936538552f3b00f4ee13", size = 7655561, upload-time = "2024-11-13T11:06:02.094Z" }, +] + +[[package]] +name = "sphinxcontrib-applehelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/6e/b837e84a1a704953c62ef8776d45c3e8d759876b4a84fe14eba2859106fe/sphinxcontrib_applehelp-2.0.0.tar.gz", hash = "sha256:2f29ef331735ce958efa4734873f084941970894c6090408b079c61b2e1c06d1", size = 20053, upload-time = "2024-07-29T01:09:00.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5d/85/9ebeae2f76e9e77b952f4b274c27238156eae7979c5421fba91a28f4970d/sphinxcontrib_applehelp-2.0.0-py3-none-any.whl", hash = "sha256:4cd3f0ec4ac5dd9c17ec65e9ab272c9b867ea77425228e68ecf08d6b28ddbdb5", size = 119300, upload-time = "2024-07-29T01:08:58.99Z" }, +] + +[[package]] +name = "sphinxcontrib-devhelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/d2/5beee64d3e4e747f316bae86b55943f51e82bb86ecd325883ef65741e7da/sphinxcontrib_devhelp-2.0.0.tar.gz", hash = "sha256:411f5d96d445d1d73bb5d52133377b4248ec79db5c793ce7dbe59e074b4dd1ad", size = 12967, upload-time = "2024-07-29T01:09:23.417Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/7a/987e583882f985fe4d7323774889ec58049171828b58c2217e7f79cdf44e/sphinxcontrib_devhelp-2.0.0-py3-none-any.whl", hash = "sha256:aefb8b83854e4b0998877524d1029fd3e6879210422ee3780459e28a1f03a8a2", size = 82530, upload-time = "2024-07-29T01:09:21.945Z" }, +] + +[[package]] +name = "sphinxcontrib-htmlhelp" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/93/983afd9aa001e5201eab16b5a444ed5b9b0a7a010541e0ddfbbfd0b2470c/sphinxcontrib_htmlhelp-2.1.0.tar.gz", hash = "sha256:c9e2916ace8aad64cc13a0d233ee22317f2b9025b9cf3295249fa985cc7082e9", size = 22617, upload-time = "2024-07-29T01:09:37.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7b/18a8c0bcec9182c05a0b3ec2a776bba4ead82750a55ff798e8d406dae604/sphinxcontrib_htmlhelp-2.1.0-py3-none-any.whl", hash = "sha256:166759820b47002d22914d64a075ce08f4c46818e17cfc9470a9786b759b19f8", size = 98705, upload-time = 
"2024-07-29T01:09:36.407Z" }, +] + +[[package]] +name = "sphinxcontrib-jquery" +version = "4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/f3/aa67467e051df70a6330fe7770894b3e4f09436dea6881ae0b4f3d87cad8/sphinxcontrib-jquery-4.1.tar.gz", hash = "sha256:1620739f04e36a2c779f1a131a2dfd49b2fd07351bf1968ced074365933abc7a", size = 122331, upload-time = "2023-03-14T15:01:01.944Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/85/749bd22d1a68db7291c89e2ebca53f4306c3f205853cf31e9de279034c3c/sphinxcontrib_jquery-4.1-py2.py3-none-any.whl", hash = "sha256:f936030d7d0147dd026a4f2b5a57343d233f1fc7b363f68b3d4f1cb0993878ae", size = 121104, upload-time = "2023-03-14T15:01:00.356Z" }, +] + +[[package]] +name = "sphinxcontrib-jsmath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/e8/9ed3830aeed71f17c026a07a5097edcf44b692850ef215b161b8ad875729/sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8", size = 5787, upload-time = "2019-01-21T16:10:16.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/42/4c8646762ee83602e3fb3fbe774c2fac12f317deb0b5dbeeedd2d3ba4b77/sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178", size = 5071, upload-time = "2019-01-21T16:10:14.333Z" }, +] + +[[package]] +name = "sphinxcontrib-qthelp" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/68/bc/9104308fc285eb3e0b31b67688235db556cd5b0ef31d96f30e45f2e51cae/sphinxcontrib_qthelp-2.0.0.tar.gz", hash = "sha256:4fe7d0ac8fc171045be623aba3e2a8f613f8682731f9153bb2e40ece16b9bbab", size = 17165, upload-time = "2024-07-29T01:09:56.435Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/83/859ecdd180cacc13b1f7e857abf8582a64552ea7a061057a6c716e790fce/sphinxcontrib_qthelp-2.0.0-py3-none-any.whl", hash = "sha256:b18a828cdba941ccd6ee8445dbe72ffa3ef8cbe7505d8cd1fa0d42d3f2d5f3eb", size = 88743, upload-time = "2024-07-29T01:09:54.885Z" }, +] + +[[package]] +name = "sphinxcontrib-serializinghtml" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3b/44/6716b257b0aa6bfd51a1b31665d1c205fb12cb5ad56de752dfa15657de2f/sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d", size = 16080, upload-time = "2024-07-29T01:10:09.332Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/a7/d2782e4e3f77c8450f727ba74a8f12756d5ba823d81b941f1b04da9d033a/sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331", size = 92072, upload-time = "2024-07-29T01:10:08.203Z" }, +] + +[[package]] +name = "sphinxcontrib-shellcheck" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "decorator" }, + { name = "docutils" }, + { name = "six" }, + { name = "sphinx", version = "7.4.7", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "sphinx", version = "8.1.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version == '3.10.*'" }, + { name = "sphinx", version = "8.2.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/2b/20717a5e0c7ee99dfd5fcdf11a8cf0ab02533cf62775f24d344ea5cf48c1/sphinxcontrib-shellcheck-1.1.2.zip", hash = "sha256:475a3ae12a1cfc1bc26cff57f0dd15561213818e3b470b3eacc4bb8be7b129c0", size = 338739, upload-time = "2020-03-30T01:51:39.993Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/9c/1ff7fe5174f944fac0fcb53bdaac7b98d73a98dd2ca800d95af6af9edb9a/sphinxcontrib_shellcheck-1.1.2-py35-none-any.whl", hash = "sha256:c0449dc9402521ab1d05a1b9eb8c9099707da64824341686dab4f620dc688514", size = 11532, upload-time = "2020-03-30T01:51:34.913Z" }, + { url = "https://files.pythonhosted.org/packages/9f/8c/833388d3127d8dc0d5558bf52225eb20ed024ac46ef8ef4bffe7298ceb3d/sphinxcontrib_shellcheck-1.1.2-py36-none-any.whl", hash = "sha256:bcd8ffd26e6430deff9ffd10705683b502ace3fc8b4d1ba84496b3752f65fe52", size = 11533, upload-time = "2020-03-30T01:51:36.422Z" }, + { url = "https://files.pythonhosted.org/packages/9d/b5/cdc74763bcf0916f47d053830c00114f1de65d97ea2281b66bbf2a587b8a/sphinxcontrib_shellcheck-1.1.2-py37-none-any.whl", hash = "sha256:46d1aba8201bbfc7a2c51e08446cab36bdab318c997223c8fc40733a5eedc71f", size = 11533, upload-time = "2020-03-30T01:51:37.351Z" }, + { url = "https://files.pythonhosted.org/packages/58/ba/cf15480bc238a15e10604ee7f0e3e20ea0bf9a55a4f0b4e50571e8d13e60/sphinxcontrib_shellcheck-1.1.2-py38-none-any.whl", hash = "sha256:4c5f2840418cd1d7d662c0b3f51a07625f1a8f92755b19347ce85e8258e9d847", size = 11532, upload-time = "2020-03-30T01:51:38.858Z" }, +] + +[[package]] +name = "starlette" +version = "0.47.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/57/d062573f391d062710d4088fa1369428c38d51460ab6fedff920efef932e/starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8", size = 2583948, upload-time = "2025-07-20T17:31:58.522Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/1f/b876b1f83aef204198a42dc101613fefccb32258e5428b5f9259677864b4/starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b", size = 72984, upload-time = "2025-07-20T17:31:56.738Z" }, +] + +[[package]] +name = "tomli" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = 
"2024-11-27T22:37:54.956Z" }, + { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" }, + { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" }, + { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" }, + { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" }, + { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" }, + { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" }, + { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" }, + { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" }, + { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" }, + { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" }, + { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" }, + { url = 
"https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" }, + { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" }, + { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" }, + { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" }, + { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" }, + { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" }, + { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" }, + { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" }, + { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" }, + { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" }, + { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" }, + { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/5a/da40306b885cc8c09109dc2e1abd358d5684b1425678151cdaed4731c822/typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36", size = 107673, upload-time = "2025-07-04T13:28:34.16Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/00/d631e67a838026495268c2f6884f3711a15a9a2a96cd244fdaea53b823fb/typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76", size = 43906, upload-time = "2025-07-04T13:28:32.743Z" }, +] + +[[package]] +name = "urllib3" +version = "1.26.20" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/e4/e8/6ff5e6bc22095cfc59b6ea711b687e2b7ed4bdb373f7eeec370a97d7392f/urllib3-1.26.20.tar.gz", hash = 
"sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32", size = 307380, upload-time = "2024-08-29T15:43:11.37Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/cf/8435d5a7159e2a9c83a95896ed596f68cf798005fe107cc655b5c5c14704/urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e", size = 144225, upload-time = "2024-08-29T15:43:08.921Z" }, +] + +[[package]] +name = "urllib3" +version = "2.5.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version == '3.14.*'", + "python_full_version >= '3.15' or (python_full_version >= '3.11' and python_full_version < '3.14')", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", version = "8.1.8", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "click", version = "8.2.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" }, +] + +[[package]] +name = "watchfiles" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/9a/d451fcc97d029f5812e898fd30a53fd8c15c7bbd058fd75cfc6beb9bd761/watchfiles-1.1.0.tar.gz", hash = "sha256:693ed7ec72cbfcee399e92c895362b6e66d63dac6b91e2c11ae03d10d503e575", size = 94406, upload-time = "2025-06-15T19:06:59.42Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/dd/579d1dc57f0f895426a1211c4ef3b0cb37eb9e642bb04bdcd962b5df206a/watchfiles-1.1.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:27f30e14aa1c1e91cb653f03a63445739919aef84c8d2517997a83155e7a2fcc", size = 405757, upload-time = "2025-06-15T19:04:51.058Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a0/7a0318cd874393344d48c34d53b3dd419466adf59a29ba5b51c88dd18b86/watchfiles-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3366f56c272232860ab45c77c3ca7b74ee819c8e1f6f35a7125556b198bbc6df", size = 397511, upload-time = "2025-06-15T19:04:52.79Z" }, + { url = 
"https://files.pythonhosted.org/packages/06/be/503514656d0555ec2195f60d810eca29b938772e9bfb112d5cd5ad6f6a9e/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8412eacef34cae2836d891836a7fff7b754d6bcac61f6c12ba5ca9bc7e427b68", size = 450739, upload-time = "2025-06-15T19:04:54.203Z" }, + { url = "https://files.pythonhosted.org/packages/4e/0d/a05dd9e5f136cdc29751816d0890d084ab99f8c17b86f25697288ca09bc7/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df670918eb7dd719642e05979fc84704af913d563fd17ed636f7c4783003fdcc", size = 458106, upload-time = "2025-06-15T19:04:55.607Z" }, + { url = "https://files.pythonhosted.org/packages/f1/fa/9cd16e4dfdb831072b7ac39e7bea986e52128526251038eb481effe9f48e/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7642b9bc4827b5518ebdb3b82698ada8c14c7661ddec5fe719f3e56ccd13c97", size = 484264, upload-time = "2025-06-15T19:04:57.009Z" }, + { url = "https://files.pythonhosted.org/packages/32/04/1da8a637c7e2b70e750a0308e9c8e662ada0cca46211fa9ef24a23937e0b/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:199207b2d3eeaeb80ef4411875a6243d9ad8bc35b07fc42daa6b801cc39cc41c", size = 597612, upload-time = "2025-06-15T19:04:58.409Z" }, + { url = "https://files.pythonhosted.org/packages/30/01/109f2762e968d3e58c95731a206e5d7d2a7abaed4299dd8a94597250153c/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a479466da6db5c1e8754caee6c262cd373e6e6c363172d74394f4bff3d84d7b5", size = 477242, upload-time = "2025-06-15T19:04:59.786Z" }, + { url = "https://files.pythonhosted.org/packages/b5/b8/46f58cf4969d3b7bc3ca35a98e739fa4085b0657a1540ccc29a1a0bc016f/watchfiles-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:935f9edd022ec13e447e5723a7d14456c8af254544cefbc533f6dd276c9aa0d9", size = 453148, upload-time = "2025-06-15T19:05:01.103Z" }, + { url = "https://files.pythonhosted.org/packages/a5/cd/8267594263b1770f1eb76914940d7b2d03ee55eca212302329608208e061/watchfiles-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8076a5769d6bdf5f673a19d51da05fc79e2bbf25e9fe755c47595785c06a8c72", size = 626574, upload-time = "2025-06-15T19:05:02.582Z" }, + { url = "https://files.pythonhosted.org/packages/a1/2f/7f2722e85899bed337cba715723e19185e288ef361360718973f891805be/watchfiles-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:86b1e28d4c37e89220e924305cd9f82866bb0ace666943a6e4196c5df4d58dcc", size = 624378, upload-time = "2025-06-15T19:05:03.719Z" }, + { url = "https://files.pythonhosted.org/packages/bf/20/64c88ec43d90a568234d021ab4b2a6f42a5230d772b987c3f9c00cc27b8b/watchfiles-1.1.0-cp310-cp310-win32.whl", hash = "sha256:d1caf40c1c657b27858f9774d5c0e232089bca9cb8ee17ce7478c6e9264d2587", size = 279829, upload-time = "2025-06-15T19:05:04.822Z" }, + { url = "https://files.pythonhosted.org/packages/39/5c/a9c1ed33de7af80935e4eac09570de679c6e21c07070aa99f74b4431f4d6/watchfiles-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:a89c75a5b9bc329131115a409d0acc16e8da8dfd5867ba59f1dd66ae7ea8fa82", size = 292192, upload-time = "2025-06-15T19:05:06.348Z" }, + { url = "https://files.pythonhosted.org/packages/8b/78/7401154b78ab484ccaaeef970dc2af0cb88b5ba8a1b415383da444cdd8d3/watchfiles-1.1.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c9649dfc57cc1f9835551deb17689e8d44666315f2e82d337b9f07bd76ae3aa2", size = 405751, upload-time = 
"2025-06-15T19:05:07.679Z" }, + { url = "https://files.pythonhosted.org/packages/76/63/e6c3dbc1f78d001589b75e56a288c47723de28c580ad715eb116639152b5/watchfiles-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:406520216186b99374cdb58bc48e34bb74535adec160c8459894884c983a149c", size = 397313, upload-time = "2025-06-15T19:05:08.764Z" }, + { url = "https://files.pythonhosted.org/packages/6c/a2/8afa359ff52e99af1632f90cbf359da46184207e893a5f179301b0c8d6df/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb45350fd1dc75cd68d3d72c47f5b513cb0578da716df5fba02fff31c69d5f2d", size = 450792, upload-time = "2025-06-15T19:05:09.869Z" }, + { url = "https://files.pythonhosted.org/packages/1d/bf/7446b401667f5c64972a57a0233be1104157fc3abf72c4ef2666c1bd09b2/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:11ee4444250fcbeb47459a877e5e80ed994ce8e8d20283857fc128be1715dac7", size = 458196, upload-time = "2025-06-15T19:05:11.91Z" }, + { url = "https://files.pythonhosted.org/packages/58/2f/501ddbdfa3fa874ea5597c77eeea3d413579c29af26c1091b08d0c792280/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bda8136e6a80bdea23e5e74e09df0362744d24ffb8cd59c4a95a6ce3d142f79c", size = 484788, upload-time = "2025-06-15T19:05:13.373Z" }, + { url = "https://files.pythonhosted.org/packages/61/1e/9c18eb2eb5c953c96bc0e5f626f0e53cfef4bd19bd50d71d1a049c63a575/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b915daeb2d8c1f5cee4b970f2e2c988ce6514aace3c9296e58dd64dc9aa5d575", size = 597879, upload-time = "2025-06-15T19:05:14.725Z" }, + { url = "https://files.pythonhosted.org/packages/8b/6c/1467402e5185d89388b4486745af1e0325007af0017c3384cc786fff0542/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed8fc66786de8d0376f9f913c09e963c66e90ced9aa11997f93bdb30f7c872a8", size = 477447, upload-time = "2025-06-15T19:05:15.775Z" }, + { url = "https://files.pythonhosted.org/packages/2b/a1/ec0a606bde4853d6c4a578f9391eeb3684a9aea736a8eb217e3e00aa89a1/watchfiles-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe4371595edf78c41ef8ac8df20df3943e13defd0efcb732b2e393b5a8a7a71f", size = 453145, upload-time = "2025-06-15T19:05:17.17Z" }, + { url = "https://files.pythonhosted.org/packages/90/b9/ef6f0c247a6a35d689fc970dc7f6734f9257451aefb30def5d100d6246a5/watchfiles-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b7c5f6fe273291f4d414d55b2c80d33c457b8a42677ad14b4b47ff025d0893e4", size = 626539, upload-time = "2025-06-15T19:05:18.557Z" }, + { url = "https://files.pythonhosted.org/packages/34/44/6ffda5537085106ff5aaa762b0d130ac6c75a08015dd1621376f708c94de/watchfiles-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7738027989881e70e3723c75921f1efa45225084228788fc59ea8c6d732eb30d", size = 624472, upload-time = "2025-06-15T19:05:19.588Z" }, + { url = "https://files.pythonhosted.org/packages/c3/e3/71170985c48028fa3f0a50946916a14055e741db11c2e7bc2f3b61f4d0e3/watchfiles-1.1.0-cp311-cp311-win32.whl", hash = "sha256:622d6b2c06be19f6e89b1d951485a232e3b59618def88dbeda575ed8f0d8dbf2", size = 279348, upload-time = "2025-06-15T19:05:20.856Z" }, + { url = "https://files.pythonhosted.org/packages/89/1b/3e39c68b68a7a171070f81fc2561d23ce8d6859659406842a0e4bebf3bba/watchfiles-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:48aa25e5992b61debc908a61ab4d3f216b64f44fdaa71eb082d8b2de846b7d12", size = 292607, 
upload-time = "2025-06-15T19:05:21.937Z" }, + { url = "https://files.pythonhosted.org/packages/61/9f/2973b7539f2bdb6ea86d2c87f70f615a71a1fc2dba2911795cea25968aea/watchfiles-1.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:00645eb79a3faa70d9cb15c8d4187bb72970b2470e938670240c7998dad9f13a", size = 285056, upload-time = "2025-06-15T19:05:23.12Z" }, + { url = "https://files.pythonhosted.org/packages/f6/b8/858957045a38a4079203a33aaa7d23ea9269ca7761c8a074af3524fbb240/watchfiles-1.1.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9dc001c3e10de4725c749d4c2f2bdc6ae24de5a88a339c4bce32300a31ede179", size = 402339, upload-time = "2025-06-15T19:05:24.516Z" }, + { url = "https://files.pythonhosted.org/packages/80/28/98b222cca751ba68e88521fabd79a4fab64005fc5976ea49b53fa205d1fa/watchfiles-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9ba68ec283153dead62cbe81872d28e053745f12335d037de9cbd14bd1877f5", size = 394409, upload-time = "2025-06-15T19:05:25.469Z" }, + { url = "https://files.pythonhosted.org/packages/86/50/dee79968566c03190677c26f7f47960aff738d32087087bdf63a5473e7df/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130fc497b8ee68dce163e4254d9b0356411d1490e868bd8790028bc46c5cc297", size = 450939, upload-time = "2025-06-15T19:05:26.494Z" }, + { url = "https://files.pythonhosted.org/packages/40/45/a7b56fb129700f3cfe2594a01aa38d033b92a33dddce86c8dfdfc1247b72/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:50a51a90610d0845a5931a780d8e51d7bd7f309ebc25132ba975aca016b576a0", size = 457270, upload-time = "2025-06-15T19:05:27.466Z" }, + { url = "https://files.pythonhosted.org/packages/b5/c8/fa5ef9476b1d02dc6b5e258f515fcaaecf559037edf8b6feffcbc097c4b8/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc44678a72ac0910bac46fa6a0de6af9ba1355669b3dfaf1ce5f05ca7a74364e", size = 483370, upload-time = "2025-06-15T19:05:28.548Z" }, + { url = "https://files.pythonhosted.org/packages/98/68/42cfcdd6533ec94f0a7aab83f759ec11280f70b11bfba0b0f885e298f9bd/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a543492513a93b001975ae283a51f4b67973662a375a403ae82f420d2c7205ee", size = 598654, upload-time = "2025-06-15T19:05:29.997Z" }, + { url = "https://files.pythonhosted.org/packages/d3/74/b2a1544224118cc28df7e59008a929e711f9c68ce7d554e171b2dc531352/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ac164e20d17cc285f2b94dc31c384bc3aa3dd5e7490473b3db043dd70fbccfd", size = 478667, upload-time = "2025-06-15T19:05:31.172Z" }, + { url = "https://files.pythonhosted.org/packages/8c/77/e3362fe308358dc9f8588102481e599c83e1b91c2ae843780a7ded939a35/watchfiles-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7590d5a455321e53857892ab8879dce62d1f4b04748769f5adf2e707afb9d4f", size = 452213, upload-time = "2025-06-15T19:05:32.299Z" }, + { url = "https://files.pythonhosted.org/packages/6e/17/c8f1a36540c9a1558d4faf08e909399e8133599fa359bf52ec8fcee5be6f/watchfiles-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:37d3d3f7defb13f62ece99e9be912afe9dd8a0077b7c45ee5a57c74811d581a4", size = 626718, upload-time = "2025-06-15T19:05:33.415Z" }, + { url = "https://files.pythonhosted.org/packages/26/45/fb599be38b4bd38032643783d7496a26a6f9ae05dea1a42e58229a20ac13/watchfiles-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:7080c4bb3efd70a07b1cc2df99a7aa51d98685be56be6038c3169199d0a1c69f", size = 623098, upload-time = "2025-06-15T19:05:34.534Z" }, + { url = "https://files.pythonhosted.org/packages/a1/e7/fdf40e038475498e160cd167333c946e45d8563ae4dd65caf757e9ffe6b4/watchfiles-1.1.0-cp312-cp312-win32.whl", hash = "sha256:cbcf8630ef4afb05dc30107bfa17f16c0896bb30ee48fc24bf64c1f970f3b1fd", size = 279209, upload-time = "2025-06-15T19:05:35.577Z" }, + { url = "https://files.pythonhosted.org/packages/3f/d3/3ae9d5124ec75143bdf088d436cba39812122edc47709cd2caafeac3266f/watchfiles-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:cbd949bdd87567b0ad183d7676feb98136cde5bb9025403794a4c0db28ed3a47", size = 292786, upload-time = "2025-06-15T19:05:36.559Z" }, + { url = "https://files.pythonhosted.org/packages/26/2f/7dd4fc8b5f2b34b545e19629b4a018bfb1de23b3a496766a2c1165ca890d/watchfiles-1.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:0a7d40b77f07be87c6faa93d0951a0fcd8cbca1ddff60a1b65d741bac6f3a9f6", size = 284343, upload-time = "2025-06-15T19:05:37.5Z" }, + { url = "https://files.pythonhosted.org/packages/d3/42/fae874df96595556a9089ade83be34a2e04f0f11eb53a8dbf8a8a5e562b4/watchfiles-1.1.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:5007f860c7f1f8df471e4e04aaa8c43673429047d63205d1630880f7637bca30", size = 402004, upload-time = "2025-06-15T19:05:38.499Z" }, + { url = "https://files.pythonhosted.org/packages/fa/55/a77e533e59c3003d9803c09c44c3651224067cbe7fb5d574ddbaa31e11ca/watchfiles-1.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:20ecc8abbd957046f1fe9562757903f5eaf57c3bce70929fda6c7711bb58074a", size = 393671, upload-time = "2025-06-15T19:05:39.52Z" }, + { url = "https://files.pythonhosted.org/packages/05/68/b0afb3f79c8e832e6571022611adbdc36e35a44e14f129ba09709aa4bb7a/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2f0498b7d2a3c072766dba3274fe22a183dbea1f99d188f1c6c72209a1063dc", size = 449772, upload-time = "2025-06-15T19:05:40.897Z" }, + { url = "https://files.pythonhosted.org/packages/ff/05/46dd1f6879bc40e1e74c6c39a1b9ab9e790bf1f5a2fe6c08b463d9a807f4/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:239736577e848678e13b201bba14e89718f5c2133dfd6b1f7846fa1b58a8532b", size = 456789, upload-time = "2025-06-15T19:05:42.045Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ca/0eeb2c06227ca7f12e50a47a3679df0cd1ba487ea19cf844a905920f8e95/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eff4b8d89f444f7e49136dc695599a591ff769300734446c0a86cba2eb2f9895", size = 482551, upload-time = "2025-06-15T19:05:43.781Z" }, + { url = "https://files.pythonhosted.org/packages/31/47/2cecbd8694095647406645f822781008cc524320466ea393f55fe70eed3b/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12b0a02a91762c08f7264e2e79542f76870c3040bbc847fb67410ab81474932a", size = 597420, upload-time = "2025-06-15T19:05:45.244Z" }, + { url = "https://files.pythonhosted.org/packages/d9/7e/82abc4240e0806846548559d70f0b1a6dfdca75c1b4f9fa62b504ae9b083/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:29e7bc2eee15cbb339c68445959108803dc14ee0c7b4eea556400131a8de462b", size = 477950, upload-time = "2025-06-15T19:05:46.332Z" }, + { url = "https://files.pythonhosted.org/packages/25/0d/4d564798a49bf5482a4fa9416dea6b6c0733a3b5700cb8a5a503c4b15853/watchfiles-1.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:d9481174d3ed982e269c090f780122fb59cee6c3796f74efe74e70f7780ed94c", size = 451706, upload-time = "2025-06-15T19:05:47.459Z" }, + { url = "https://files.pythonhosted.org/packages/81/b5/5516cf46b033192d544102ea07c65b6f770f10ed1d0a6d388f5d3874f6e4/watchfiles-1.1.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:80f811146831c8c86ab17b640801c25dc0a88c630e855e2bef3568f30434d52b", size = 625814, upload-time = "2025-06-15T19:05:48.654Z" }, + { url = "https://files.pythonhosted.org/packages/0c/dd/7c1331f902f30669ac3e754680b6edb9a0dd06dea5438e61128111fadd2c/watchfiles-1.1.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:60022527e71d1d1fda67a33150ee42869042bce3d0fcc9cc49be009a9cded3fb", size = 622820, upload-time = "2025-06-15T19:05:50.088Z" }, + { url = "https://files.pythonhosted.org/packages/1b/14/36d7a8e27cd128d7b1009e7715a7c02f6c131be9d4ce1e5c3b73d0e342d8/watchfiles-1.1.0-cp313-cp313-win32.whl", hash = "sha256:32d6d4e583593cb8576e129879ea0991660b935177c0f93c6681359b3654bfa9", size = 279194, upload-time = "2025-06-15T19:05:51.186Z" }, + { url = "https://files.pythonhosted.org/packages/25/41/2dd88054b849aa546dbeef5696019c58f8e0774f4d1c42123273304cdb2e/watchfiles-1.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:f21af781a4a6fbad54f03c598ab620e3a77032c5878f3d780448421a6e1818c7", size = 292349, upload-time = "2025-06-15T19:05:52.201Z" }, + { url = "https://files.pythonhosted.org/packages/c8/cf/421d659de88285eb13941cf11a81f875c176f76a6d99342599be88e08d03/watchfiles-1.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:5366164391873ed76bfdf618818c82084c9db7fac82b64a20c44d335eec9ced5", size = 283836, upload-time = "2025-06-15T19:05:53.265Z" }, + { url = "https://files.pythonhosted.org/packages/45/10/6faf6858d527e3599cc50ec9fcae73590fbddc1420bd4fdccfebffeedbc6/watchfiles-1.1.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:17ab167cca6339c2b830b744eaf10803d2a5b6683be4d79d8475d88b4a8a4be1", size = 400343, upload-time = "2025-06-15T19:05:54.252Z" }, + { url = "https://files.pythonhosted.org/packages/03/20/5cb7d3966f5e8c718006d0e97dfe379a82f16fecd3caa7810f634412047a/watchfiles-1.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:328dbc9bff7205c215a7807da7c18dce37da7da718e798356212d22696404339", size = 392916, upload-time = "2025-06-15T19:05:55.264Z" }, + { url = "https://files.pythonhosted.org/packages/8c/07/d8f1176328fa9e9581b6f120b017e286d2a2d22ae3f554efd9515c8e1b49/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7208ab6e009c627b7557ce55c465c98967e8caa8b11833531fdf95799372633", size = 449582, upload-time = "2025-06-15T19:05:56.317Z" }, + { url = "https://files.pythonhosted.org/packages/66/e8/80a14a453cf6038e81d072a86c05276692a1826471fef91df7537dba8b46/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a8f6f72974a19efead54195bc9bed4d850fc047bb7aa971268fd9a8387c89011", size = 456752, upload-time = "2025-06-15T19:05:57.359Z" }, + { url = "https://files.pythonhosted.org/packages/5a/25/0853b3fe0e3c2f5af9ea60eb2e781eade939760239a72c2d38fc4cc335f6/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d181ef50923c29cf0450c3cd47e2f0557b62218c50b2ab8ce2ecaa02bd97e670", size = 481436, upload-time = "2025-06-15T19:05:58.447Z" }, + { url = "https://files.pythonhosted.org/packages/fe/9e/4af0056c258b861fbb29dcb36258de1e2b857be4a9509e6298abcf31e5c9/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:adb4167043d3a78280d5d05ce0ba22055c266cf8655ce942f2fb881262ff3cdf", size = 596016, upload-time = "2025-06-15T19:05:59.59Z" }, + { url = "https://files.pythonhosted.org/packages/c5/fa/95d604b58aa375e781daf350897aaaa089cff59d84147e9ccff2447c8294/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c5701dc474b041e2934a26d31d39f90fac8a3dee2322b39f7729867f932b1d4", size = 476727, upload-time = "2025-06-15T19:06:01.086Z" }, + { url = "https://files.pythonhosted.org/packages/65/95/fe479b2664f19be4cf5ceeb21be05afd491d95f142e72d26a42f41b7c4f8/watchfiles-1.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b067915e3c3936966a8607f6fe5487df0c9c4afb85226613b520890049deea20", size = 451864, upload-time = "2025-06-15T19:06:02.144Z" }, + { url = "https://files.pythonhosted.org/packages/d3/8a/3c4af14b93a15ce55901cd7a92e1a4701910f1768c78fb30f61d2b79785b/watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:9c733cda03b6d636b4219625a4acb5c6ffb10803338e437fb614fef9516825ef", size = 625626, upload-time = "2025-06-15T19:06:03.578Z" }, + { url = "https://files.pythonhosted.org/packages/da/f5/cf6aa047d4d9e128f4b7cde615236a915673775ef171ff85971d698f3c2c/watchfiles-1.1.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:cc08ef8b90d78bfac66f0def80240b0197008e4852c9f285907377b2947ffdcb", size = 622744, upload-time = "2025-06-15T19:06:05.066Z" }, + { url = "https://files.pythonhosted.org/packages/2c/00/70f75c47f05dea6fd30df90f047765f6fc2d6eb8b5a3921379b0b04defa2/watchfiles-1.1.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:9974d2f7dc561cce3bb88dfa8eb309dab64c729de85fba32e98d75cf24b66297", size = 402114, upload-time = "2025-06-15T19:06:06.186Z" }, + { url = "https://files.pythonhosted.org/packages/53/03/acd69c48db4a1ed1de26b349d94077cca2238ff98fd64393f3e97484cae6/watchfiles-1.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c68e9f1fcb4d43798ad8814c4c1b61547b014b667216cb754e606bfade587018", size = 393879, upload-time = "2025-06-15T19:06:07.369Z" }, + { url = "https://files.pythonhosted.org/packages/2f/c8/a9a2a6f9c8baa4eceae5887fecd421e1b7ce86802bcfc8b6a942e2add834/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95ab1594377effac17110e1352989bdd7bdfca9ff0e5eeccd8c69c5389b826d0", size = 450026, upload-time = "2025-06-15T19:06:08.476Z" }, + { url = "https://files.pythonhosted.org/packages/fe/51/d572260d98388e6e2b967425c985e07d47ee6f62e6455cefb46a6e06eda5/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fba9b62da882c1be1280a7584ec4515d0a6006a94d6e5819730ec2eab60ffe12", size = 457917, upload-time = "2025-06-15T19:06:09.988Z" }, + { url = "https://files.pythonhosted.org/packages/c6/2d/4258e52917bf9f12909b6ec314ff9636276f3542f9d3807d143f27309104/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3434e401f3ce0ed6b42569128b3d1e3af773d7ec18751b918b89cd49c14eaafb", size = 483602, upload-time = "2025-06-15T19:06:11.088Z" }, + { url = "https://files.pythonhosted.org/packages/84/99/bee17a5f341a4345fe7b7972a475809af9e528deba056f8963d61ea49f75/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa257a4d0d21fcbca5b5fcba9dca5a78011cb93c0323fb8855c6d2dfbc76eb77", size = 596758, upload-time = "2025-06-15T19:06:12.197Z" }, + { url = 
"https://files.pythonhosted.org/packages/40/76/e4bec1d59b25b89d2b0716b41b461ed655a9a53c60dc78ad5771fda5b3e6/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7fd1b3879a578a8ec2076c7961076df540b9af317123f84569f5a9ddee64ce92", size = 477601, upload-time = "2025-06-15T19:06:13.391Z" }, + { url = "https://files.pythonhosted.org/packages/1f/fa/a514292956f4a9ce3c567ec0c13cce427c158e9f272062685a8a727d08fc/watchfiles-1.1.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc7a30eeb0e20ecc5f4bd113cd69dcdb745a07c68c0370cea919f373f65d9e", size = 451936, upload-time = "2025-06-15T19:06:14.656Z" }, + { url = "https://files.pythonhosted.org/packages/32/5d/c3bf927ec3bbeb4566984eba8dd7a8eb69569400f5509904545576741f88/watchfiles-1.1.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:891c69e027748b4a73847335d208e374ce54ca3c335907d381fde4e41661b13b", size = 626243, upload-time = "2025-06-15T19:06:16.232Z" }, + { url = "https://files.pythonhosted.org/packages/e6/65/6e12c042f1a68c556802a84d54bb06d35577c81e29fba14019562479159c/watchfiles-1.1.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:12fe8eaffaf0faa7906895b4f8bb88264035b3f0243275e0bf24af0436b27259", size = 623073, upload-time = "2025-06-15T19:06:17.457Z" }, + { url = "https://files.pythonhosted.org/packages/89/ab/7f79d9bf57329e7cbb0a6fd4c7bd7d0cee1e4a8ef0041459f5409da3506c/watchfiles-1.1.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:bfe3c517c283e484843cb2e357dd57ba009cff351edf45fb455b5fbd1f45b15f", size = 400872, upload-time = "2025-06-15T19:06:18.57Z" }, + { url = "https://files.pythonhosted.org/packages/df/d5/3f7bf9912798e9e6c516094db6b8932df53b223660c781ee37607030b6d3/watchfiles-1.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a9ccbf1f129480ed3044f540c0fdbc4ee556f7175e5ab40fe077ff6baf286d4e", size = 392877, upload-time = "2025-06-15T19:06:19.55Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c5/54ec7601a2798604e01c75294770dbee8150e81c6e471445d7601610b495/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba0e3255b0396cac3cc7bbace76404dd72b5438bf0d8e7cefa2f79a7f3649caa", size = 449645, upload-time = "2025-06-15T19:06:20.66Z" }, + { url = "https://files.pythonhosted.org/packages/0a/04/c2f44afc3b2fce21ca0b7802cbd37ed90a29874f96069ed30a36dfe57c2b/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4281cd9fce9fc0a9dbf0fc1217f39bf9cf2b4d315d9626ef1d4e87b84699e7e8", size = 457424, upload-time = "2025-06-15T19:06:21.712Z" }, + { url = "https://files.pythonhosted.org/packages/9f/b0/eec32cb6c14d248095261a04f290636da3df3119d4040ef91a4a50b29fa5/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d2404af8db1329f9a3c9b79ff63e0ae7131986446901582067d9304ae8aaf7f", size = 481584, upload-time = "2025-06-15T19:06:22.777Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e2/ca4bb71c68a937d7145aa25709e4f5d68eb7698a25ce266e84b55d591bbd/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e78b6ed8165996013165eeabd875c5dfc19d41b54f94b40e9fff0eb3193e5e8e", size = 596675, upload-time = "2025-06-15T19:06:24.226Z" }, + { url = "https://files.pythonhosted.org/packages/a1/dd/b0e4b7fb5acf783816bc950180a6cd7c6c1d2cf7e9372c0ea634e722712b/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:249590eb75ccc117f488e2fabd1bfa33c580e24b96f00658ad88e38844a040bb", size = 
477363, upload-time = "2025-06-15T19:06:25.42Z" }, + { url = "https://files.pythonhosted.org/packages/69/c4/088825b75489cb5b6a761a4542645718893d395d8c530b38734f19da44d2/watchfiles-1.1.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d05686b5487cfa2e2c28ff1aa370ea3e6c5accfe6435944ddea1e10d93872147", size = 452240, upload-time = "2025-06-15T19:06:26.552Z" }, + { url = "https://files.pythonhosted.org/packages/10/8c/22b074814970eeef43b7c44df98c3e9667c1f7bf5b83e0ff0201b0bd43f9/watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:d0e10e6f8f6dc5762adee7dece33b722282e1f59aa6a55da5d493a97282fedd8", size = 625607, upload-time = "2025-06-15T19:06:27.606Z" }, + { url = "https://files.pythonhosted.org/packages/32/fa/a4f5c2046385492b2273213ef815bf71a0d4c1943b784fb904e184e30201/watchfiles-1.1.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:af06c863f152005c7592df1d6a7009c836a247c9d8adb78fef8575a5a98699db", size = 623315, upload-time = "2025-06-15T19:06:29.076Z" }, + { url = "https://files.pythonhosted.org/packages/47/8a/a45db804b9f0740f8408626ab2bca89c3136432e57c4673b50180bf85dd9/watchfiles-1.1.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:865c8e95713744cf5ae261f3067861e9da5f1370ba91fc536431e29b418676fa", size = 406400, upload-time = "2025-06-15T19:06:30.233Z" }, + { url = "https://files.pythonhosted.org/packages/64/06/a08684f628fb41addd451845aceedc2407dc3d843b4b060a7c4350ddee0c/watchfiles-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42f92befc848bb7a19658f21f3e7bae80d7d005d13891c62c2cd4d4d0abb3433", size = 397920, upload-time = "2025-06-15T19:06:31.315Z" }, + { url = "https://files.pythonhosted.org/packages/79/e6/e10d5675af653b1b07d4156906858041149ca222edaf8995877f2605ba9e/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa0cc8365ab29487eb4f9979fd41b22549853389e22d5de3f134a6796e1b05a4", size = 451196, upload-time = "2025-06-15T19:06:32.435Z" }, + { url = "https://files.pythonhosted.org/packages/f6/8a/facd6988100cd0f39e89f6c550af80edb28e3a529e1ee662e750663e6b36/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:90ebb429e933645f3da534c89b29b665e285048973b4d2b6946526888c3eb2c7", size = 458218, upload-time = "2025-06-15T19:06:33.503Z" }, + { url = "https://files.pythonhosted.org/packages/90/26/34cbcbc4d0f2f8f9cc243007e65d741ae039f7a11ef8ec6e9cd25bee08d1/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c588c45da9b08ab3da81d08d7987dae6d2a3badd63acdb3e206a42dbfa7cb76f", size = 484851, upload-time = "2025-06-15T19:06:34.541Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1f/f59faa9fc4b0e36dbcdd28a18c430416443b309d295d8b82e18192d120ad/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c55b0f9f68590115c25272b06e63f0824f03d4fc7d6deed43d8ad5660cabdbf", size = 599520, upload-time = "2025-06-15T19:06:35.785Z" }, + { url = "https://files.pythonhosted.org/packages/83/72/3637abecb3bf590529f5154ca000924003e5f4bbb9619744feeaf6f0b70b/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd17a1e489f02ce9117b0de3c0b1fab1c3e2eedc82311b299ee6b6faf6c23a29", size = 477956, upload-time = "2025-06-15T19:06:36.965Z" }, + { url = "https://files.pythonhosted.org/packages/f7/f3/d14ffd9acc0c1bd4790378995e320981423263a5d70bd3929e2e0dc87fff/watchfiles-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:da71945c9ace018d8634822f16cbc2a78323ef6c876b1d34bbf5d5222fd6a72e", size = 453196, upload-time = "2025-06-15T19:06:38.024Z" }, + { url = "https://files.pythonhosted.org/packages/7f/38/78ad77bd99e20c0fdc82262be571ef114fc0beef9b43db52adb939768c38/watchfiles-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:51556d5004887045dba3acdd1fdf61dddea2be0a7e18048b5e853dcd37149b86", size = 627479, upload-time = "2025-06-15T19:06:39.442Z" }, + { url = "https://files.pythonhosted.org/packages/e6/cf/549d50a22fcc83f1017c6427b1c76c053233f91b526f4ad7a45971e70c0b/watchfiles-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04e4ed5d1cd3eae68c89bcc1a485a109f39f2fd8de05f705e98af6b5f1861f1f", size = 624414, upload-time = "2025-06-15T19:06:40.859Z" }, + { url = "https://files.pythonhosted.org/packages/72/de/57d6e40dc9140af71c12f3a9fc2d3efc5529d93981cd4d265d484d7c9148/watchfiles-1.1.0-cp39-cp39-win32.whl", hash = "sha256:c600e85f2ffd9f1035222b1a312aff85fd11ea39baff1d705b9b047aad2ce267", size = 280020, upload-time = "2025-06-15T19:06:41.89Z" }, + { url = "https://files.pythonhosted.org/packages/88/bb/7d287fc2a762396b128a0fca2dbae29386e0a242b81d1046daf389641db3/watchfiles-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3aba215958d88182e8d2acba0fdaf687745180974946609119953c0e112397dc", size = 292758, upload-time = "2025-06-15T19:06:43.251Z" }, + { url = "https://files.pythonhosted.org/packages/be/7c/a3d7c55cfa377c2f62c4ae3c6502b997186bc5e38156bafcb9b653de9a6d/watchfiles-1.1.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a6fd40bbb50d24976eb275ccb55cd1951dfb63dbc27cae3066a6ca5f4beabd5", size = 406748, upload-time = "2025-06-15T19:06:44.2Z" }, + { url = "https://files.pythonhosted.org/packages/38/d0/c46f1b2c0ca47f3667b144de6f0515f6d1c670d72f2ca29861cac78abaa1/watchfiles-1.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9f811079d2f9795b5d48b55a37aa7773680a5659afe34b54cc1d86590a51507d", size = 398801, upload-time = "2025-06-15T19:06:45.774Z" }, + { url = "https://files.pythonhosted.org/packages/70/9c/9a6a42e97f92eeed77c3485a43ea96723900aefa3ac739a8c73f4bff2cd7/watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2726d7bfd9f76158c84c10a409b77a320426540df8c35be172444394b17f7ea", size = 451528, upload-time = "2025-06-15T19:06:46.791Z" }, + { url = "https://files.pythonhosted.org/packages/51/7b/98c7f4f7ce7ff03023cf971cd84a3ee3b790021ae7584ffffa0eb2554b96/watchfiles-1.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df32d59cb9780f66d165a9a7a26f19df2c7d24e3bd58713108b41d0ff4f929c6", size = 454095, upload-time = "2025-06-15T19:06:48.211Z" }, + { url = "https://files.pythonhosted.org/packages/8c/6b/686dcf5d3525ad17b384fd94708e95193529b460a1b7bf40851f1328ec6e/watchfiles-1.1.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0ece16b563b17ab26eaa2d52230c9a7ae46cf01759621f4fbbca280e438267b3", size = 406910, upload-time = "2025-06-15T19:06:49.335Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d3/71c2dcf81dc1edcf8af9f4d8d63b1316fb0a2dd90cbfd427e8d9dd584a90/watchfiles-1.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:51b81e55d40c4b4aa8658427a3ee7ea847c591ae9e8b81ef94a90b668999353c", size = 398816, upload-time = "2025-06-15T19:06:50.433Z" }, + { url = "https://files.pythonhosted.org/packages/b8/fa/12269467b2fc006f8fce4cd6c3acfa77491dd0777d2a747415f28ccc8c60/watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:f2bcdc54ea267fe72bfc7d83c041e4eb58d7d8dc6f578dfddb52f037ce62f432", size = 451584, upload-time = "2025-06-15T19:06:51.834Z" }, + { url = "https://files.pythonhosted.org/packages/bd/d3/254cea30f918f489db09d6a8435a7de7047f8cb68584477a515f160541d6/watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:923fec6e5461c42bd7e3fd5ec37492c6f3468be0499bc0707b4bbbc16ac21792", size = 454009, upload-time = "2025-06-15T19:06:52.896Z" }, + { url = "https://files.pythonhosted.org/packages/48/93/5c96bdb65e7f88f7da40645f34c0a3c317a2931ed82161e93c91e8eddd27/watchfiles-1.1.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7b3443f4ec3ba5aa00b0e9fa90cf31d98321cbff8b925a7c7b84161619870bc9", size = 406640, upload-time = "2025-06-15T19:06:54.868Z" }, + { url = "https://files.pythonhosted.org/packages/e3/25/09204836e93e1b99cce88802ce87264a1d20610c7a8f6de24def27ad95b1/watchfiles-1.1.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7049e52167fc75fc3cc418fc13d39a8e520cbb60ca08b47f6cedb85e181d2f2a", size = 398543, upload-time = "2025-06-15T19:06:55.95Z" }, + { url = "https://files.pythonhosted.org/packages/5e/dc/6f324a6f32c5ab73b54311b5f393a79df34c1584b8d2404cf7e6d780aa5d/watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54062ef956807ba806559b3c3d52105ae1827a0d4ab47b621b31132b6b7e2866", size = 451787, upload-time = "2025-06-15T19:06:56.998Z" }, + { url = "https://files.pythonhosted.org/packages/45/5d/1d02ef4caa4ec02389e72d5594cdf9c67f1800a7c380baa55063c30c6598/watchfiles-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a7bd57a1bb02f9d5c398c0c1675384e7ab1dd39da0ca50b7f09af45fa435277", size = 454272, upload-time = "2025-06-15T19:06:58.055Z" }, +] + +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/da/6462a9f510c0c49837bbc9345aca92d767a56c1fb2939e1579df1e1cdcf7/websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b", size = 175423, upload-time = "2025-03-05T20:01:35.363Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9f/9d11c1a4eb046a9e106483b9ff69bce7ac880443f00e5ce64261b47b07e7/websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205", size = 173080, upload-time = "2025-03-05T20:01:37.304Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4f/b462242432d93ea45f297b6179c7333dd0402b855a912a04e7fc61c0d71f/websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a", size = 173329, upload-time = "2025-03-05T20:01:39.668Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0c/6afa1f4644d7ed50284ac59cc70ef8abd44ccf7d45850d989ea7310538d0/websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e", size = 182312, upload-time = "2025-03-05T20:01:41.815Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/d4/ffc8bd1350b229ca7a4db2a3e1c482cf87cea1baccd0ef3e72bc720caeec/websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf", size = 181319, upload-time = "2025-03-05T20:01:43.967Z" }, + { url = "https://files.pythonhosted.org/packages/97/3a/5323a6bb94917af13bbb34009fac01e55c51dfde354f63692bf2533ffbc2/websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb", size = 181631, upload-time = "2025-03-05T20:01:46.104Z" }, + { url = "https://files.pythonhosted.org/packages/a6/cc/1aeb0f7cee59ef065724041bb7ed667b6ab1eeffe5141696cccec2687b66/websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d", size = 182016, upload-time = "2025-03-05T20:01:47.603Z" }, + { url = "https://files.pythonhosted.org/packages/79/f9/c86f8f7af208e4161a7f7e02774e9d0a81c632ae76db2ff22549e1718a51/websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9", size = 181426, upload-time = "2025-03-05T20:01:48.949Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b9/828b0bc6753db905b91df6ae477c0b14a141090df64fb17f8a9d7e3516cf/websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c", size = 181360, upload-time = "2025-03-05T20:01:50.938Z" }, + { url = "https://files.pythonhosted.org/packages/89/fb/250f5533ec468ba6327055b7d98b9df056fb1ce623b8b6aaafb30b55d02e/websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256", size = 176388, upload-time = "2025-03-05T20:01:52.213Z" }, + { url = "https://files.pythonhosted.org/packages/1c/46/aca7082012768bb98e5608f01658ff3ac8437e563eca41cf068bd5849a5e/websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41", size = 176830, upload-time = "2025-03-05T20:01:53.922Z" }, + { url = "https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423, upload-time = "2025-03-05T20:01:56.276Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082, upload-time = "2025-03-05T20:01:57.563Z" }, + { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330, upload-time = "2025-03-05T20:01:59.063Z" }, + { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878, upload-time = 
"2025-03-05T20:02:00.305Z" }, + { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883, upload-time = "2025-03-05T20:02:03.148Z" }, + { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252, upload-time = "2025-03-05T20:02:05.29Z" }, + { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521, upload-time = "2025-03-05T20:02:07.458Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958, upload-time = "2025-03-05T20:02:09.842Z" }, + { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918, upload-time = "2025-03-05T20:02:11.968Z" }, + { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388, upload-time = "2025-03-05T20:02:13.32Z" }, + { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828, upload-time = "2025-03-05T20:02:14.585Z" }, + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" }, + { url = "https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", 
size = 183152, upload-time = "2025-03-05T20:02:22.286Z" }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = "2025-03-05T20:02:26.99Z" }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = "2025-03-05T20:02:50.14Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" }, + { url = "https://files.pythonhosted.org/packages/36/db/3fff0bcbe339a6fa6a3b9e3fbc2bfb321ec2f4cd233692272c5a8d6cf801/websockets-15.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5f4c04ead5aed67c8a1a20491d54cdfba5884507a48dd798ecaf13c74c4489f5", size = 175424, upload-time = "2025-03-05T20:02:56.505Z" }, + { url = "https://files.pythonhosted.org/packages/46/e6/519054c2f477def4165b0ec060ad664ed174e140b0d1cbb9fafa4a54f6db/websockets-15.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:abdc0c6c8c648b4805c5eacd131910d2a7f6455dfd3becab248ef108e89ab16a", size = 173077, upload-time = "2025-03-05T20:02:58.37Z" }, + { url = "https://files.pythonhosted.org/packages/1a/21/c0712e382df64c93a0d16449ecbf87b647163485ca1cc3f6cbadb36d2b03/websockets-15.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a625e06551975f4b7ea7102bc43895b90742746797e2e14b70ed61c43a90f09b", size = 173324, upload-time = "2025-03-05T20:02:59.773Z" }, + { url = 
"https://files.pythonhosted.org/packages/1c/cb/51ba82e59b3a664df54beed8ad95517c1b4dc1a913730e7a7db778f21291/websockets-15.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d591f8de75824cbb7acad4e05d2d710484f15f29d4a915092675ad3456f11770", size = 182094, upload-time = "2025-03-05T20:03:01.827Z" }, + { url = "https://files.pythonhosted.org/packages/fb/0f/bf3788c03fec679bcdaef787518dbe60d12fe5615a544a6d4cf82f045193/websockets-15.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47819cea040f31d670cc8d324bb6435c6f133b8c7a19ec3d61634e62f8d8f9eb", size = 181094, upload-time = "2025-03-05T20:03:03.123Z" }, + { url = "https://files.pythonhosted.org/packages/5e/da/9fb8c21edbc719b66763a571afbaf206cb6d3736d28255a46fc2fe20f902/websockets-15.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac017dd64572e5c3bd01939121e4d16cf30e5d7e110a119399cf3133b63ad054", size = 181397, upload-time = "2025-03-05T20:03:04.443Z" }, + { url = "https://files.pythonhosted.org/packages/2e/65/65f379525a2719e91d9d90c38fe8b8bc62bd3c702ac651b7278609b696c4/websockets-15.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4a9fac8e469d04ce6c25bb2610dc535235bd4aa14996b4e6dbebf5e007eba5ee", size = 181794, upload-time = "2025-03-05T20:03:06.708Z" }, + { url = "https://files.pythonhosted.org/packages/d9/26/31ac2d08f8e9304d81a1a7ed2851c0300f636019a57cbaa91342015c72cc/websockets-15.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363c6f671b761efcb30608d24925a382497c12c506b51661883c3e22337265ed", size = 181194, upload-time = "2025-03-05T20:03:08.844Z" }, + { url = "https://files.pythonhosted.org/packages/98/72/1090de20d6c91994cd4b357c3f75a4f25ee231b63e03adea89671cc12a3f/websockets-15.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2034693ad3097d5355bfdacfffcbd3ef5694f9718ab7f29c29689a9eae841880", size = 181164, upload-time = "2025-03-05T20:03:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/2d/37/098f2e1c103ae8ed79b0e77f08d83b0ec0b241cf4b7f2f10edd0126472e1/websockets-15.0.1-cp39-cp39-win32.whl", hash = "sha256:3b1ac0d3e594bf121308112697cf4b32be538fb1444468fb0a6ae4feebc83411", size = 176381, upload-time = "2025-03-05T20:03:12.77Z" }, + { url = "https://files.pythonhosted.org/packages/75/8b/a32978a3ab42cebb2ebdd5b05df0696a09f4d436ce69def11893afa301f0/websockets-15.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:b7643a03db5c95c799b89b31c036d5f27eeb4d259c798e878d6937d71832b1e4", size = 176841, upload-time = "2025-03-05T20:03:14.367Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/d40f779fa16f74d3468357197af8d6ad07e7c5a27ea1ca74ceb38986f77a/websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3", size = 173109, upload-time = "2025-03-05T20:03:17.769Z" }, + { url = "https://files.pythonhosted.org/packages/bc/cd/5b887b8585a593073fd92f7c23ecd3985cd2c3175025a91b0d69b0551372/websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1", size = 173343, upload-time = "2025-03-05T20:03:19.094Z" }, + { url = "https://files.pythonhosted.org/packages/fe/ae/d34f7556890341e900a95acf4886833646306269f899d58ad62f588bf410/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475", size = 
174599, upload-time = "2025-03-05T20:03:21.1Z" }, + { url = "https://files.pythonhosted.org/packages/71/e6/5fd43993a87db364ec60fc1d608273a1a465c0caba69176dd160e197ce42/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9", size = 174207, upload-time = "2025-03-05T20:03:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/2b/fb/c492d6daa5ec067c2988ac80c61359ace5c4c674c532985ac5a123436cec/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04", size = 174155, upload-time = "2025-03-05T20:03:25.321Z" }, + { url = "https://files.pythonhosted.org/packages/68/a1/dcb68430b1d00b698ae7a7e0194433bce4f07ded185f0ee5fb21e2a2e91e/websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122", size = 176884, upload-time = "2025-03-05T20:03:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/b7/48/4b67623bac4d79beb3a6bb27b803ba75c1bdedc06bd827e465803690a4b2/websockets-15.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7f493881579c90fc262d9cdbaa05a6b54b3811c2f300766748db79f098db9940", size = 173106, upload-time = "2025-03-05T20:03:29.404Z" }, + { url = "https://files.pythonhosted.org/packages/ed/f0/adb07514a49fe5728192764e04295be78859e4a537ab8fcc518a3dbb3281/websockets-15.0.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:47b099e1f4fbc95b701b6e85768e1fcdaf1630f3cbe4765fa216596f12310e2e", size = 173339, upload-time = "2025-03-05T20:03:30.755Z" }, + { url = "https://files.pythonhosted.org/packages/87/28/bd23c6344b18fb43df40d0700f6d3fffcd7cef14a6995b4f976978b52e62/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67f2b6de947f8c757db2db9c71527933ad0019737ec374a8a6be9a956786aaf9", size = 174597, upload-time = "2025-03-05T20:03:32.247Z" }, + { url = "https://files.pythonhosted.org/packages/6d/79/ca288495863d0f23a60f546f0905ae8f3ed467ad87f8b6aceb65f4c013e4/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d08eb4c2b7d6c41da6ca0600c077e93f5adcfd979cd777d747e9ee624556da4b", size = 174205, upload-time = "2025-03-05T20:03:33.731Z" }, + { url = "https://files.pythonhosted.org/packages/04/e4/120ff3180b0872b1fe6637f6f995bcb009fb5c87d597c1fc21456f50c848/websockets-15.0.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b826973a4a2ae47ba357e4e82fa44a463b8f168e1ca775ac64521442b19e87f", size = 174150, upload-time = "2025-03-05T20:03:35.757Z" }, + { url = "https://files.pythonhosted.org/packages/cb/c3/30e2f9c539b8da8b1d76f64012f3b19253271a63413b2d3adb94b143407f/websockets-15.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:21c1fa28a6a7e3cbdc171c694398b6df4744613ce9b36b1a498e816787e28123", size = 176877, upload-time = "2025-03-05T20:03:37.199Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, +] + +[[package]] +name = "winkerberos" +version = "0.12.2" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/75/86d470935167eb1c40d53498993e14cc021d9611a539d61c9b4202c291ab/winkerberos-0.12.2.tar.gz", hash = "sha256:ff91daed04727a0362892802ee093d8da11f08536393526bdf3bc64e04079faa", size = 35672, upload-time = "2025-04-02T14:41:48.274Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/ac/c6ce495af45371ffd85a6a3d24c2ced679b8dbcf3b8c6beca093706b1620/winkerberos-0.12.2-cp310-cp310-win32.whl", hash = "sha256:f8b751bd5a28e6a9146f154bed395c30ce4f245448addc763f98cb8843879027", size = 25331, upload-time = "2025-04-02T14:41:36.398Z" }, + { url = "https://files.pythonhosted.org/packages/cb/7b/ad32174c3ed4710cd2ad8f20171f5061cb13603f091d714d5aa6b30d51f0/winkerberos-0.12.2-cp310-cp310-win_amd64.whl", hash = "sha256:4be3b0de548b80f52a6544dff9d571da6cdfde590176a01477358b3808b12dfa", size = 27670, upload-time = "2025-04-02T14:41:37.68Z" }, + { url = "https://files.pythonhosted.org/packages/91/12/23b29d359dee9f7a8243cb0040ea1834acd1af8cbc38cfe1c7ca82ab4ec0/winkerberos-0.12.2-cp311-cp311-win32.whl", hash = "sha256:ff2b2ec9b9246bbc05f0d4e6fe5f3f3563237357b9b35eaa58ec1a9ddf349ab8", size = 25332, upload-time = "2025-04-02T14:41:38.671Z" }, + { url = "https://files.pythonhosted.org/packages/23/d2/2bfa1dcdb4a47b7f989a9e758c892bd7393a156b0e1f0df63eca8304e892/winkerberos-0.12.2-cp311-cp311-win_amd64.whl", hash = "sha256:e6ac2b2cc329a68502821905f6ffe48e109d54a46aba7414ea231a30c75bb2d9", size = 27671, upload-time = "2025-04-02T14:41:40.104Z" }, + { url = "https://files.pythonhosted.org/packages/4f/01/26c5b1435654596c07b314653183ffe42b64ea07041c328f0fd4c68fe9f9/winkerberos-0.12.2-cp312-cp312-win32.whl", hash = "sha256:46dac1300e20738cbaf6c17c2e4832062ed7faee346c7a96f0e57f8bbe279c25", size = 25396, upload-time = "2025-04-02T14:41:41.6Z" }, + { url = "https://files.pythonhosted.org/packages/64/b1/6c4a1e4e50553798eb44dbb0d71ba6af48e2a62a0eb01bd0d4e2b41914e3/winkerberos-0.12.2-cp312-cp312-win_amd64.whl", hash = "sha256:2c5c7a70c0d4a43546b20d5654e7e7e5e5e96f42084a7f293864f7ad0fb1e953", size = 27710, upload-time = "2025-04-02T14:41:42.656Z" }, + { url = "https://files.pythonhosted.org/packages/5f/91/cff6750c7c3b2a9f35e12cd7c4df901251fc3be985edef707a3458c43e9a/winkerberos-0.12.2-cp313-cp313-win32.whl", hash = "sha256:482a72500b7822cc8f941d0c6eed668a24c030ac145c97732e175b51441bebbf", size = 25391, upload-time = "2025-04-02T14:41:43.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/98/defb037ad127c4006c4e992dd55ce0df92059626d3df5f5f4c5fc8502c26/winkerberos-0.12.2-cp313-cp313-win_amd64.whl", hash = "sha256:efd65ba54534512070916cb9c91ef9798a0f9fb0b04e12732c9631e71553fd69", size = 27704, upload-time = "2025-04-02T14:41:45.203Z" }, + { url = "https://files.pythonhosted.org/packages/be/17/b16e72e0b896cdf05666994cbc402a66f5911d56ea28d4e858714328b698/winkerberos-0.12.2-cp39-cp39-win32.whl", hash = "sha256:0c80eed53472a38d7f1dd015e27d93705b22a2acd2557bad13d8b5d688037b29", size = 25326, upload-time = "2025-04-02T14:41:46.216Z" }, + { url = "https://files.pythonhosted.org/packages/65/04/ae42e839e8d836fde613f94f30395953292a7b9be388247237196d1e5caa/winkerberos-0.12.2-cp39-cp39-win_amd64.whl", hash = "sha256:4b908aab5ab42e98bee44eca67dfebe4733d210bccf021e42b669bf4af2005a4", size = 27663, upload-time = "2025-04-02T14:41:47.294Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] + +[[package]] +name = "zope-event" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/9f/c443569a68d3844c044d9fa9711e08adb33649b527b4d432433f4c2a6a02/zope_event-5.1.1.tar.gz", hash = "sha256:c1ac931abf57efba71a2a313c5f4d57768a19b15c37e3f02f50eb1536be12d4e", size = 18811, upload-time = "2025-07-22T07:04:00.924Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/04/fd55695f6448abd22295fc68b2d3a135389558f0f49a24b0dffe019d0ecb/zope_event-5.1.1-py3-none-any.whl", hash = "sha256:8d5ea7b992c42ce73a6fa9c2ba99a004c52cd9f05d87f3220768ef0329b92df7", size = 7014, upload-time = "2025-07-22T07:03:59.9Z" }, +] + +[[package]] +name = "zope-interface" +version = "7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "setuptools" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/30/93/9210e7606be57a2dfc6277ac97dcc864fd8d39f142ca194fdc186d596fda/zope.interface-7.2.tar.gz", hash = "sha256:8b49f1a3d1ee4cdaf5b32d2e738362c7f5e40ac8b46dd7d1a65e82a4872728fe", size = 252960, upload-time = "2024-11-28T08:45:39.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/71/e6177f390e8daa7e75378505c5ab974e0bf59c1d3b19155638c7afbf4b2d/zope.interface-7.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce290e62229964715f1011c3dbeab7a4a1e4971fd6f31324c4519464473ef9f2", size = 208243, upload-time = "2024-11-28T08:47:29.781Z" }, + { url = "https://files.pythonhosted.org/packages/52/db/7e5f4226bef540f6d55acfd95cd105782bc6ee044d9b5587ce2c95558a5e/zope.interface-7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05b910a5afe03256b58ab2ba6288960a2892dfeef01336dc4be6f1b9ed02ab0a", size = 208759, upload-time = "2024-11-28T08:47:31.908Z" }, + { url = "https://files.pythonhosted.org/packages/28/ea/fdd9813c1eafd333ad92464d57a4e3a82b37ae57c19497bcffa42df673e4/zope.interface-7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:550f1c6588ecc368c9ce13c44a49b8d6b6f3ca7588873c679bd8fd88a1b557b6", size = 254922, upload-time = "2024-11-28T09:18:11.795Z" }, + { url = "https://files.pythonhosted.org/packages/3b/d3/0000a4d497ef9fbf4f66bb6828b8d0a235e690d57c333be877bec763722f/zope.interface-7.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ef9e2f865721553c6f22a9ff97da0f0216c074bd02b25cf0d3af60ea4d6931d", size = 249367, upload-time = "2024-11-28T08:48:24.238Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e5/0b359e99084f033d413419eff23ee9c2bd33bca2ca9f4e83d11856f22d10/zope.interface-7.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27f926f0dcb058211a3bb3e0e501c69759613b17a553788b2caeb991bed3b61d", size = 254488, upload-time = "2024-11-28T08:48:28.816Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/90/12d50b95f40e3b2fc0ba7f7782104093b9fd62806b13b98ef4e580f2ca61/zope.interface-7.2-cp310-cp310-win_amd64.whl", hash = "sha256:144964649eba4c5e4410bb0ee290d338e78f179cdbfd15813de1a664e7649b3b", size = 211947, upload-time = "2024-11-28T08:48:18.831Z" }, + { url = "https://files.pythonhosted.org/packages/98/7d/2e8daf0abea7798d16a58f2f3a2bf7588872eee54ac119f99393fdd47b65/zope.interface-7.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1909f52a00c8c3dcab6c4fad5d13de2285a4b3c7be063b239b8dc15ddfb73bd2", size = 208776, upload-time = "2024-11-28T08:47:53.009Z" }, + { url = "https://files.pythonhosted.org/packages/a0/2a/0c03c7170fe61d0d371e4c7ea5b62b8cb79b095b3d630ca16719bf8b7b18/zope.interface-7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:80ecf2451596f19fd607bb09953f426588fc1e79e93f5968ecf3367550396b22", size = 209296, upload-time = "2024-11-28T08:47:57.993Z" }, + { url = "https://files.pythonhosted.org/packages/49/b4/451f19448772b4a1159519033a5f72672221e623b0a1bd2b896b653943d8/zope.interface-7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:033b3923b63474800b04cba480b70f6e6243a62208071fc148354f3f89cc01b7", size = 260997, upload-time = "2024-11-28T09:18:13.935Z" }, + { url = "https://files.pythonhosted.org/packages/65/94/5aa4461c10718062c8f8711161faf3249d6d3679c24a0b81dd6fc8ba1dd3/zope.interface-7.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a102424e28c6b47c67923a1f337ede4a4c2bba3965b01cf707978a801fc7442c", size = 255038, upload-time = "2024-11-28T08:48:26.381Z" }, + { url = "https://files.pythonhosted.org/packages/9f/aa/1a28c02815fe1ca282b54f6705b9ddba20328fabdc37b8cf73fc06b172f0/zope.interface-7.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25e6a61dcb184453bb00eafa733169ab6d903e46f5c2ace4ad275386f9ab327a", size = 259806, upload-time = "2024-11-28T08:48:30.78Z" }, + { url = "https://files.pythonhosted.org/packages/a7/2c/82028f121d27c7e68632347fe04f4a6e0466e77bb36e104c8b074f3d7d7b/zope.interface-7.2-cp311-cp311-win_amd64.whl", hash = "sha256:3f6771d1647b1fc543d37640b45c06b34832a943c80d1db214a37c31161a93f1", size = 212305, upload-time = "2024-11-28T08:49:14.525Z" }, + { url = "https://files.pythonhosted.org/packages/68/0b/c7516bc3bad144c2496f355e35bd699443b82e9437aa02d9867653203b4a/zope.interface-7.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:086ee2f51eaef1e4a52bd7d3111a0404081dadae87f84c0ad4ce2649d4f708b7", size = 208959, upload-time = "2024-11-28T08:47:47.788Z" }, + { url = "https://files.pythonhosted.org/packages/a2/e9/1463036df1f78ff8c45a02642a7bf6931ae4a38a4acd6a8e07c128e387a7/zope.interface-7.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:21328fcc9d5b80768bf051faa35ab98fb979080c18e6f84ab3f27ce703bce465", size = 209357, upload-time = "2024-11-28T08:47:50.897Z" }, + { url = "https://files.pythonhosted.org/packages/07/a8/106ca4c2add440728e382f1b16c7d886563602487bdd90004788d45eb310/zope.interface-7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6dd02ec01f4468da0f234da9d9c8545c5412fef80bc590cc51d8dd084138a89", size = 264235, upload-time = "2024-11-28T09:18:15.56Z" }, + { url = "https://files.pythonhosted.org/packages/fc/ca/57286866285f4b8a4634c12ca1957c24bdac06eae28fd4a3a578e30cf906/zope.interface-7.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:8e7da17f53e25d1a3bde5da4601e026adc9e8071f9f6f936d0fe3fe84ace6d54", size = 259253, upload-time = "2024-11-28T08:48:29.025Z" }, + { url = "https://files.pythonhosted.org/packages/96/08/2103587ebc989b455cf05e858e7fbdfeedfc3373358320e9c513428290b1/zope.interface-7.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cab15ff4832580aa440dc9790b8a6128abd0b88b7ee4dd56abacbc52f212209d", size = 264702, upload-time = "2024-11-28T08:48:37.363Z" }, + { url = "https://files.pythonhosted.org/packages/5f/c7/3c67562e03b3752ba4ab6b23355f15a58ac2d023a6ef763caaca430f91f2/zope.interface-7.2-cp312-cp312-win_amd64.whl", hash = "sha256:29caad142a2355ce7cfea48725aa8bcf0067e2b5cc63fcf5cd9f97ad12d6afb5", size = 212466, upload-time = "2024-11-28T08:49:14.397Z" }, + { url = "https://files.pythonhosted.org/packages/c6/3b/e309d731712c1a1866d61b5356a069dd44e5b01e394b6cb49848fa2efbff/zope.interface-7.2-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:3e0350b51e88658d5ad126c6a57502b19d5f559f6cb0a628e3dc90442b53dd98", size = 208961, upload-time = "2024-11-28T08:48:29.865Z" }, + { url = "https://files.pythonhosted.org/packages/49/65/78e7cebca6be07c8fc4032bfbb123e500d60efdf7b86727bb8a071992108/zope.interface-7.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15398c000c094b8855d7d74f4fdc9e73aa02d4d0d5c775acdef98cdb1119768d", size = 209356, upload-time = "2024-11-28T08:48:33.297Z" }, + { url = "https://files.pythonhosted.org/packages/11/b1/627384b745310d082d29e3695db5f5a9188186676912c14b61a78bbc6afe/zope.interface-7.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:802176a9f99bd8cc276dcd3b8512808716492f6f557c11196d42e26c01a69a4c", size = 264196, upload-time = "2024-11-28T09:18:17.584Z" }, + { url = "https://files.pythonhosted.org/packages/b8/f6/54548df6dc73e30ac6c8a7ff1da73ac9007ba38f866397091d5a82237bd3/zope.interface-7.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb23f58a446a7f09db85eda09521a498e109f137b85fb278edb2e34841055398", size = 259237, upload-time = "2024-11-28T08:48:31.71Z" }, + { url = "https://files.pythonhosted.org/packages/b6/66/ac05b741c2129fdf668b85631d2268421c5cd1a9ff99be1674371139d665/zope.interface-7.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a71a5b541078d0ebe373a81a3b7e71432c61d12e660f1d67896ca62d9628045b", size = 264696, upload-time = "2024-11-28T08:48:41.161Z" }, + { url = "https://files.pythonhosted.org/packages/0a/2f/1bccc6f4cc882662162a1158cda1a7f616add2ffe322b28c99cb031b4ffc/zope.interface-7.2-cp313-cp313-win_amd64.whl", hash = "sha256:4893395d5dd2ba655c38ceb13014fd65667740f09fa5bb01caa1e6284e48c0cd", size = 212472, upload-time = "2024-11-28T08:49:56.587Z" }, + { url = "https://files.pythonhosted.org/packages/8c/2c/1f49dc8b4843c4f0848d8e43191aed312bad946a1563d1bf9e46cf2816ee/zope.interface-7.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7bd449c306ba006c65799ea7912adbbfed071089461a19091a228998b82b1fdb", size = 208349, upload-time = "2024-11-28T08:49:28.872Z" }, + { url = "https://files.pythonhosted.org/packages/ed/7d/83ddbfc8424c69579a90fc8edc2b797223da2a8083a94d8dfa0e374c5ed4/zope.interface-7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a19a6cc9c6ce4b1e7e3d319a473cf0ee989cbbe2b39201d7c19e214d2dfb80c7", size = 208799, upload-time = "2024-11-28T08:49:30.616Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/22/b1abd91854c1be03f5542fe092e6a745096d2eca7704d69432e119100583/zope.interface-7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cd1790b48c16db85d51fbbd12d20949d7339ad84fd971427cf00d990c1f137", size = 254267, upload-time = "2024-11-28T09:18:21.059Z" }, + { url = "https://files.pythonhosted.org/packages/2a/dd/fcd313ee216ad0739ae00e6126bc22a0af62a74f76a9ca668d16cd276222/zope.interface-7.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52e446f9955195440e787596dccd1411f543743c359eeb26e9b2c02b077b0519", size = 248614, upload-time = "2024-11-28T08:48:41.953Z" }, + { url = "https://files.pythonhosted.org/packages/88/d4/4ba1569b856870527cec4bf22b91fe704b81a3c1a451b2ccf234e9e0666f/zope.interface-7.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ad9913fd858274db8dd867012ebe544ef18d218f6f7d1e3c3e6d98000f14b75", size = 253800, upload-time = "2024-11-28T08:48:46.637Z" }, + { url = "https://files.pythonhosted.org/packages/69/da/c9cfb384c18bd3a26d9fc6a9b5f32ccea49ae09444f097eaa5ca9814aff9/zope.interface-7.2-cp39-cp39-win_amd64.whl", hash = "sha256:1090c60116b3da3bfdd0c03406e2f14a1ff53e5771aebe33fec1edc0a350175d", size = 211980, upload-time = "2024-11-28T08:50:35.681Z" }, +] + +[[package]] +name = "zstandard" +version = "0.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701, upload-time = "2024-07-15T00:18:06.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/55/bd0487e86679db1823fc9ee0d8c9c78ae2413d34c0b461193b5f4c31d22f/zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9", size = 788701, upload-time = "2024-07-15T00:13:27.351Z" }, + { url = "https://files.pythonhosted.org/packages/e1/8a/ccb516b684f3ad987dfee27570d635822e3038645b1a950c5e8022df1145/zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880", size = 633678, upload-time = "2024-07-15T00:13:30.24Z" }, + { url = "https://files.pythonhosted.org/packages/12/89/75e633d0611c028e0d9af6df199423bf43f54bea5007e6718ab7132e234c/zstandard-0.23.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77da4c6bfa20dd5ea25cbf12c76f181a8e8cd7ea231c673828d0386b1740b8dc", size = 4941098, upload-time = "2024-07-15T00:13:32.526Z" }, + { url = "https://files.pythonhosted.org/packages/4a/7a/bd7f6a21802de358b63f1ee636ab823711c25ce043a3e9f043b4fcb5ba32/zstandard-0.23.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2170c7e0367dde86a2647ed5b6f57394ea7f53545746104c6b09fc1f4223573", size = 5308798, upload-time = "2024-07-15T00:13:34.925Z" }, + { url = "https://files.pythonhosted.org/packages/79/3b/775f851a4a65013e88ca559c8ae42ac1352db6fcd96b028d0df4d7d1d7b4/zstandard-0.23.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c16842b846a8d2a145223f520b7e18b57c8f476924bda92aeee3a88d11cfc391", size = 5341840, upload-time = "2024-07-15T00:13:37.376Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/4f/0cc49570141dd72d4d95dd6fcf09328d1b702c47a6ec12fbed3b8aed18a5/zstandard-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:157e89ceb4054029a289fb504c98c6a9fe8010f1680de0201b3eb5dc20aa6d9e", size = 5440337, upload-time = "2024-07-15T00:13:39.772Z" }, + { url = "https://files.pythonhosted.org/packages/e7/7c/aaa7cd27148bae2dc095191529c0570d16058c54c4597a7d118de4b21676/zstandard-0.23.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:203d236f4c94cd8379d1ea61db2fce20730b4c38d7f1c34506a31b34edc87bdd", size = 4861182, upload-time = "2024-07-15T00:13:42.495Z" }, + { url = "https://files.pythonhosted.org/packages/ac/eb/4b58b5c071d177f7dc027129d20bd2a44161faca6592a67f8fcb0b88b3ae/zstandard-0.23.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dc5d1a49d3f8262be192589a4b72f0d03b72dcf46c51ad5852a4fdc67be7b9e4", size = 4932936, upload-time = "2024-07-15T00:13:44.234Z" }, + { url = "https://files.pythonhosted.org/packages/44/f9/21a5fb9bb7c9a274b05ad700a82ad22ce82f7ef0f485980a1e98ed6e8c5f/zstandard-0.23.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:752bf8a74412b9892f4e5b58f2f890a039f57037f52c89a740757ebd807f33ea", size = 5464705, upload-time = "2024-07-15T00:13:46.822Z" }, + { url = "https://files.pythonhosted.org/packages/49/74/b7b3e61db3f88632776b78b1db597af3f44c91ce17d533e14a25ce6a2816/zstandard-0.23.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80080816b4f52a9d886e67f1f96912891074903238fe54f2de8b786f86baded2", size = 4857882, upload-time = "2024-07-15T00:13:49.297Z" }, + { url = "https://files.pythonhosted.org/packages/4a/7f/d8eb1cb123d8e4c541d4465167080bec88481ab54cd0b31eb4013ba04b95/zstandard-0.23.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:84433dddea68571a6d6bd4fbf8ff398236031149116a7fff6f777ff95cad3df9", size = 4697672, upload-time = "2024-07-15T00:13:51.447Z" }, + { url = "https://files.pythonhosted.org/packages/5e/05/f7dccdf3d121309b60342da454d3e706453a31073e2c4dac8e1581861e44/zstandard-0.23.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:ab19a2d91963ed9e42b4e8d77cd847ae8381576585bad79dbd0a8837a9f6620a", size = 5206043, upload-time = "2024-07-15T00:13:53.587Z" }, + { url = "https://files.pythonhosted.org/packages/86/9d/3677a02e172dccd8dd3a941307621c0cbd7691d77cb435ac3c75ab6a3105/zstandard-0.23.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:59556bf80a7094d0cfb9f5e50bb2db27fefb75d5138bb16fb052b61b0e0eeeb0", size = 5667390, upload-time = "2024-07-15T00:13:56.137Z" }, + { url = "https://files.pythonhosted.org/packages/41/7e/0012a02458e74a7ba122cd9cafe491facc602c9a17f590367da369929498/zstandard-0.23.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:27d3ef2252d2e62476389ca8f9b0cf2bbafb082a3b6bfe9d90cbcbb5529ecf7c", size = 5198901, upload-time = "2024-07-15T00:13:58.584Z" }, + { url = "https://files.pythonhosted.org/packages/65/3a/8f715b97bd7bcfc7342d8adcd99a026cb2fb550e44866a3b6c348e1b0f02/zstandard-0.23.0-cp310-cp310-win32.whl", hash = "sha256:5d41d5e025f1e0bccae4928981e71b2334c60f580bdc8345f824e7c0a4c2a813", size = 430596, upload-time = "2024-07-15T00:14:00.693Z" }, + { url = "https://files.pythonhosted.org/packages/19/b7/b2b9eca5e5a01111e4fe8a8ffb56bdcdf56b12448a24effe6cfe4a252034/zstandard-0.23.0-cp310-cp310-win_amd64.whl", hash = "sha256:519fbf169dfac1222a76ba8861ef4ac7f0530c35dd79ba5727014613f91613d4", size = 495498, upload-time = "2024-07-15T00:14:02.741Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/40/f67e7d2c25a0e2dc1744dd781110b0b60306657f8696cafb7ad7579469bd/zstandard-0.23.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:34895a41273ad33347b2fc70e1bff4240556de3c46c6ea430a7ed91f9042aa4e", size = 788699, upload-time = "2024-07-15T00:14:04.909Z" }, + { url = "https://files.pythonhosted.org/packages/e8/46/66d5b55f4d737dd6ab75851b224abf0afe5774976fe511a54d2eb9063a41/zstandard-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:77ea385f7dd5b5676d7fd943292ffa18fbf5c72ba98f7d09fc1fb9e819b34c23", size = 633681, upload-time = "2024-07-15T00:14:13.99Z" }, + { url = "https://files.pythonhosted.org/packages/63/b6/677e65c095d8e12b66b8f862b069bcf1f1d781b9c9c6f12eb55000d57583/zstandard-0.23.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:983b6efd649723474f29ed42e1467f90a35a74793437d0bc64a5bf482bedfa0a", size = 4944328, upload-time = "2024-07-15T00:14:16.588Z" }, + { url = "https://files.pythonhosted.org/packages/59/cc/e76acb4c42afa05a9d20827116d1f9287e9c32b7ad58cc3af0721ce2b481/zstandard-0.23.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80a539906390591dd39ebb8d773771dc4db82ace6372c4d41e2d293f8e32b8db", size = 5311955, upload-time = "2024-07-15T00:14:19.389Z" }, + { url = "https://files.pythonhosted.org/packages/78/e4/644b8075f18fc7f632130c32e8f36f6dc1b93065bf2dd87f03223b187f26/zstandard-0.23.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:445e4cb5048b04e90ce96a79b4b63140e3f4ab5f662321975679b5f6360b90e2", size = 5344944, upload-time = "2024-07-15T00:14:22.173Z" }, + { url = "https://files.pythonhosted.org/packages/76/3f/dbafccf19cfeca25bbabf6f2dd81796b7218f768ec400f043edc767015a6/zstandard-0.23.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd30d9c67d13d891f2360b2a120186729c111238ac63b43dbd37a5a40670b8ca", size = 5442927, upload-time = "2024-07-15T00:14:24.825Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c3/d24a01a19b6733b9f218e94d1a87c477d523237e07f94899e1c10f6fd06c/zstandard-0.23.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d20fd853fbb5807c8e84c136c278827b6167ded66c72ec6f9a14b863d809211c", size = 4864910, upload-time = "2024-07-15T00:14:26.982Z" }, + { url = "https://files.pythonhosted.org/packages/1c/a9/cf8f78ead4597264f7618d0875be01f9bc23c9d1d11afb6d225b867cb423/zstandard-0.23.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed1708dbf4d2e3a1c5c69110ba2b4eb6678262028afd6c6fbcc5a8dac9cda68e", size = 4935544, upload-time = "2024-07-15T00:14:29.582Z" }, + { url = "https://files.pythonhosted.org/packages/2c/96/8af1e3731b67965fb995a940c04a2c20997a7b3b14826b9d1301cf160879/zstandard-0.23.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:be9b5b8659dff1f913039c2feee1aca499cfbc19e98fa12bc85e037c17ec6ca5", size = 5467094, upload-time = "2024-07-15T00:14:40.126Z" }, + { url = "https://files.pythonhosted.org/packages/ff/57/43ea9df642c636cb79f88a13ab07d92d88d3bfe3e550b55a25a07a26d878/zstandard-0.23.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:65308f4b4890aa12d9b6ad9f2844b7ee42c7f7a4fd3390425b242ffc57498f48", size = 4860440, upload-time = "2024-07-15T00:14:42.786Z" }, + { url = "https://files.pythonhosted.org/packages/46/37/edb78f33c7f44f806525f27baa300341918fd4c4af9472fbc2c3094be2e8/zstandard-0.23.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:98da17ce9cbf3bfe4617e836d561e433f871129e3a7ac16d6ef4c680f13a839c", size = 
4700091, upload-time = "2024-07-15T00:14:45.184Z" }, + { url = "https://files.pythonhosted.org/packages/c1/f1/454ac3962671a754f3cb49242472df5c2cced4eb959ae203a377b45b1a3c/zstandard-0.23.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:8ed7d27cb56b3e058d3cf684d7200703bcae623e1dcc06ed1e18ecda39fee003", size = 5208682, upload-time = "2024-07-15T00:14:47.407Z" }, + { url = "https://files.pythonhosted.org/packages/85/b2/1734b0fff1634390b1b887202d557d2dd542de84a4c155c258cf75da4773/zstandard-0.23.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:b69bb4f51daf461b15e7b3db033160937d3ff88303a7bc808c67bbc1eaf98c78", size = 5669707, upload-time = "2024-07-15T00:15:03.529Z" }, + { url = "https://files.pythonhosted.org/packages/52/5a/87d6971f0997c4b9b09c495bf92189fb63de86a83cadc4977dc19735f652/zstandard-0.23.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:034b88913ecc1b097f528e42b539453fa82c3557e414b3de9d5632c80439a473", size = 5201792, upload-time = "2024-07-15T00:15:28.372Z" }, + { url = "https://files.pythonhosted.org/packages/79/02/6f6a42cc84459d399bd1a4e1adfc78d4dfe45e56d05b072008d10040e13b/zstandard-0.23.0-cp311-cp311-win32.whl", hash = "sha256:f2d4380bf5f62daabd7b751ea2339c1a21d1c9463f1feb7fc2bdcea2c29c3160", size = 430586, upload-time = "2024-07-15T00:15:32.26Z" }, + { url = "https://files.pythonhosted.org/packages/be/a2/4272175d47c623ff78196f3c10e9dc7045c1b9caf3735bf041e65271eca4/zstandard-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:62136da96a973bd2557f06ddd4e8e807f9e13cbb0bfb9cc06cfe6d98ea90dfe0", size = 495420, upload-time = "2024-07-15T00:15:34.004Z" }, + { url = "https://files.pythonhosted.org/packages/7b/83/f23338c963bd9de687d47bf32efe9fd30164e722ba27fb59df33e6b1719b/zstandard-0.23.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4567955a6bc1b20e9c31612e615af6b53733491aeaa19a6b3b37f3b65477094", size = 788713, upload-time = "2024-07-15T00:15:35.815Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b3/1a028f6750fd9227ee0b937a278a434ab7f7fdc3066c3173f64366fe2466/zstandard-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e172f57cd78c20f13a3415cc8dfe24bf388614324d25539146594c16d78fcc8", size = 633459, upload-time = "2024-07-15T00:15:37.995Z" }, + { url = "https://files.pythonhosted.org/packages/26/af/36d89aae0c1f95a0a98e50711bc5d92c144939efc1f81a2fcd3e78d7f4c1/zstandard-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0e166f698c5a3e914947388c162be2583e0c638a4703fc6a543e23a88dea3c1", size = 4945707, upload-time = "2024-07-15T00:15:39.872Z" }, + { url = "https://files.pythonhosted.org/packages/cd/2e/2051f5c772f4dfc0aae3741d5fc72c3dcfe3aaeb461cc231668a4db1ce14/zstandard-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12a289832e520c6bd4dcaad68e944b86da3bad0d339ef7989fb7e88f92e96072", size = 5306545, upload-time = "2024-07-15T00:15:41.75Z" }, + { url = "https://files.pythonhosted.org/packages/0a/9e/a11c97b087f89cab030fa71206963090d2fecd8eb83e67bb8f3ffb84c024/zstandard-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d50d31bfedd53a928fed6707b15a8dbeef011bb6366297cc435accc888b27c20", size = 5337533, upload-time = "2024-07-15T00:15:44.114Z" }, + { url = "https://files.pythonhosted.org/packages/fc/79/edeb217c57fe1bf16d890aa91a1c2c96b28c07b46afed54a5dcf310c3f6f/zstandard-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72c68dda124a1a138340fb62fa21b9bf4848437d9ca60bd35db36f2d3345f373", size = 5436510, upload-time = 
"2024-07-15T00:15:46.509Z" }, + { url = "https://files.pythonhosted.org/packages/81/4f/c21383d97cb7a422ddf1ae824b53ce4b51063d0eeb2afa757eb40804a8ef/zstandard-0.23.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53dd9d5e3d29f95acd5de6802e909ada8d8d8cfa37a3ac64836f3bc4bc5512db", size = 4859973, upload-time = "2024-07-15T00:15:49.939Z" }, + { url = "https://files.pythonhosted.org/packages/ab/15/08d22e87753304405ccac8be2493a495f529edd81d39a0870621462276ef/zstandard-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6a41c120c3dbc0d81a8e8adc73312d668cd34acd7725f036992b1b72d22c1772", size = 4936968, upload-time = "2024-07-15T00:15:52.025Z" }, + { url = "https://files.pythonhosted.org/packages/eb/fa/f3670a597949fe7dcf38119a39f7da49a8a84a6f0b1a2e46b2f71a0ab83f/zstandard-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:40b33d93c6eddf02d2c19f5773196068d875c41ca25730e8288e9b672897c105", size = 5467179, upload-time = "2024-07-15T00:15:54.971Z" }, + { url = "https://files.pythonhosted.org/packages/4e/a9/dad2ab22020211e380adc477a1dbf9f109b1f8d94c614944843e20dc2a99/zstandard-0.23.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9206649ec587e6b02bd124fb7799b86cddec350f6f6c14bc82a2b70183e708ba", size = 4848577, upload-time = "2024-07-15T00:15:57.634Z" }, + { url = "https://files.pythonhosted.org/packages/08/03/dd28b4484b0770f1e23478413e01bee476ae8227bbc81561f9c329e12564/zstandard-0.23.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76e79bc28a65f467e0409098fa2c4376931fd3207fbeb6b956c7c476d53746dd", size = 4693899, upload-time = "2024-07-15T00:16:00.811Z" }, + { url = "https://files.pythonhosted.org/packages/2b/64/3da7497eb635d025841e958bcd66a86117ae320c3b14b0ae86e9e8627518/zstandard-0.23.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:66b689c107857eceabf2cf3d3fc699c3c0fe8ccd18df2219d978c0283e4c508a", size = 5199964, upload-time = "2024-07-15T00:16:03.669Z" }, + { url = "https://files.pythonhosted.org/packages/43/a4/d82decbab158a0e8a6ebb7fc98bc4d903266bce85b6e9aaedea1d288338c/zstandard-0.23.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9c236e635582742fee16603042553d276cca506e824fa2e6489db04039521e90", size = 5655398, upload-time = "2024-07-15T00:16:06.694Z" }, + { url = "https://files.pythonhosted.org/packages/f2/61/ac78a1263bc83a5cf29e7458b77a568eda5a8f81980691bbc6eb6a0d45cc/zstandard-0.23.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8fffdbd9d1408006baaf02f1068d7dd1f016c6bcb7538682622c556e7b68e35", size = 5191313, upload-time = "2024-07-15T00:16:09.758Z" }, + { url = "https://files.pythonhosted.org/packages/e7/54/967c478314e16af5baf849b6ee9d6ea724ae5b100eb506011f045d3d4e16/zstandard-0.23.0-cp312-cp312-win32.whl", hash = "sha256:dc1d33abb8a0d754ea4763bad944fd965d3d95b5baef6b121c0c9013eaf1907d", size = 430877, upload-time = "2024-07-15T00:16:11.758Z" }, + { url = "https://files.pythonhosted.org/packages/75/37/872d74bd7739639c4553bf94c84af7d54d8211b626b352bc57f0fd8d1e3f/zstandard-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:64585e1dba664dc67c7cdabd56c1e5685233fbb1fc1966cfba2a340ec0dfff7b", size = 495595, upload-time = "2024-07-15T00:16:13.731Z" }, + { url = "https://files.pythonhosted.org/packages/80/f1/8386f3f7c10261fe85fbc2c012fdb3d4db793b921c9abcc995d8da1b7a80/zstandard-0.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", size = 788975, upload-time = "2024-07-15T00:16:16.005Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/e8/cbf01077550b3e5dc86089035ff8f6fbbb312bc0983757c2d1117ebba242/zstandard-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", size = 633448, upload-time = "2024-07-15T00:16:17.897Z" }, + { url = "https://files.pythonhosted.org/packages/06/27/4a1b4c267c29a464a161aeb2589aff212b4db653a1d96bffe3598f3f0d22/zstandard-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", size = 4945269, upload-time = "2024-07-15T00:16:20.136Z" }, + { url = "https://files.pythonhosted.org/packages/7c/64/d99261cc57afd9ae65b707e38045ed8269fbdae73544fd2e4a4d50d0ed83/zstandard-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", size = 5306228, upload-time = "2024-07-15T00:16:23.398Z" }, + { url = "https://files.pythonhosted.org/packages/7a/cf/27b74c6f22541f0263016a0fd6369b1b7818941de639215c84e4e94b2a1c/zstandard-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", size = 5336891, upload-time = "2024-07-15T00:16:26.391Z" }, + { url = "https://files.pythonhosted.org/packages/fa/18/89ac62eac46b69948bf35fcd90d37103f38722968e2981f752d69081ec4d/zstandard-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", size = 5436310, upload-time = "2024-07-15T00:16:29.018Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a8/5ca5328ee568a873f5118d5b5f70d1f36c6387716efe2e369010289a5738/zstandard-0.23.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", size = 4859912, upload-time = "2024-07-15T00:16:31.871Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ca/3781059c95fd0868658b1cf0440edd832b942f84ae60685d0cfdb808bca1/zstandard-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", size = 4936946, upload-time = "2024-07-15T00:16:34.593Z" }, + { url = "https://files.pythonhosted.org/packages/ce/11/41a58986f809532742c2b832c53b74ba0e0a5dae7e8ab4642bf5876f35de/zstandard-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", size = 5466994, upload-time = "2024-07-15T00:16:36.887Z" }, + { url = "https://files.pythonhosted.org/packages/83/e3/97d84fe95edd38d7053af05159465d298c8b20cebe9ccb3d26783faa9094/zstandard-0.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", size = 4848681, upload-time = "2024-07-15T00:16:39.709Z" }, + { url = "https://files.pythonhosted.org/packages/6e/99/cb1e63e931de15c88af26085e3f2d9af9ce53ccafac73b6e48418fd5a6e6/zstandard-0.23.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", size = 4694239, upload-time = "2024-07-15T00:16:41.83Z" }, + { url = "https://files.pythonhosted.org/packages/ab/50/b1e703016eebbc6501fc92f34db7b1c68e54e567ef39e6e59cf5fb6f2ec0/zstandard-0.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", size = 
5200149, upload-time = "2024-07-15T00:16:44.287Z" }, + { url = "https://files.pythonhosted.org/packages/aa/e0/932388630aaba70197c78bdb10cce2c91fae01a7e553b76ce85471aec690/zstandard-0.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", size = 5655392, upload-time = "2024-07-15T00:16:46.423Z" }, + { url = "https://files.pythonhosted.org/packages/02/90/2633473864f67a15526324b007a9f96c96f56d5f32ef2a56cc12f9548723/zstandard-0.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", size = 5191299, upload-time = "2024-07-15T00:16:49.053Z" }, + { url = "https://files.pythonhosted.org/packages/b0/4c/315ca5c32da7e2dc3455f3b2caee5c8c2246074a61aac6ec3378a97b7136/zstandard-0.23.0-cp313-cp313-win32.whl", hash = "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", size = 430862, upload-time = "2024-07-15T00:16:51.003Z" }, + { url = "https://files.pythonhosted.org/packages/a2/bf/c6aaba098e2d04781e8f4f7c0ba3c7aa73d00e4c436bcc0cf059a66691d1/zstandard-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", size = 495578, upload-time = "2024-07-15T00:16:53.135Z" }, + { url = "https://files.pythonhosted.org/packages/fb/96/4fcafeb7e013a2386d22f974b5b97a0b9a65004ed58c87ae001599bfbd48/zstandard-0.23.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa014d55c3af933c1315eb4bb06dd0459661cc0b15cd61077afa6489bec63bb", size = 788697, upload-time = "2024-07-15T00:17:31.236Z" }, + { url = "https://files.pythonhosted.org/packages/83/ff/a52ce725be69b86a2967ecba0497a8184540cc284c0991125515449e54e2/zstandard-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a7f0804bb3799414af278e9ad51be25edf67f78f916e08afdb983e74161b916", size = 633679, upload-time = "2024-07-15T00:17:32.911Z" }, + { url = "https://files.pythonhosted.org/packages/34/0f/3dc62db122f6a9c481c335fff6fc9f4e88d8f6e2d47321ee3937328addb4/zstandard-0.23.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb2b1ecfef1e67897d336de3a0e3f52478182d6a47eda86cbd42504c5cbd009a", size = 4940416, upload-time = "2024-07-15T00:17:34.849Z" }, + { url = "https://files.pythonhosted.org/packages/1d/e5/9fe0dd8c85fdc2f635e6660d07872a5dc4b366db566630161e39f9f804e1/zstandard-0.23.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:837bb6764be6919963ef41235fd56a6486b132ea64afe5fafb4cb279ac44f259", size = 5307693, upload-time = "2024-07-15T00:17:37.355Z" }, + { url = "https://files.pythonhosted.org/packages/73/bf/fe62c0cd865c171ee8ed5bc83174b5382a2cb729c8d6162edfb99a83158b/zstandard-0.23.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1516c8c37d3a053b01c1c15b182f3b5f5eef19ced9b930b684a73bad121addf4", size = 5341236, upload-time = "2024-07-15T00:17:40.213Z" }, + { url = "https://files.pythonhosted.org/packages/39/86/4fe79b30c794286110802a6cd44a73b6a314ac8196b9338c0fbd78c2407d/zstandard-0.23.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48ef6a43b1846f6025dde6ed9fee0c24e1149c1c25f7fb0a0585572b2f3adc58", size = 5439101, upload-time = "2024-07-15T00:17:42.284Z" }, + { url = "https://files.pythonhosted.org/packages/72/ed/cacec235c581ebf8c608c7fb3d4b6b70d1b490d0e5128ea6996f809ecaef/zstandard-0.23.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:11e3bf3c924853a2d5835b24f03eeba7fc9b07d8ca499e247e06ff5676461a15", size = 4860320, upload-time = "2024-07-15T00:17:44.21Z" }, + { url = "https://files.pythonhosted.org/packages/f6/1e/2c589a2930f93946b132fc852c574a19d5edc23fad2b9e566f431050c7ec/zstandard-0.23.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2fb4535137de7e244c230e24f9d1ec194f61721c86ebea04e1581d9d06ea1269", size = 4931933, upload-time = "2024-07-15T00:17:46.455Z" }, + { url = "https://files.pythonhosted.org/packages/8e/f5/30eadde3686d902b5d4692bb5f286977cbc4adc082145eb3f49d834b2eae/zstandard-0.23.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8c24f21fa2af4bb9f2c492a86fe0c34e6d2c63812a839590edaf177b7398f700", size = 5463878, upload-time = "2024-07-15T00:17:48.866Z" }, + { url = "https://files.pythonhosted.org/packages/e0/c8/8aed1f0ab9854ef48e5ad4431367fcb23ce73f0304f7b72335a8edc66556/zstandard-0.23.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:a8c86881813a78a6f4508ef9daf9d4995b8ac2d147dcb1a450448941398091c9", size = 4857192, upload-time = "2024-07-15T00:17:51.558Z" }, + { url = "https://files.pythonhosted.org/packages/a8/c6/55e666cfbcd032b9e271865e8578fec56e5594d4faeac379d371526514f5/zstandard-0.23.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fe3b385d996ee0822fd46528d9f0443b880d4d05528fd26a9119a54ec3f91c69", size = 4696513, upload-time = "2024-07-15T00:17:53.924Z" }, + { url = "https://files.pythonhosted.org/packages/dc/bd/720b65bea63ec9de0ac7414c33b9baf271c8de8996e5ff324dc93fc90ff1/zstandard-0.23.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:82d17e94d735c99621bf8ebf9995f870a6b3e6d14543b99e201ae046dfe7de70", size = 5204823, upload-time = "2024-07-15T00:17:55.948Z" }, + { url = "https://files.pythonhosted.org/packages/d8/40/d678db1556e3941d330cd4e95623a63ef235b18547da98fa184cbc028ecf/zstandard-0.23.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c7c517d74bea1a6afd39aa612fa025e6b8011982a0897768a2f7c8ab4ebb78a2", size = 5666490, upload-time = "2024-07-15T00:17:58.327Z" }, + { url = "https://files.pythonhosted.org/packages/ed/cc/c89329723d7515898a1fc7ef5d251264078548c505719d13e9511800a103/zstandard-0.23.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fd7e0f1cfb70eb2f95a19b472ee7ad6d9a0a992ec0ae53286870c104ca939e5", size = 5196622, upload-time = "2024-07-15T00:18:00.404Z" }, + { url = "https://files.pythonhosted.org/packages/78/4c/634289d41e094327a94500dfc919e58841b10ea3a9efdfafbac614797ec2/zstandard-0.23.0-cp39-cp39-win32.whl", hash = "sha256:43da0f0092281bf501f9c5f6f3b4c975a8a0ea82de49ba3f7100e64d422a1274", size = 430620, upload-time = "2024-07-15T00:18:02.613Z" }, + { url = "https://files.pythonhosted.org/packages/a2/e2/0b0c5a0f4f7699fecd92c1ba6278ef9b01f2b0b0dd46f62bfc6729c05659/zstandard-0.23.0-cp39-cp39-win_amd64.whl", hash = "sha256:f8346bfa098532bc1fb6c7ef06783e969d87a99dd1d2a5a18a892c1d7a643c58", size = 495528, upload-time = "2024-07-15T00:18:04.452Z" }, +]